Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
author     Linus Torvalds <torvalds@linux-foundation.org>
           Sat, 22 Aug 2020 17:03:05 +0000 (10:03 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Sat, 22 Aug 2020 17:03:05 +0000 (10:03 -0700)
Pull kvm fixes from Paolo Bonzini:

 - PAE and PKU bugfixes for x86

 - selftests fix for new binutils

 - MMU notifier fix for arm64

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: arm64: Only reschedule if MMU_NOTIFIER_RANGE_BLOCKABLE is not set
  KVM: Pass MMU notifier range flags to kvm_unmap_hva_range()
  kvm: x86: Toggling CR4.PKE does not load PDPTEs in PAE mode
  kvm: x86: Toggling CR4.SMAP does not load PDPTEs in PAE mode
  KVM: x86: fix access code passed to gva_to_gpa
  selftests: kvm: Use a shorter encoding to clear RAX
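
Aside on the last shortlog entry above (the RAX-clearing selftest fix, tied to
the "selftests fix for new binutils" item in the summary): the change concerns
instruction encoding length. As a general illustration only, and not the
selftest code from the tree, clearing a 64-bit register through its 32-bit
alias is shorter because x86-64 zero-extends 32-bit writes into the full
register. The standalone C sketch below uses GCC inline assembly and assumes
an x86-64 host; the file name demo.c is hypothetical:

    /* demo.c - hypothetical illustration, not from the kernel tree.
     * Build and run with: gcc demo.c -o demo && ./demo
     */
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t a = 0xdeadbeefcafef00dULL;
        uint64_t b = 0xdeadbeefcafef00dULL;

        /* "xor %eax,%eax" encodes in 2 bytes (31 c0); a write to EAX
         * zero-extends, so all of RAX ends up cleared. */
        asm volatile("xor %%eax, %%eax" : "=a"(a) : "0"(a) : "cc");

        /* "mov $0,%rax" encodes in 7 bytes (48 c7 c0 00 00 00 00). */
        asm volatile("mov $0, %%rax" : "=a"(b) : "0"(b));

        /* Both registers are now zero; only the encoding length differs. */
        printf("a=%#llx b=%#llx\n",
               (unsigned long long)a, (unsigned long long)b);
        return 0;
    }

Both forms zero the register; the difference is only in how many bytes the
instruction occupies, which can matter when a test depends on the exact size
or layout of the emitted code.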

2390 files changed:
.mailmap
Documentation/ABI/testing/sysfs-bus-nfit
Documentation/ABI/testing/sysfs-bus-nvdimm [new file with mode: 0644]
Documentation/ABI/testing/sysfs-driver-input-exc3000 [new file with mode: 0644]
Documentation/ABI/testing/sysfs-fs-f2fs
Documentation/admin-guide/cgroup-v2.rst
Documentation/admin-guide/ext4.rst
Documentation/admin-guide/hw-vuln/multihit.rst
Documentation/admin-guide/kernel-parameters.txt
Documentation/admin-guide/pm/intel_pstate.rst
Documentation/admin-guide/sysctl/kernel.rst
Documentation/admin-guide/sysctl/vm.rst
Documentation/arm64/silicon-errata.rst
Documentation/bpf/bpf_design_QA.rst
Documentation/bpf/index.rst
Documentation/cdrom/cdrom-standard.rst
Documentation/core-api/idr.rst
Documentation/dev-tools/coccinelle.rst
Documentation/dev-tools/kgdb.rst
Documentation/devicetree/bindings/arm/arm,integrator.yaml
Documentation/devicetree/bindings/arm/arm,realview.yaml
Documentation/devicetree/bindings/arm/arm,vexpress-juno.yaml
Documentation/devicetree/bindings/arm/bcm/brcm,bcm11351.yaml
Documentation/devicetree/bindings/arm/bcm/brcm,bcm21664.yaml
Documentation/devicetree/bindings/arm/bcm/brcm,bcm23550.yaml
Documentation/devicetree/bindings/arm/bcm/brcm,cygnus.yaml
Documentation/devicetree/bindings/arm/bcm/brcm,hr2.yaml
Documentation/devicetree/bindings/arm/bcm/brcm,ns2.yaml
Documentation/devicetree/bindings/arm/bcm/brcm,nsp.yaml
Documentation/devicetree/bindings/arm/bcm/brcm,stingray.yaml
Documentation/devicetree/bindings/arm/bcm/brcm,vulcan-soc.yaml
Documentation/devicetree/bindings/arm/bcm/raspberrypi,bcm2835-firmware.yaml
Documentation/devicetree/bindings/arm/coresight-cti.yaml
Documentation/devicetree/bindings/arm/cpus.yaml
Documentation/devicetree/bindings/arm/fsl.yaml
Documentation/devicetree/bindings/arm/intel,keembay.yaml
Documentation/devicetree/bindings/arm/keystone/ti,k3-sci-common.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/arm/mediatek/mediatek,pericfg.yaml
Documentation/devicetree/bindings/bus/baikal,bt1-apb.yaml
Documentation/devicetree/bindings/bus/baikal,bt1-axi.yaml
Documentation/devicetree/bindings/clock/idt,versaclock5.txt [deleted file]
Documentation/devicetree/bindings/clock/idt,versaclock5.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/clock/imx23-clock.yaml
Documentation/devicetree/bindings/clock/imx28-clock.yaml
Documentation/devicetree/bindings/clock/ingenic,cgu.yaml
Documentation/devicetree/bindings/clock/qcom,gpucc.yaml [moved from Documentation/devicetree/bindings/clock/qcom,sdm845-gpucc.yaml with 75% similarity]
Documentation/devicetree/bindings/clock/qcom,mmcc.yaml
Documentation/devicetree/bindings/clock/qcom,msm8996-apcc.yaml
Documentation/devicetree/bindings/clock/qcom,sc7180-gpucc.yaml [deleted file]
Documentation/devicetree/bindings/clock/qcom,sc7180-lpasscorecc.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/clock/renesas,cpg-clocks.yaml
Documentation/devicetree/bindings/clock/rockchip,rk3288-cru.txt
Documentation/devicetree/bindings/clock/sprd,sc9863a-clk.yaml
Documentation/devicetree/bindings/display/brcm,bcm2835-hdmi.yaml
Documentation/devicetree/bindings/display/bridge/nwl-dsi.yaml
Documentation/devicetree/bindings/display/bridge/renesas,lvds.yaml
Documentation/devicetree/bindings/display/bridge/simple-bridge.yaml
Documentation/devicetree/bindings/display/dsi-controller.yaml
Documentation/devicetree/bindings/display/ilitek,ili9486.yaml
Documentation/devicetree/bindings/display/ingenic,ipu.yaml
Documentation/devicetree/bindings/display/ingenic,lcd.yaml
Documentation/devicetree/bindings/display/msm/gmu.yaml
Documentation/devicetree/bindings/display/panel/asus,z00t-tm5p5-nt35596.yaml
Documentation/devicetree/bindings/display/panel/boe,tv101wum-nl6.yaml
Documentation/devicetree/bindings/display/panel/elida,kd35t133.yaml
Documentation/devicetree/bindings/display/panel/feixin,k101-im2ba02.yaml
Documentation/devicetree/bindings/display/panel/ilitek,ili9322.yaml
Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.yaml
Documentation/devicetree/bindings/display/panel/leadtek,ltk050h3146w.yaml
Documentation/devicetree/bindings/display/panel/leadtek,ltk500hd1829.yaml
Documentation/devicetree/bindings/display/panel/novatek,nt35510.yaml
Documentation/devicetree/bindings/display/panel/panel-dsi-cm.yaml
Documentation/devicetree/bindings/display/panel/panel-timing.yaml
Documentation/devicetree/bindings/display/panel/raydium,rm68200.yaml
Documentation/devicetree/bindings/display/panel/samsung,s6e88a0-ams452ef01.yaml
Documentation/devicetree/bindings/display/panel/visionox,rm69299.yaml
Documentation/devicetree/bindings/display/st,stm32-dsi.yaml
Documentation/devicetree/bindings/display/ti/ti,j721e-dss.yaml
Documentation/devicetree/bindings/dsp/fsl,dsp.yaml
Documentation/devicetree/bindings/example-schema.yaml
Documentation/devicetree/bindings/fsi/ibm,fsi2spi.yaml
Documentation/devicetree/bindings/gpio/brcm,xgs-iproc-gpio.yaml
Documentation/devicetree/bindings/gpio/gpio-mxs.yaml
Documentation/devicetree/bindings/gpio/renesas,rcar-gpio.yaml
Documentation/devicetree/bindings/gpu/vivante,gc.yaml
Documentation/devicetree/bindings/hwlock/qcom-hwspinlock.txt [deleted file]
Documentation/devicetree/bindings/hwlock/qcom-hwspinlock.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/hwmon/adi,axi-fan-control.yaml
Documentation/devicetree/bindings/i2c/i2c-imx.yaml
Documentation/devicetree/bindings/i2c/i2c-mt65xx.txt
Documentation/devicetree/bindings/i2c/i2c-mxs.yaml
Documentation/devicetree/bindings/i2c/i2c-pxa.yaml
Documentation/devicetree/bindings/i2c/i2c.txt
Documentation/devicetree/bindings/i2c/marvell,mv64xxx-i2c.yaml
Documentation/devicetree/bindings/i2c/renesas,i2c.txt
Documentation/devicetree/bindings/i2c/renesas,iic.txt
Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml
Documentation/devicetree/bindings/iio/adc/maxim,max1238.yaml
Documentation/devicetree/bindings/iio/adc/qcom,spmi-vadc.yaml
Documentation/devicetree/bindings/iio/adc/rockchip-saradc.yaml
Documentation/devicetree/bindings/iio/amplifiers/adi,hmc425a.yaml
Documentation/devicetree/bindings/iio/chemical/atlas,sensor.yaml
Documentation/devicetree/bindings/iio/dac/adi,ad5770r.yaml
Documentation/devicetree/bindings/iio/light/vishay,vcnl4000.yaml
Documentation/devicetree/bindings/iio/magnetometer/asahi-kasei,ak8975.yaml
Documentation/devicetree/bindings/iio/proximity/vishay,vcnl3020.yaml
Documentation/devicetree/bindings/iio/temperature/adi,ltc2983.yaml
Documentation/devicetree/bindings/input/imx-keypad.yaml
Documentation/devicetree/bindings/input/touchscreen/cypress,cy8ctma140.yaml
Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.yaml
Documentation/devicetree/bindings/input/touchscreen/eeti,exc3000.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/input/touchscreen/exc3000.txt [deleted file]
Documentation/devicetree/bindings/input/touchscreen/goodix.yaml
Documentation/devicetree/bindings/input/touchscreen/touchscreen.yaml
Documentation/devicetree/bindings/interconnect/fsl,imx8m-noc.yaml
Documentation/devicetree/bindings/interconnect/qcom,sc7180.yaml
Documentation/devicetree/bindings/interconnect/qcom,sdm845.yaml
Documentation/devicetree/bindings/interrupt-controller/arm,gic.yaml
Documentation/devicetree/bindings/interrupt-controller/ingenic,intc.yaml
Documentation/devicetree/bindings/interrupt-controller/mti,gic.yaml
Documentation/devicetree/bindings/iommu/arm,smmu.yaml
Documentation/devicetree/bindings/iommu/mediatek,iommu.txt
Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.yaml
Documentation/devicetree/bindings/leds/backlight/qcom-wled.yaml
Documentation/devicetree/bindings/mailbox/fsl,mu.yaml
Documentation/devicetree/bindings/mailbox/qcom-ipcc.yaml
Documentation/devicetree/bindings/media/allwinner,sun8i-a83t-de2-rotate.yaml
Documentation/devicetree/bindings/media/allwinner,sun8i-h3-deinterlace.yaml
Documentation/devicetree/bindings/media/i2c/adv7180.yaml
Documentation/devicetree/bindings/media/i2c/dongwoon,dw9768.yaml
Documentation/devicetree/bindings/media/i2c/imi,rdacm2x-gmsl.yaml
Documentation/devicetree/bindings/media/i2c/maxim,max9286.yaml
Documentation/devicetree/bindings/media/i2c/ov8856.yaml
Documentation/devicetree/bindings/media/renesas,csi2.yaml
Documentation/devicetree/bindings/media/rockchip-vpu.yaml
Documentation/devicetree/bindings/media/xilinx/xlnx,csi2rxss.yaml
Documentation/devicetree/bindings/memory-controllers/fsl/mmdc.yaml
Documentation/devicetree/bindings/memory-controllers/ingenic,nemc.yaml
Documentation/devicetree/bindings/memory-controllers/mediatek,smi-common.txt
Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.txt
Documentation/devicetree/bindings/memory-controllers/renesas,rpc-if.yaml
Documentation/devicetree/bindings/memory-controllers/st,stm32-fmc2-ebi.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/mfd/cirrus,madera.yaml
Documentation/devicetree/bindings/mfd/cros-ec.txt [deleted file]
Documentation/devicetree/bindings/mfd/gateworks-gsc.yaml
Documentation/devicetree/bindings/mfd/google,cros-ec.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/mfd/khadas,mcu.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/mfd/st,stm32-lptimer.yaml
Documentation/devicetree/bindings/mfd/st,stmfx.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/mfd/st,stpmic1.yaml
Documentation/devicetree/bindings/mfd/stmfx.txt [deleted file]
Documentation/devicetree/bindings/mfd/ti,j721e-system-controller.yaml
Documentation/devicetree/bindings/mfd/twl-family.txt
Documentation/devicetree/bindings/mfd/wlf,arizona.yaml
Documentation/devicetree/bindings/mmc/amlogic,meson-mx-sdhc.yaml
Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.yaml
Documentation/devicetree/bindings/mmc/ingenic,mmc.yaml
Documentation/devicetree/bindings/mmc/mxs-mmc.yaml
Documentation/devicetree/bindings/mmc/renesas,sdhi.yaml
Documentation/devicetree/bindings/mtd/arasan,nand-controller.yaml
Documentation/devicetree/bindings/mtd/davinci-nand.txt
Documentation/devicetree/bindings/mtd/fsl-upm-nand.txt
Documentation/devicetree/bindings/mtd/gpmi-nand.yaml
Documentation/devicetree/bindings/mtd/mxc-nand.yaml
Documentation/devicetree/bindings/mtd/nand-controller.yaml
Documentation/devicetree/bindings/mtd/st,stm32-fmc2-nand.yaml
Documentation/devicetree/bindings/net/dsa/dsa.yaml
Documentation/devicetree/bindings/net/ethernet-controller.yaml
Documentation/devicetree/bindings/net/qcom,ipa.yaml
Documentation/devicetree/bindings/net/socionext,uniphier-ave4.yaml
Documentation/devicetree/bindings/net/stm32-dwmac.yaml
Documentation/devicetree/bindings/net/ti,cpsw-switch.yaml
Documentation/devicetree/bindings/net/ti,k3-am654-cpsw-nuss.yaml
Documentation/devicetree/bindings/nvmem/imx-ocotp.yaml
Documentation/devicetree/bindings/nvmem/qcom,qfprom.yaml
Documentation/devicetree/bindings/pci/intel-gw-pcie.yaml
Documentation/devicetree/bindings/pci/ti,j721e-pci-ep.yaml
Documentation/devicetree/bindings/pci/ti,j721e-pci-host.yaml
Documentation/devicetree/bindings/phy/amlogic,meson-g12a-usb2-phy.yaml
Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.yaml
Documentation/devicetree/bindings/phy/qcom,qmp-phy.yaml
Documentation/devicetree/bindings/phy/qcom,qmp-usb3-dp-phy.yaml
Documentation/devicetree/bindings/phy/qcom,qusb2-phy.yaml
Documentation/devicetree/bindings/phy/socionext,uniphier-pcie-phy.yaml
Documentation/devicetree/bindings/phy/socionext,uniphier-usb3hs-phy.yaml
Documentation/devicetree/bindings/phy/socionext,uniphier-usb3ss-phy.yaml
Documentation/devicetree/bindings/phy/ti,phy-j721e-wiz.yaml
Documentation/devicetree/bindings/pinctrl/aspeed,ast2400-pinctrl.yaml
Documentation/devicetree/bindings/pinctrl/aspeed,ast2500-pinctrl.yaml
Documentation/devicetree/bindings/pinctrl/aspeed,ast2600-pinctrl.yaml
Documentation/devicetree/bindings/pinctrl/ingenic,pinctrl.txt [deleted file]
Documentation/devicetree/bindings/pinctrl/ingenic,pinctrl.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/pinctrl/mediatek,mt6779-pinctrl.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/pinctrl/pinctrl-stmfx.txt [deleted file]
Documentation/devicetree/bindings/pinctrl/qcom,ipq4019-pinctrl.txt
Documentation/devicetree/bindings/pinctrl/qcom,ipq6018-pinctrl.yaml
Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.txt
Documentation/devicetree/bindings/pinctrl/qcom,sm8250-pinctrl.yaml
Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt
Documentation/devicetree/bindings/pinctrl/renesas,rza2-pinctrl.txt [deleted file]
Documentation/devicetree/bindings/pinctrl/renesas,rza2-pinctrl.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml
Documentation/devicetree/bindings/power/power-domain.yaml
Documentation/devicetree/bindings/power/supply/gpio-charger.yaml
Documentation/devicetree/bindings/pwm/mxs-pwm.yaml
Documentation/devicetree/bindings/regulator/qcom,smd-rpm-regulator.yaml
Documentation/devicetree/bindings/regulator/qcom-labibb-regulator.yaml
Documentation/devicetree/bindings/remoteproc/qcom,pil-info.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/remoteproc/ti,k3-dsp-rproc.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/reset/fsl,imx7-src.yaml
Documentation/devicetree/bindings/rtc/ingenic,rtc.yaml
Documentation/devicetree/bindings/rtc/trivial-rtc.yaml
Documentation/devicetree/bindings/serial/ingenic,uart.yaml
Documentation/devicetree/bindings/soc/microchip/atmel,at91rm9200-tcb.yaml
Documentation/devicetree/bindings/soc/qcom/qcom,geni-se.yaml
Documentation/devicetree/bindings/sound/amlogic,aiu.yaml
Documentation/devicetree/bindings/sound/amlogic,g12a-toacodec.yaml
Documentation/devicetree/bindings/sound/cirrus,cs42l51.yaml
Documentation/devicetree/bindings/sound/ingenic,aic.yaml
Documentation/devicetree/bindings/sound/maxim,max98390.yaml
Documentation/devicetree/bindings/sound/nvidia,tegra186-dspk.yaml
Documentation/devicetree/bindings/sound/nvidia,tegra210-dmic.yaml
Documentation/devicetree/bindings/sound/nvidia,tegra210-i2s.yaml
Documentation/devicetree/bindings/sound/rockchip-i2s.yaml
Documentation/devicetree/bindings/sound/rockchip-spdif.yaml
Documentation/devicetree/bindings/sound/tas2770.yaml
Documentation/devicetree/bindings/sound/ti,j721e-cpb-audio.yaml
Documentation/devicetree/bindings/sound/ti,j721e-cpb-ivi-audio.yaml
Documentation/devicetree/bindings/sound/tlv320adcx140.yaml
Documentation/devicetree/bindings/spi/allwinner,sun6i-a31-spi.yaml
Documentation/devicetree/bindings/spi/fsl-imx-cspi.yaml
Documentation/devicetree/bindings/spi/mikrotik,rb4xx-spi.yaml
Documentation/devicetree/bindings/spi/spi-fsl-lpspi.yaml
Documentation/devicetree/bindings/spi/spi-mux.yaml
Documentation/devicetree/bindings/spi/spi-rockchip.yaml
Documentation/devicetree/bindings/thermal/imx-thermal.yaml
Documentation/devicetree/bindings/thermal/thermal-cooling-devices.yaml
Documentation/devicetree/bindings/thermal/thermal-idle.yaml
Documentation/devicetree/bindings/timer/fsl,imxgpt.yaml
Documentation/devicetree/bindings/timer/ingenic,tcu.yaml
Documentation/devicetree/bindings/timer/sifive,clint.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/timer/snps,dw-apb-timer.yaml
Documentation/devicetree/bindings/trivial-devices.yaml
Documentation/devicetree/bindings/usb/dwc2.yaml
Documentation/devicetree/bindings/usb/generic-ehci.yaml
Documentation/devicetree/bindings/usb/ingenic,musb.yaml
Documentation/devicetree/bindings/usb/nvidia,tegra-xudc.yaml
Documentation/devicetree/bindings/usb/ti,j721e-usb.yaml
Documentation/devicetree/bindings/usb/ti,keystone-dwc3.yaml
Documentation/devicetree/bindings/vendor-prefixes.yaml
Documentation/devicetree/bindings/watchdog/davinci-wdt.txt
Documentation/devicetree/bindings/watchdog/dw_wdt.txt [deleted file]
Documentation/devicetree/bindings/watchdog/qcom-wdt.txt [deleted file]
Documentation/devicetree/bindings/watchdog/qcom-wdt.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/watchdog/renesas,wdt.yaml
Documentation/devicetree/bindings/watchdog/snps,dw-wdt.yaml [new file with mode: 0644]
Documentation/devicetree/writing-schema.rst
Documentation/driver-api/index.rst
Documentation/driver-api/nvdimm/firmware-activate.rst [new file with mode: 0644]
Documentation/driver-api/smsc_ece1099.rst [deleted file]
Documentation/features/debug/kmemleak/arch-support.txt
Documentation/features/debug/stackprotector/arch-support.txt
Documentation/features/locking/lockdep/arch-support.txt
Documentation/features/time/context-tracking/arch-support.txt
Documentation/filesystems/ext4/about.rst
Documentation/filesystems/f2fs.rst
Documentation/filesystems/journalling.rst
Documentation/filesystems/proc.rst
Documentation/filesystems/zonefs.rst
Documentation/gpu/backlight.rst [new file with mode: 0644]
Documentation/gpu/index.rst
Documentation/i2c/busses/i2c-i801.rst
Documentation/i2c/dev-interface.rst
Documentation/i2c/index.rst
Documentation/i2c/upgrading-clients.rst [deleted file]
Documentation/input/uinput.rst
Documentation/kbuild/Kconfig.recursion-issue-02
Documentation/kbuild/kconfig-language.rst
Documentation/kbuild/llvm.rst
Documentation/kbuild/makefiles.rst
Documentation/locking/locktypes.rst
Documentation/locking/seqlock.rst
Documentation/networking/bonding.rst
Documentation/process/2.Process.rst
Documentation/staging/rpmsg.rst
Documentation/trace/intel_th.rst
Documentation/translations/zh_CN/admin-guide/cpu-load.rst
Documentation/translations/zh_CN/admin-guide/index.rst
Documentation/userspace-api/ioctl/ioctl-number.rst
Documentation/vm/page_migration.rst
Documentation/watchdog/mlx-wdt.rst
Documentation/watchdog/watchdog-api.rst
Documentation/watchdog/watchdog-kernel-api.rst
MAINTAINERS
Makefile
arch/Kconfig
arch/alpha/include/asm/core_apecs.h
arch/alpha/include/asm/core_cia.h
arch/alpha/include/asm/core_lca.h
arch/alpha/include/asm/core_marvel.h
arch/alpha/include/asm/core_mcpcia.h
arch/alpha/include/asm/core_t2.h
arch/alpha/include/asm/io.h
arch/alpha/include/asm/io_trivial.h
arch/alpha/include/asm/jensen.h
arch/alpha/include/asm/machvec.h
arch/alpha/include/asm/uaccess.h
arch/alpha/kernel/core_marvel.c
arch/alpha/kernel/io.c
arch/alpha/kernel/syscalls/syscall.tbl
arch/alpha/mm/fault.c
arch/arc/include/asm/segment.h
arch/arc/kernel/process.c
arch/arc/mm/fault.c
arch/arm/boot/compressed/Makefile
arch/arm/boot/dts/am33xx-l4.dtsi
arch/arm/configs/am200epdkit_defconfig
arch/arm/include/asm/device.h
arch/arm/include/asm/uaccess.h
arch/arm/include/asm/vdso/gettimeofday.h
arch/arm/kernel/process.c
arch/arm/kernel/signal.c
arch/arm/mm/fault.c
arch/arm/tools/syscall.tbl
arch/arm64/include/asm/device.h
arch/arm64/include/asm/uaccess.h
arch/arm64/include/asm/unistd32.h
arch/arm64/include/asm/vdso/compat_gettimeofday.h
arch/arm64/include/asm/vdso/gettimeofday.h
arch/arm64/kernel/sdei.c
arch/arm64/mm/fault.c
arch/arm64/mm/numa.c
arch/csky/include/asm/segment.h
arch/csky/mm/fault.c
arch/h8300/include/asm/segment.h
arch/hexagon/kernel/process.c
arch/hexagon/mm/vm_fault.c
arch/ia64/include/asm/device.h
arch/ia64/include/asm/pgtable.h
arch/ia64/include/asm/smp.h
arch/ia64/include/asm/uaccess.h
arch/ia64/include/asm/xtp.h [new file with mode: 0644]
arch/ia64/kernel/iosapic.c
arch/ia64/kernel/irq.c
arch/ia64/kernel/process.c
arch/ia64/kernel/sal.c
arch/ia64/kernel/setup.c
arch/ia64/kernel/smp.c
arch/ia64/kernel/syscalls/syscall.tbl
arch/ia64/mm/fault.c
arch/ia64/mm/numa.c
arch/m68k/Kconfig
arch/m68k/Kconfig.machine
arch/m68k/include/asm/segment.h
arch/m68k/include/asm/tlbflush.h
arch/m68k/kernel/syscalls/syscall.tbl
arch/m68k/mm/fault.c
arch/microblaze/include/asm/uaccess.h
arch/microblaze/kernel/syscalls/syscall.tbl
arch/microblaze/mm/fault.c
arch/mips/configs/cu1000-neo_defconfig
arch/mips/include/asm/uaccess.h
arch/mips/include/asm/vdso/gettimeofday.h
arch/mips/kernel/syscalls/syscall_n32.tbl
arch/mips/kernel/syscalls/syscall_n64.tbl
arch/mips/kernel/syscalls/syscall_o32.tbl
arch/mips/kernel/unaligned.c
arch/mips/mm/fault.c
arch/mips/vdso/Makefile
arch/nds32/include/asm/uaccess.h
arch/nds32/kernel/process.c
arch/nds32/mm/alignment.c
arch/nds32/mm/fault.c
arch/nios2/include/asm/uaccess.h
arch/nios2/kernel/process.c
arch/nios2/mm/fault.c
arch/openrisc/include/asm/io.h
arch/openrisc/include/asm/uaccess.h
arch/openrisc/kernel/process.c
arch/openrisc/kernel/setup.c
arch/openrisc/kernel/signal.c
arch/openrisc/kernel/smp.c
arch/openrisc/kernel/stacktrace.c
arch/openrisc/kernel/vmlinux.lds.S
arch/openrisc/mm/fault.c
arch/openrisc/mm/tlb.c
arch/parisc/include/asm/atomic.h
arch/parisc/include/asm/barrier.h
arch/parisc/include/asm/elf.h
arch/parisc/include/asm/io.h
arch/parisc/include/asm/pgalloc.h
arch/parisc/include/asm/timex.h
arch/parisc/include/asm/uaccess.h
arch/parisc/kernel/ftrace.c
arch/parisc/kernel/process.c
arch/parisc/kernel/syscalls/syscall.tbl
arch/parisc/lib/iomap.c
arch/parisc/mm/fault.c
arch/powerpc/boot/Makefile
arch/powerpc/include/asm/device.h
arch/powerpc/include/asm/uaccess.h
arch/powerpc/kernel/Makefile
arch/powerpc/kernel/iomap.c
arch/powerpc/kernel/syscalls/syscall.tbl
arch/powerpc/mm/book3s64/hash_utils.c
arch/powerpc/mm/book3s64/pkeys.c
arch/powerpc/mm/copro_fault.c
arch/powerpc/mm/fault.c
arch/powerpc/platforms/powermac/Makefile
arch/powerpc/xmon/Makefile
arch/riscv/Kconfig
arch/riscv/Kconfig.socs
arch/riscv/configs/nommu_virt_defconfig
arch/riscv/configs/rv32_defconfig
arch/riscv/include/asm/clint.h [deleted file]
arch/riscv/include/asm/smp.h
arch/riscv/include/asm/timex.h
arch/riscv/include/asm/uaccess.h
arch/riscv/include/asm/vdso/gettimeofday.h
arch/riscv/kernel/Makefile
arch/riscv/kernel/clint.c [deleted file]
arch/riscv/kernel/head.S
arch/riscv/kernel/sbi.c
arch/riscv/kernel/setup.c
arch/riscv/kernel/smp.c
arch/riscv/kernel/smpboot.c
arch/riscv/mm/fault.c
arch/s390/Kbuild
arch/s390/Kconfig
arch/s390/Makefile
arch/s390/include/asm/atomic.h
arch/s390/include/asm/debug.h
arch/s390/include/asm/topology.h
arch/s390/include/asm/uaccess.h
arch/s390/kernel/Makefile
arch/s390/kernel/debug.c
arch/s390/kernel/numa.c [moved from arch/s390/numa/numa.c with 100% similarity]
arch/s390/kernel/process.c
arch/s390/kernel/syscalls/syscall.tbl
arch/s390/kvm/interrupt.c
arch/s390/kvm/kvm-s390.c
arch/s390/kvm/priv.c
arch/s390/lib/test_unwind.c
arch/s390/mm/fault.c
arch/s390/mm/gmap.c
arch/sh/Kconfig
arch/sh/Makefile
arch/sh/boards/Kconfig
arch/sh/boards/board-sh2007.c
arch/sh/boards/mach-cayman/Makefile [deleted file]
arch/sh/boards/mach-cayman/irq.c [deleted file]
arch/sh/boards/mach-cayman/panic.c [deleted file]
arch/sh/boards/mach-cayman/setup.c [deleted file]
arch/sh/boards/mach-landisk/setup.c
arch/sh/boot/compressed/Makefile
arch/sh/configs/cayman_defconfig [deleted file]
arch/sh/configs/dreamcast_defconfig
arch/sh/configs/espt_defconfig
arch/sh/configs/hp6xx_defconfig
arch/sh/configs/landisk_defconfig
arch/sh/configs/lboxre2_defconfig
arch/sh/configs/microdev_defconfig
arch/sh/configs/migor_defconfig
arch/sh/configs/r7780mp_defconfig
arch/sh/configs/r7785rp_defconfig
arch/sh/configs/rts7751r2d1_defconfig
arch/sh/configs/rts7751r2dplus_defconfig
arch/sh/configs/se7206_defconfig
arch/sh/configs/se7343_defconfig
arch/sh/configs/se7619_defconfig
arch/sh/configs/se7705_defconfig
arch/sh/configs/se7750_defconfig
arch/sh/configs/se7751_defconfig
arch/sh/configs/secureedge5410_defconfig
arch/sh/configs/sh03_defconfig
arch/sh/configs/sh7710voipgw_defconfig
arch/sh/configs/sh7757lcr_defconfig
arch/sh/configs/sh7763rdp_defconfig
arch/sh/configs/shmin_defconfig
arch/sh/configs/titan_defconfig
arch/sh/drivers/pci/Makefile
arch/sh/drivers/pci/common.c
arch/sh/drivers/pci/fixups-cayman.c [deleted file]
arch/sh/drivers/pci/pci-sh7780.c
arch/sh/drivers/pci/pci.c
arch/sh/include/asm/adc.h
arch/sh/include/asm/addrspace.h
arch/sh/include/asm/bitops.h
arch/sh/include/asm/cache.h
arch/sh/include/asm/cacheflush.h
arch/sh/include/asm/dma.h
arch/sh/include/asm/elf.h
arch/sh/include/asm/fpu.h
arch/sh/include/asm/freq.h
arch/sh/include/asm/futex.h
arch/sh/include/asm/io.h
arch/sh/include/asm/kdebug.h
arch/sh/include/asm/mmu_context.h
arch/sh/include/asm/mmzone.h
arch/sh/include/asm/pci.h
arch/sh/include/asm/processor_32.h
arch/sh/include/asm/segment.h
arch/sh/include/asm/smc37c93x.h
arch/sh/include/asm/sparsemem.h
arch/sh/include/asm/stacktrace.h
arch/sh/include/asm/string_32.h
arch/sh/include/asm/syscall_32.h
arch/sh/include/asm/syscalls_32.h
arch/sh/include/asm/thread_info.h
arch/sh/include/asm/uaccess_32.h
arch/sh/include/asm/watchdog.h
arch/sh/kernel/Makefile
arch/sh/kernel/disassemble.c
arch/sh/kernel/dma-coherent.c
arch/sh/kernel/dumpstack.c
arch/sh/kernel/entry-common.S
arch/sh/kernel/io_trapped.c
arch/sh/kernel/iomap.c
arch/sh/kernel/ioport.c
arch/sh/kernel/machvec.c
arch/sh/kernel/perf_callchain.c
arch/sh/kernel/process_32.c
arch/sh/kernel/ptrace_32.c
arch/sh/kernel/stacktrace.c
arch/sh/kernel/syscalls/syscall.tbl
arch/sh/kernel/traps_32.c
arch/sh/lib/Makefile
arch/sh/lib/delay.c
arch/sh/mm/Makefile
arch/sh/mm/consistent.c
arch/sh/mm/fault.c
arch/sh/mm/init.c
arch/sh/mm/ioremap.c
arch/sh/mm/ioremap.h [new file with mode: 0644]
arch/sh/mm/ioremap_fixed.c
arch/sh/mm/pgtable.c
arch/sh/oprofile/backtrace.c
arch/sh/tools/mach-types
arch/sparc/include/asm/sparsemem.h
arch/sparc/include/asm/timer_64.h
arch/sparc/include/asm/uaccess_32.h
arch/sparc/include/asm/uaccess_64.h
arch/sparc/include/asm/vvar.h
arch/sparc/kernel/process_32.c
arch/sparc/kernel/process_64.c
arch/sparc/kernel/syscalls/syscall.tbl
arch/sparc/kernel/vdso.c
arch/sparc/mm/fault_32.c
arch/sparc/mm/fault_64.c
arch/sparc/vdso/Makefile
arch/um/Kconfig
arch/um/Makefile
arch/um/drivers/virtio_uml.c
arch/um/kernel/trap.c
arch/x86/Kconfig
arch/x86/Makefile
arch/x86/boot/compressed/Makefile
arch/x86/boot/compressed/misc.c
arch/x86/boot/string.h
arch/x86/entry/entry_32.S
arch/x86/entry/syscalls/syscall_32.tbl
arch/x86/entry/syscalls/syscall_64.tbl
arch/x86/entry/vdso/Makefile
arch/x86/entry/vdso/vdso32/note.S
arch/x86/events/rapl.c
arch/x86/include/asm/device.h
arch/x86/include/asm/fixmap.h
arch/x86/include/asm/mshyperv.h
arch/x86/include/asm/proto.h
arch/x86/include/asm/segment.h
arch/x86/include/asm/smp.h
arch/x86/include/asm/tsc.h
arch/x86/include/asm/uaccess.h
arch/x86/include/asm/vdso/gettimeofday.h
arch/x86/kernel/alternative.c
arch/x86/kernel/apic/apic.c
arch/x86/kernel/apic/apic_noop.c
arch/x86/kernel/apic/bigsmp_32.c
arch/x86/kernel/apic/hw_nmi.c
arch/x86/kernel/apic/ipi.c
arch/x86/kernel/apic/local.h
arch/x86/kernel/apic/probe_32.c
arch/x86/kernel/apic/probe_64.c
arch/x86/kernel/cpu/Makefile
arch/x86/kernel/cpu/acrn.c
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/bugs.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/hygon.c
arch/x86/kernel/cpu/intel.c
arch/x86/kernel/cpu/mshyperv.c
arch/x86/kernel/crash.c
arch/x86/kernel/devicetree.c
arch/x86/kernel/fpu/xstate.c
arch/x86/kernel/head_32.S
arch/x86/kernel/irqinit.c
arch/x86/kernel/jailhouse.c
arch/x86/kernel/mpparse.c
arch/x86/kernel/process_64.c
arch/x86/kernel/setup.c
arch/x86/kernel/topology.c
arch/x86/kernel/tsc_msr.c
arch/x86/kvm/x86.c
arch/x86/lib/Makefile
arch/x86/mm/Makefile
arch/x86/mm/fault.c
arch/x86/mm/init_32.c
arch/x86/mm/init_64.c
arch/x86/mm/numa.c
arch/x86/pci/xen.c
arch/x86/power/Makefile
arch/x86/purgatory/Makefile
arch/x86/um/vdso/Makefile
arch/x86/xen/Kconfig
arch/x86/xen/Makefile
arch/x86/xen/apic.c
arch/x86/xen/enlighten_hvm.c
arch/x86/xen/enlighten_pv.c
arch/x86/xen/mmu_pv.c
arch/x86/xen/p2m.c
arch/x86/xen/setup.c
arch/x86/xen/smp_hvm.c
arch/x86/xen/smp_pv.c
arch/x86/xen/suspend_pv.c
arch/x86/xen/vdso.h [deleted file]
arch/x86/xen/xen-asm.S
arch/x86/xen/xen-asm_32.S [deleted file]
arch/x86/xen/xen-asm_64.S [deleted file]
arch/x86/xen/xen-head.S
arch/x86/xen/xen-ops.h
arch/xtensa/boot/boot-elf/Makefile
arch/xtensa/include/asm/uaccess.h
arch/xtensa/kernel/syscalls/syscall.tbl
arch/xtensa/mm/fault.c
block/blk-flush.c
block/blk-iocost.c
block/blk-lib.c
crypto/algif_aead.c
crypto/algif_skcipher.c
drivers/acpi/acpi_apd.c
drivers/acpi/nfit/core.c
drivers/acpi/nfit/intel.c
drivers/acpi/nfit/intel.h
drivers/acpi/nfit/nfit.h
drivers/block/loop.c
drivers/block/rnbd/rnbd-srv-dev.c
drivers/block/rnbd/rnbd-srv-dev.h
drivers/block/rnbd/rnbd-srv.c
drivers/clk/Kconfig
drivers/clk/Makefile
drivers/clk/actions/owl-s500.c
drivers/clk/at91/Makefile
drivers/clk/at91/at91rm9200.c
drivers/clk/at91/at91sam9260.c
drivers/clk/at91/at91sam9g45.c
drivers/clk/at91/at91sam9n12.c
drivers/clk/at91/at91sam9rl.c
drivers/clk/at91/at91sam9x5.c
drivers/clk/at91/clk-generated.c
drivers/clk/at91/clk-main.c
drivers/clk/at91/clk-master.c
drivers/clk/at91/clk-peripheral.c
drivers/clk/at91/clk-programmable.c
drivers/clk/at91/clk-sam9x60-pll.c
drivers/clk/at91/clk-system.c
drivers/clk/at91/clk-utmi.c
drivers/clk/at91/dt-compat.c
drivers/clk/at91/pmc.h
drivers/clk/at91/sam9x60.c
drivers/clk/at91/sama5d2.c
drivers/clk/at91/sama5d3.c
drivers/clk/at91/sama5d4.c
drivers/clk/at91/sama7g5.c [new file with mode: 0644]
drivers/clk/at91/sckc.c
drivers/clk/bcm/clk-bcm2835.c
drivers/clk/bcm/clk-iproc-asiu.c
drivers/clk/clk-pwm.c
drivers/clk/clk-qoriq.c
drivers/clk/clk-sparx5.c [new file with mode: 0644]
drivers/clk/clk-versaclock5.c
drivers/clk/clk.c
drivers/clk/davinci/pll.c
drivers/clk/imx/clk-pllv3.c
drivers/clk/ingenic/jz4780-cgu.c
drivers/clk/ingenic/x1000-cgu.c
drivers/clk/ingenic/x1830-cgu.c
drivers/clk/mmp/clk-pxa168.c
drivers/clk/mmp/clk-pxa910.c
drivers/clk/qcom/Kconfig
drivers/clk/qcom/Makefile
drivers/clk/qcom/clk-alpha-pll.c
drivers/clk/qcom/clk-alpha-pll.h
drivers/clk/qcom/gcc-sc7180.c
drivers/clk/qcom/gcc-sdm660.c
drivers/clk/qcom/gcc-sm8150.c
drivers/clk/qcom/gdsc.c
drivers/clk/qcom/gdsc.h
drivers/clk/qcom/gpucc-sc7180.c
drivers/clk/qcom/gpucc-sdm845.c
drivers/clk/qcom/gpucc-sm8150.c [new file with mode: 0644]
drivers/clk/qcom/gpucc-sm8250.c [new file with mode: 0644]
drivers/clk/qcom/lpasscorecc-sc7180.c [new file with mode: 0644]
drivers/clk/rockchip/clk-pll.c
drivers/clk/rockchip/clk-rk3188.c
drivers/clk/rockchip/clk-rk3288.c
drivers/clk/rockchip/clk-rk3328.c
drivers/clk/sirf/clk-atlas6.c
drivers/clk/tegra/clk-pll.c
drivers/clk/x86/Makefile
drivers/clk/x86/clk-cgu-pll.c
drivers/clk/x86/clk-cgu.c
drivers/clk/x86/clk-fch.c [new file with mode: 0644]
drivers/clk/x86/clk-st.c [deleted file]
drivers/clocksource/Kconfig
drivers/clocksource/Makefile
drivers/clocksource/timer-clint.c [new file with mode: 0644]
drivers/clocksource/timer-riscv.c
drivers/clocksource/timer-stm32-lp.c [new file with mode: 0644]
drivers/cpufreq/cpufreq.c
drivers/cpufreq/intel_pstate.c
drivers/crypto/virtio/virtio_crypto_core.c
drivers/dax/super.c
drivers/dma-buf/dma-resv.c
drivers/edac/ie31200_edac.c
drivers/firmware/arm_sdei.c
drivers/firmware/efi/libstub/Makefile
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
drivers/gpu/drm/amd/amdgpu/nv.c
drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
drivers/gpu/drm/amd/display/dc/bios/command_table2.c
drivers/gpu/drm/amd/display/dc/bios/command_table2.h
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
drivers/gpu/drm/amd/display/dc/core/dc.c
drivers/gpu/drm/amd/display/dc/core/dc_link.c
drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
drivers/gpu/drm/amd/display/dc/core/dc_stream.c
drivers/gpu/drm/amd/display/dc/dc_bios_types.h
drivers/gpu/drm/amd/display/dc/dc_stream.h
drivers/gpu/drm/amd/display/dc/dc_types.h
drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.h
drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.h
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h
drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
drivers/gpu/drm/amd/display/include/bios_parser_types.h
drivers/gpu/drm/amd/display/include/fixed31_32.h
drivers/gpu/drm/amd/display/include/link_service_types.h
drivers/gpu/drm/amd/display/modules/freesync/freesync.c
drivers/gpu/drm/amd/include/atomfirmware.h
drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_sienna_cichlid.h
drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
drivers/gpu/drm/amd/powerplay/navi10_ppt.c
drivers/gpu/drm/amd/powerplay/renoir_ppt.c
drivers/gpu/drm/amd/powerplay/sienna_cichlid_ppt.c
drivers/gpu/drm/amd/powerplay/smu_cmn.c
drivers/gpu/drm/amd/powerplay/smu_internal.h
drivers/gpu/drm/amd/powerplay/smu_v11_0.c
drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
drivers/gpu/drm/drm_dp_mst_topology.c
drivers/gpu/drm/drm_drv.c
drivers/gpu/drm/drm_gem.c
drivers/gpu/drm/drm_panel_orientation_quirks.c
drivers/gpu/drm/i915/display/intel_display.c
drivers/gpu/drm/i915/display/intel_display_debugfs.c
drivers/gpu/drm/i915/display/intel_display_power.c
drivers/gpu/drm/i915/display/intel_panel.c
drivers/gpu/drm/i915/gem/i915_gem_userptr.c
drivers/gpu/drm/i915/gvt/cfg_space.c
drivers/gpu/drm/i915/gvt/gtt.c
drivers/gpu/drm/i915/gvt/gtt.h
drivers/gpu/drm/i915/gvt/gvt.h
drivers/gpu/drm/i915/gvt/vgpu.c
drivers/gpu/drm/i915/i915_pmu.c
drivers/gpu/drm/i915/selftests/i915_buddy.c
drivers/gpu/drm/i915/selftests/mock_gem_device.c
drivers/gpu/drm/omapdrm/dss/dispc.c
drivers/gpu/drm/omapdrm/dss/dsi.c
drivers/gpu/drm/omapdrm/dss/dss.c
drivers/gpu/drm/omapdrm/dss/venc.c
drivers/gpu/drm/omapdrm/omap_connector.c
drivers/gpu/drm/panfrost/panfrost_mmu.c
drivers/gpu/drm/tidss/tidss_kms.c
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/ttm/ttm_bo_util.c
drivers/gpu/drm/ttm/ttm_bo_vm.c
drivers/gpu/drm/ttm/ttm_tt.c
drivers/gpu/drm/virtio/virtgpu_ioctl.c
drivers/gpu/drm/virtio/virtgpu_kms.c
drivers/gpu/drm/virtio/virtgpu_object.c
drivers/gpu/drm/virtio/virtgpu_vq.c
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
drivers/gpu/drm/xen/xen_drm_front.c
drivers/gpu/drm/xen/xen_drm_front.h
drivers/gpu/drm/xen/xen_drm_front_conn.c
drivers/gpu/drm/xen/xen_drm_front_gem.c
drivers/gpu/drm/xen/xen_drm_front_kms.c
drivers/gpu/drm/xlnx/zynqmp_dp.c
drivers/gpu/vga/vgaarb.c
drivers/hid/Kconfig
drivers/hid/hid-cp2112.c
drivers/hid/hid-ids.h
drivers/hid/hid-input.c
drivers/hid/hid-lenovo.c
drivers/hid/hid-mcp2221.c
drivers/hid/hid-quirks.c
drivers/hid/hid-udraw-ps3.c
drivers/hid/hid-wiimote-core.c
drivers/hid/hid-wiimote-modules.c
drivers/hid/hid-wiimote.h
drivers/hid/intel-ish-hid/ipc/pci-ish.c
drivers/hid/usbhid/hid-core.c
drivers/hid/usbhid/usbhid.h
drivers/hv/vmbus_drv.c
drivers/hwmon/pwm-fan.c
drivers/hwspinlock/Kconfig
drivers/hwspinlock/qcom_hwspinlock.c
drivers/i2c/algos/i2c-algo-pca.c
drivers/i2c/busses/Kconfig
drivers/i2c/busses/i2c-ali1535.c
drivers/i2c/busses/i2c-ali15x3.c
drivers/i2c/busses/i2c-amd8111.c
drivers/i2c/busses/i2c-aspeed.c
drivers/i2c/busses/i2c-at91-master.c
drivers/i2c/busses/i2c-at91.h
drivers/i2c/busses/i2c-bcm-iproc.c
drivers/i2c/busses/i2c-bcm2835.c
drivers/i2c/busses/i2c-designware-pcidrv.c
drivers/i2c/busses/i2c-designware-platdrv.c
drivers/i2c/busses/i2c-digicolor.c
drivers/i2c/busses/i2c-eg20t.c
drivers/i2c/busses/i2c-emev2.c
drivers/i2c/busses/i2c-fsi.c
drivers/i2c/busses/i2c-i801.c
drivers/i2c/busses/i2c-mt65xx.c
drivers/i2c/busses/i2c-mv64xxx.c
drivers/i2c/busses/i2c-nomadik.c
drivers/i2c/busses/i2c-piix4.c
drivers/i2c/busses/i2c-pnx.c
drivers/i2c/busses/i2c-rcar.c
drivers/i2c/busses/i2c-rk3x.c
drivers/i2c/busses/i2c-sh_mobile.c
drivers/i2c/busses/i2c-sibyte.c
drivers/i2c/busses/i2c-sirf.c
drivers/i2c/busses/i2c-synquacer.c
drivers/i2c/busses/i2c-tegra.c
drivers/i2c/busses/i2c-viapro.c
drivers/i2c/busses/scx200_acb.c
drivers/i2c/i2c-core-base.c
drivers/i2c/i2c-dev.c
drivers/i2c/i2c-slave-eeprom.c
drivers/infiniband/core/device.c
drivers/infiniband/core/umem_odp.c
drivers/infiniband/hw/bnxt_re/main.c
drivers/infiniband/hw/hfi1/tid_rdma.c
drivers/infiniband/hw/hns/hns_roce_device.h
drivers/infiniband/hw/hns/hns_roce_hw_v1.c
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
drivers/infiniband/hw/hns/hns_roce_hw_v2.h
drivers/infiniband/hw/hns/hns_roce_qp.c
drivers/infiniband/hw/hns/hns_roce_srq.c
drivers/infiniband/hw/usnic/usnic_ib_main.c
drivers/input/input-mt.c
drivers/input/joystick/db9.c
drivers/input/joystick/gamecon.c
drivers/input/joystick/sidewinder.c
drivers/input/joystick/spaceball.c
drivers/input/keyboard/adp5589-keys.c
drivers/input/keyboard/atkbd.c
drivers/input/keyboard/gpio_keys.c
drivers/input/misc/ati_remote2.c
drivers/input/misc/cm109.c
drivers/input/misc/ims-pcu.c
drivers/input/misc/iqs269a.c
drivers/input/misc/pwm-vibra.c
drivers/input/misc/xen-kbdfront.c
drivers/input/mouse/alps.c
drivers/input/mouse/appletouch.c
drivers/input/mouse/cyapa_gen3.c
drivers/input/mouse/cyapa_gen5.c
drivers/input/mouse/cyapa_gen6.c
drivers/input/mouse/elan_i2c.h
drivers/input/mouse/elan_i2c_core.c
drivers/input/mouse/elan_i2c_i2c.c
drivers/input/mouse/elan_i2c_smbus.c
drivers/input/mouse/elantech.c
drivers/input/mouse/hgpk.c
drivers/input/mouse/navpoint.c
drivers/input/mouse/psmouse-base.c
drivers/input/mouse/sentelic.c
drivers/input/mouse/sermouse.c
drivers/input/serio/i8042-io.h
drivers/input/serio/i8042.c
drivers/input/serio/libps2.c
drivers/input/sparse-keymap.c
drivers/input/tablet/gtco.c
drivers/input/tablet/pegasus_notetaker.c
drivers/input/touchscreen/atmel_mxt_ts.c
drivers/input/touchscreen/edt-ft5x06.c
drivers/input/touchscreen/elants_i2c.c
drivers/input/touchscreen/elo.c
drivers/input/touchscreen/exc3000.c
drivers/input/touchscreen/iqs5xx.c
drivers/input/touchscreen/max11801_ts.c
drivers/input/touchscreen/stmfts.c
drivers/iommu/Kconfig
drivers/iommu/Makefile
drivers/iommu/amd/Kconfig [new file with mode: 0644]
drivers/iommu/amd/Makefile [new file with mode: 0644]
drivers/iommu/amd/init.c
drivers/iommu/amd/iommu.c
drivers/iommu/amd/iommu_v2.c
drivers/iommu/arm/Makefile [moved from arch/s390/numa/Makefile with 52% similarity]
drivers/iommu/arm/arm-smmu-v3/Makefile [new file with mode: 0644]
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c [moved from drivers/iommu/arm-smmu-v3.c with 99% similarity]
drivers/iommu/arm/arm-smmu/Makefile [new file with mode: 0644]
drivers/iommu/arm/arm-smmu/arm-smmu-impl.c [moved from drivers/iommu/arm-smmu-impl.c with 72% similarity]
drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c [new file with mode: 0644]
drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c [moved from drivers/iommu/arm-smmu-qcom.c with 100% similarity]
drivers/iommu/arm/arm-smmu/arm-smmu.c [moved from drivers/iommu/arm-smmu.c with 98% similarity]
drivers/iommu/arm/arm-smmu/arm-smmu.h [moved from drivers/iommu/arm-smmu.h with 98% similarity]
drivers/iommu/arm/arm-smmu/qcom_iommu.c [moved from drivers/iommu/qcom_iommu.c with 95% similarity]
drivers/iommu/dma-iommu.c
drivers/iommu/exynos-iommu.c
drivers/iommu/fsl_pamu.c
drivers/iommu/fsl_pamu_domain.c
drivers/iommu/intel/Kconfig [new file with mode: 0644]
drivers/iommu/intel/Makefile [new file with mode: 0644]
drivers/iommu/intel/debugfs.c
drivers/iommu/intel/dmar.c
drivers/iommu/intel/iommu.c
drivers/iommu/intel/irq_remapping.c
drivers/iommu/intel/pasid.c
drivers/iommu/intel/pasid.h [moved from drivers/iommu/intel/intel-pasid.h with 98% similarity]
drivers/iommu/intel/svm.c
drivers/iommu/io-pgtable-arm-v7s.c
drivers/iommu/io-pgtable-arm.c
drivers/iommu/iommu.c
drivers/iommu/iova.c
drivers/iommu/ipmmu-vmsa.c
drivers/iommu/msm_iommu.c
drivers/iommu/mtk_iommu.c
drivers/iommu/mtk_iommu.h
drivers/iommu/mtk_iommu_v1.c
drivers/iommu/omap-iommu-debug.c
drivers/iommu/omap-iommu.c
drivers/iommu/rockchip-iommu.c
drivers/iommu/tegra-gart.c
drivers/iommu/tegra-smmu.c
drivers/iommu/virtio-iommu.c
drivers/mailbox/bcm-pdc-mailbox.c
drivers/md/md-cluster.c
drivers/md/md.c
drivers/md/raid5.c
drivers/md/raid5.h
drivers/media/platform/s5p-mfc/s5p_mfc_iommu.h
drivers/memory/Kconfig
drivers/memory/Makefile
drivers/memory/mtk-smi.c
drivers/memory/stm32-fmc2-ebi.c [new file with mode: 0644]
drivers/mfd/Kconfig
drivers/mfd/Makefile
drivers/mfd/ab3100-core.c
drivers/mfd/ab3100-otp.c
drivers/mfd/ab8500-debugfs.c
drivers/mfd/altera-sysmgr.c
drivers/mfd/arizona-core.c
drivers/mfd/atmel-smc.c
drivers/mfd/axp20x-i2c.c
drivers/mfd/cros_ec_dev.c
drivers/mfd/da9063-core.c
drivers/mfd/da9063-i2c.c
drivers/mfd/db8500-prcmu.c
drivers/mfd/dln2.c
drivers/mfd/hi6421-pmic-core.c
drivers/mfd/intel-lpss-pci.c
drivers/mfd/intel_soc_pmic_mrfld.c
drivers/mfd/kempld-core.c
drivers/mfd/khadas-mcu.c [new file with mode: 0644]
drivers/mfd/lm3533-ctrlbank.c
drivers/mfd/lp873x.c
drivers/mfd/lp87565.c
drivers/mfd/madera-core.c
drivers/mfd/madera-i2c.c
drivers/mfd/max14577.c
drivers/mfd/mfd-core.c
drivers/mfd/motorola-cpcap.c
drivers/mfd/omap-usb-host.c
drivers/mfd/omap-usb-tll.c
drivers/mfd/rave-sp.c
drivers/mfd/rn5t618.c
drivers/mfd/si476x-cmd.c
drivers/mfd/si476x-i2c.c
drivers/mfd/sky81452.c
drivers/mfd/smsc-ece1099.c [deleted file]
drivers/mfd/sprd-sc27xx-spi.c
drivers/mfd/stm32-lptimer.c
drivers/mfd/syscon.c
drivers/mfd/tc3589x.c
drivers/mfd/ti_am335x_tscadc.c
drivers/mfd/tps65010.c
drivers/mfd/tps65086.c
drivers/mfd/tps65217.c
drivers/mfd/tps65218.c
drivers/mfd/tps6586x.c
drivers/mfd/tps65912-core.c
drivers/mfd/tps65912-i2c.c
drivers/mfd/tps65912-spi.c
drivers/mfd/twl4030-irq.c
drivers/mfd/wm831x-core.c
drivers/mfd/wm8350-core.c
drivers/mfd/wm8400-core.c
drivers/mtd/chips/Kconfig
drivers/mtd/hyperbus/hbmc-am654.c
drivers/mtd/hyperbus/hyperbus-core.c
drivers/mtd/maps/Kconfig
drivers/mtd/maps/sc520cdp.c
drivers/mtd/nand/Kconfig
drivers/mtd/nand/onenand/Kconfig
drivers/mtd/nand/raw/Kconfig
drivers/mtd/nand/raw/ams-delta.c
drivers/mtd/nand/raw/arasan-nand-controller.c
drivers/mtd/nand/raw/atmel/nand-controller.c
drivers/mtd/nand/raw/brcmnand/brcmnand.c
drivers/mtd/nand/raw/cadence-nand-controller.c
drivers/mtd/nand/raw/denali.c
drivers/mtd/nand/raw/fsl_upm.c
drivers/mtd/nand/raw/fsmc_nand.c
drivers/mtd/nand/raw/gpio.c
drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
drivers/mtd/nand/raw/ingenic/jz4740_ecc.c
drivers/mtd/nand/raw/internals.h
drivers/mtd/nand/raw/marvell_nand.c
drivers/mtd/nand/raw/meson_nand.c
drivers/mtd/nand/raw/mtk_nand.c
drivers/mtd/nand/raw/mxc_nand.c
drivers/mtd/nand/raw/mxic_nand.c
drivers/mtd/nand/raw/nand_base.c
drivers/mtd/nand/raw/nand_bbt.c
drivers/mtd/nand/raw/nand_hynix.c
drivers/mtd/nand/raw/nand_ids.c
drivers/mtd/nand/raw/nand_legacy.c
drivers/mtd/nand/raw/nand_macronix.c
drivers/mtd/nand/raw/nand_micron.c
drivers/mtd/nand/raw/nand_timings.c
drivers/mtd/nand/raw/nand_toshiba.c
drivers/mtd/nand/raw/omap_elm.c
drivers/mtd/nand/raw/qcom_nandc.c
drivers/mtd/nand/raw/s3c2410.c
drivers/mtd/nand/raw/stm32_fmc2_nand.c
drivers/mtd/nand/raw/sunxi_nand.c
drivers/mtd/nand/raw/tango_nand.c
drivers/mtd/nand/raw/tegra_nand.c
drivers/mtd/parsers/bcm63xxpart.c
drivers/mtd/spi-nor/controllers/intel-spi-pci.c
drivers/mtd/spi-nor/controllers/intel-spi.c
drivers/mtd/spi-nor/core.c
drivers/mtd/spi-nor/core.h
drivers/mtd/spi-nor/macronix.c
drivers/mtd/spi-nor/micron-st.c
drivers/mtd/spi-nor/sfdp.c
drivers/mtd/spi-nor/spansion.c
drivers/mtd/spi-nor/winbond.c
drivers/mtd/ubi/fastmap-wl.c
drivers/mtd/ubi/wl.c
drivers/net/bonding/bond_3ad.c
drivers/net/bonding/bond_alb.c
drivers/net/bonding/bond_main.c
drivers/net/ethernet/3com/3c574_cs.c
drivers/net/ethernet/8390/axnet_cs.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
drivers/net/ethernet/huawei/hinic/hinic_devlink.c
drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
drivers/net/ethernet/intel/i40e/i40e_common.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/igc/igc_main.c
drivers/net/ethernet/intel/igc/igc_ptp.c
drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
drivers/net/ethernet/pensando/ionic/ionic_lif.c
drivers/net/ethernet/qualcomm/emac/emac.c
drivers/net/ethernet/sfc/ef100_nic.c
drivers/net/ethernet/sfc/ef100_rx.c
drivers/net/ethernet/sfc/ef100_rx.h
drivers/net/ethernet/sfc/efx.h
drivers/net/ethernet/sfc/net_driver.h
drivers/net/ethernet/sfc/rx_common.c
drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
drivers/net/fddi/skfp/cfm.c
drivers/net/fddi/skfp/fplustm.c
drivers/net/fddi/skfp/hwmtm.c
drivers/net/fddi/skfp/smt.c
drivers/net/ipa/ipa.h
drivers/net/ipa/ipa_modem.c
drivers/net/ipvlan/ipvlan_main.c
drivers/net/phy/marvell10g.c
drivers/net/phy/phy_device.c
drivers/net/usb/r8152.c
drivers/net/virtio_net.c
drivers/net/vmxnet3/vmxnet3_drv.c
drivers/net/wan/dlci.c
drivers/net/wan/hdlc.c
drivers/net/wan/hdlc_x25.c
drivers/net/wan/lapbether.c
drivers/net/wan/x25_asy.c
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8180.h
drivers/ntb/hw/intel/ntb_hw_gen1.c
drivers/ntb/hw/intel/ntb_hw_gen3.h
drivers/ntb/hw/intel/ntb_hw_intel.h
drivers/nvdimm/btt.c
drivers/nvdimm/bus.c
drivers/nvdimm/core.c
drivers/nvdimm/dimm_devs.c
drivers/nvdimm/namespace_devs.c
drivers/nvdimm/nd-core.h
drivers/nvdimm/pfn_devs.c
drivers/nvdimm/pmem.c
drivers/nvdimm/region_devs.c
drivers/nvdimm/security.c
drivers/nvdimm/virtio_pmem.c
drivers/of/address.c
drivers/opp/core.c
drivers/parisc/sba_iommu.c
drivers/pci/p2pdma.c
drivers/pinctrl/actions/pinctrl-owl.c
drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
drivers/pinctrl/aspeed/pinctrl-aspeed.c
drivers/pinctrl/bcm/pinctrl-bcm281xx.c
drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
drivers/pinctrl/core.c
drivers/pinctrl/devicetree.c
drivers/pinctrl/freescale/Kconfig
drivers/pinctrl/freescale/pinctrl-imx.c
drivers/pinctrl/freescale/pinctrl-imx8dxl.c
drivers/pinctrl/freescale/pinctrl-imx8mm.c
drivers/pinctrl/freescale/pinctrl-imx8mn.c
drivers/pinctrl/freescale/pinctrl-imx8mp.c
drivers/pinctrl/freescale/pinctrl-imx8mq.c
drivers/pinctrl/freescale/pinctrl-imx8qm.c
drivers/pinctrl/freescale/pinctrl-imx8qxp.c
drivers/pinctrl/freescale/pinctrl-scu.c
drivers/pinctrl/intel/Kconfig
drivers/pinctrl/intel/Makefile
drivers/pinctrl/intel/pinctrl-baytrail.c
drivers/pinctrl/intel/pinctrl-cherryview.c
drivers/pinctrl/intel/pinctrl-emmitsburg.c [new file with mode: 0644]
drivers/pinctrl/intel/pinctrl-intel.c
drivers/pinctrl/intel/pinctrl-intel.h
drivers/pinctrl/intel/pinctrl-lynxpoint.c
drivers/pinctrl/intel/pinctrl-merrifield.c
drivers/pinctrl/intel/pinctrl-tigerlake.c
drivers/pinctrl/mediatek/Kconfig
drivers/pinctrl/mediatek/Makefile
drivers/pinctrl/mediatek/pinctrl-mt6779.c [new file with mode: 0644]
drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h
drivers/pinctrl/mediatek/pinctrl-mtk-mt6779.h [new file with mode: 0644]
drivers/pinctrl/mediatek/pinctrl-paris.c
drivers/pinctrl/meson/pinctrl-meson-a1.c
drivers/pinctrl/meson/pinctrl-meson.c
drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
drivers/pinctrl/pinconf-generic.c
drivers/pinctrl/pinctrl-amd.c
drivers/pinctrl/pinctrl-at91-pio4.c
drivers/pinctrl/pinctrl-at91.c
drivers/pinctrl/pinctrl-bm1880.c
drivers/pinctrl/pinctrl-ingenic.c
drivers/pinctrl/pinctrl-lpc18xx.c
drivers/pinctrl/pinctrl-mcp23s08.c
drivers/pinctrl/pinctrl-ocelot.c
drivers/pinctrl/pinctrl-rockchip.c
drivers/pinctrl/pinctrl-rza1.c
drivers/pinctrl/pinctrl-single.c
drivers/pinctrl/pinctrl-stmfx.c
drivers/pinctrl/pinctrl-sx150x.c
drivers/pinctrl/pinmux.c
drivers/pinctrl/qcom/pinctrl-ipq4019.c
drivers/pinctrl/qcom/pinctrl-ipq8074.c
drivers/pinctrl/qcom/pinctrl-msm.c
drivers/pinctrl/qcom/pinctrl-msm.h
drivers/pinctrl/qcom/pinctrl-msm8976.c
drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
drivers/pinctrl/samsung/pinctrl-exynos.c
drivers/pinctrl/samsung/pinctrl-s3c24xx.c
drivers/pinctrl/samsung/pinctrl-s3c64xx.c
drivers/pinctrl/samsung/pinctrl-samsung.c
drivers/pinctrl/sh-pfc/Kconfig
drivers/pinctrl/sh-pfc/Makefile
drivers/pinctrl/sh-pfc/core.c
drivers/pinctrl/sh-pfc/pfc-r8a77951.c
drivers/pinctrl/sh-pfc/pfc-r8a77970.c
drivers/pinctrl/sh-pfc/pfc-r8a77980.c
drivers/pinctrl/sh-pfc/sh_pfc.h
drivers/pinctrl/sirf/pinctrl-atlas7.c
drivers/pinctrl/stm32/pinctrl-stm32.c
drivers/pinctrl/tegra/pinctrl-tegra194.c
drivers/pinctrl/ti/pinctrl-ti-iodelay.c
drivers/platform/chrome/Kconfig
drivers/platform/chrome/cros_ec_debugfs.c
drivers/platform/chrome/cros_ec_ishtp.c
drivers/platform/chrome/cros_ec_proto.c
drivers/platform/chrome/cros_ec_rpmsg.c
drivers/platform/chrome/cros_ec_sensorhub_ring.c
drivers/platform/chrome/cros_ec_spi.c
drivers/platform/chrome/cros_ec_typec.c
drivers/platform/mellanox/mlxbf-tmfifo.c
drivers/platform/x86/mlx-platform.c
drivers/pwm/core.c
drivers/pwm/pwm-bcm-iproc.c
drivers/pwm/pwm-bcm-kona.c
drivers/pwm/pwm-clps711x.c
drivers/pwm/pwm-imx-tpm.c
drivers/pwm/pwm-imx27.c
drivers/pwm/pwm-iqs620a.c
drivers/pwm/pwm-mediatek.c
drivers/pwm/pwm-omap-dmtimer.c
drivers/pwm/pwm-sifive.c
drivers/pwm/pwm-stm32-lp.c
drivers/pwm/pwm-sun4i.c
drivers/pwm/pwm-tiecap.c
drivers/pwm/pwm-tiehrpwm.c
drivers/pwm/sysfs.c
drivers/rapidio/devices/rio_mport_cdev.c
drivers/rapidio/rio-scan.c
drivers/remoteproc/Kconfig
drivers/remoteproc/Makefile
drivers/remoteproc/ingenic_rproc.c
drivers/remoteproc/qcom_common.c
drivers/remoteproc/qcom_common.h
drivers/remoteproc/qcom_pil_info.c [new file with mode: 0644]
drivers/remoteproc/qcom_pil_info.h [new file with mode: 0644]
drivers/remoteproc/qcom_q6v5.c
drivers/remoteproc/qcom_q6v5_adsp.c
drivers/remoteproc/qcom_q6v5_ipa_notify.c [deleted file]
drivers/remoteproc/qcom_q6v5_mss.c
drivers/remoteproc/qcom_q6v5_pas.c
drivers/remoteproc/qcom_q6v5_wcss.c
drivers/remoteproc/qcom_sysmon.c
drivers/remoteproc/qcom_wcnss.c
drivers/remoteproc/remoteproc_cdev.c [new file with mode: 0644]
drivers/remoteproc/remoteproc_core.c
drivers/remoteproc/remoteproc_coredump.c [new file with mode: 0644]
drivers/remoteproc/remoteproc_debugfs.c
drivers/remoteproc/remoteproc_internal.h
drivers/remoteproc/remoteproc_sysfs.c
drivers/remoteproc/stm32_rproc.c
drivers/remoteproc/ti_k3_dsp_remoteproc.c [new file with mode: 0644]
drivers/remoteproc/ti_sci_proc.h [new file with mode: 0644]
drivers/rpmsg/virtio_rpmsg_bus.c
drivers/rtc/Kconfig
drivers/rtc/rtc-ab-b5ze-s3.c
drivers/rtc/rtc-bq32k.c
drivers/rtc/rtc-cpcap.c
drivers/rtc/rtc-ds1307.c
drivers/rtc/rtc-ds1374.c
drivers/rtc/rtc-goldfish.c
drivers/rtc/rtc-imxdi.c
drivers/rtc/rtc-max77686.c
drivers/rtc/rtc-mcp795.c
drivers/rtc/rtc-pcf2127.c
drivers/rtc/rtc-pcf85063.c
drivers/rtc/rtc-pl031.c
drivers/s390/crypto/pkey_api.c
drivers/s390/scsi/zfcp_fsf.c
drivers/scsi/fcoe/fcoe_ctlr.c
drivers/scsi/libfc/fc_disc.c
drivers/scsi/lpfc/lpfc_attr.c
drivers/scsi/lpfc/lpfc_bsg.c
drivers/scsi/lpfc/lpfc_ct.c
drivers/scsi/lpfc/lpfc_els.c
drivers/scsi/lpfc/lpfc_init.c
drivers/scsi/lpfc/lpfc_nportdisc.c
drivers/scsi/lpfc/lpfc_nvmet.c
drivers/scsi/lpfc/lpfc_sli.c
drivers/scsi/lpfc/lpfc_version.h
drivers/scsi/qla2xxx/qla_dbg.h
drivers/scsi/qla2xxx/qla_def.h
drivers/scsi/qla2xxx/qla_gs.c
drivers/scsi/qla2xxx/qla_isr.c
drivers/scsi/qla2xxx/qla_mbx.c
drivers/scsi/qla2xxx/qla_nvme.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/qla2xxx/qla_target.c
drivers/scsi/scsi_debug.c
drivers/scsi/scsi_transport_sas.c
drivers/scsi/sd.c
drivers/scsi/sd.h
drivers/scsi/sd_zbc.c
drivers/scsi/ufs/ti-j721e-ufs.c
drivers/scsi/ufs/ufs-mediatek.c
drivers/scsi/ufs/ufshcd-pci.c
drivers/scsi/ufs/ufshcd.c
drivers/scsi/ufs/ufshcd.h
drivers/scsi/virtio_scsi.c
drivers/sh/clk/cpg.c
drivers/spi/Kconfig
drivers/spi/spi-stm32.c
drivers/spi/spi.c
drivers/target/iscsi/iscsi_target_transport.c
drivers/thermal/Kconfig
drivers/thermal/Makefile
drivers/thermal/khadas_mcu_fan.c [new file with mode: 0644]
drivers/thunderbolt/test.c
drivers/vdpa/Kconfig
drivers/vdpa/Makefile
drivers/vdpa/ifcvf/ifcvf_base.c
drivers/vdpa/ifcvf/ifcvf_base.h
drivers/vdpa/ifcvf/ifcvf_main.c
drivers/vdpa/mlx5/Makefile [new file with mode: 0644]
drivers/vdpa/mlx5/core/mlx5_vdpa.h [new file with mode: 0644]
drivers/vdpa/mlx5/core/mlx5_vdpa_ifc.h [new file with mode: 0644]
drivers/vdpa/mlx5/core/mr.c [new file with mode: 0644]
drivers/vdpa/mlx5/core/resources.c [new file with mode: 0644]
drivers/vdpa/mlx5/net/main.c [new file with mode: 0644]
drivers/vdpa/mlx5/net/mlx5_vnet.c [new file with mode: 0644]
drivers/vdpa/mlx5/net/mlx5_vnet.h [new file with mode: 0644]
drivers/vdpa/vdpa.c
drivers/vdpa/vdpa_sim/vdpa_sim.c
drivers/vfio/pci/vfio_pci.c
drivers/vfio/pci/vfio_pci_private.h
drivers/vfio/pci/vfio_pci_rdwr.c
drivers/vfio/vfio.c
drivers/vfio/vfio_iommu_spapr_tce.c
drivers/vfio/vfio_iommu_type1.c
drivers/vhost/Kconfig
drivers/vhost/net.c
drivers/vhost/vdpa.c
drivers/vhost/vhost.c
drivers/vhost/vhost.h
drivers/video/backlight/88pm860x_bl.c
drivers/video/backlight/Kconfig
drivers/video/backlight/Makefile
drivers/video/backlight/adp5520_bl.c
drivers/video/backlight/adp8860_bl.c
drivers/video/backlight/adp8870_bl.c
drivers/video/backlight/as3711_bl.c
drivers/video/backlight/backlight.c
drivers/video/backlight/bd6107.c
drivers/video/backlight/corgi_lcd.c
drivers/video/backlight/cr_bllcd.c
drivers/video/backlight/da903x_bl.c
drivers/video/backlight/ep93xx_bl.c
drivers/video/backlight/generic_bl.c [deleted file]
drivers/video/backlight/gpio_backlight.c
drivers/video/backlight/hp680_bl.c
drivers/video/backlight/ili922x.c
drivers/video/backlight/jornada720_bl.c
drivers/video/backlight/kb3886_bl.c
drivers/video/backlight/lcd.c
drivers/video/backlight/led_bl.c
drivers/video/backlight/lm3533_bl.c
drivers/video/backlight/lm3630a_bl.c
drivers/video/backlight/lms501kf03.c
drivers/video/backlight/locomolcd.c
drivers/video/backlight/lv5207lp.c
drivers/video/backlight/max8925_bl.c
drivers/video/backlight/ot200_bl.c [deleted file]
drivers/video/backlight/pwm_bl.c
drivers/video/backlight/qcom-wled.c
drivers/video/backlight/sky81452-backlight.c
drivers/video/backlight/tps65217_bl.c
drivers/video/backlight/wm831x_bl.c
drivers/video/fbdev/core/fbmem.c
drivers/video/fbdev/core/fbsysfs.c
drivers/video/fbdev/efifb.c
drivers/video/fbdev/ps3fb.c
drivers/video/fbdev/ssd1307fb.c
drivers/virtio/virtio_balloon.c
drivers/virtio/virtio_input.c
drivers/virtio/virtio_mem.c
drivers/virtio/virtio_pci_modern.c
drivers/virtio/virtio_ring.c
drivers/virtio/virtio_vdpa.c
drivers/watchdog/Kconfig
drivers/watchdog/advantechwdt.c
drivers/watchdog/alim1535_wdt.c
drivers/watchdog/alim7101_wdt.c
drivers/watchdog/ar7_wdt.c
drivers/watchdog/ath79_wdt.c
drivers/watchdog/bcm_kona_wdt.c
drivers/watchdog/booke_wdt.c
drivers/watchdog/dw_wdt.c
drivers/watchdog/eurotechwdt.c
drivers/watchdog/f71808e_wdt.c
drivers/watchdog/gef_wdt.c
drivers/watchdog/geodewdt.c
drivers/watchdog/ib700wdt.c
drivers/watchdog/it8712f_wdt.c
drivers/watchdog/ixp4xx_wdt.c
drivers/watchdog/m54xx_wdt.c
drivers/watchdog/machzwd.c
drivers/watchdog/mlx_wdt.c
drivers/watchdog/mv64x60_wdt.c
drivers/watchdog/nv_tco.c
drivers/watchdog/nv_tco.h
drivers/watchdog/pc87413_wdt.c
drivers/watchdog/pcwd.c
drivers/watchdog/pcwd_pci.c
drivers/watchdog/pcwd_usb.c
drivers/watchdog/rc32434_wdt.c
drivers/watchdog/riowd.c
drivers/watchdog/rti_wdt.c
drivers/watchdog/sa1100_wdt.c
drivers/watchdog/sb_wdog.c
drivers/watchdog/sbc60xxwdt.c
drivers/watchdog/sbc7240_wdt.c
drivers/watchdog/sbc_fitpc2_wdt.c
drivers/watchdog/sc520_wdt.c
drivers/watchdog/sch311x_wdt.c
drivers/watchdog/scx200_wdt.c
drivers/watchdog/smsc37b787_wdt.c
drivers/watchdog/softdog.c
drivers/watchdog/sp5100_tco.c
drivers/watchdog/sunxi_wdt.c
drivers/watchdog/w83877f_wdt.c
drivers/watchdog/w83977f_wdt.c
drivers/watchdog/wafer5823wdt.c
drivers/watchdog/watchdog_dev.c
drivers/watchdog/wdt.c
drivers/watchdog/wdt285.c
drivers/watchdog/wdt977.c
drivers/watchdog/wdt_pci.c
drivers/xen/Kconfig
drivers/xen/Makefile
drivers/xen/gntdev-dmabuf.c
fs/9p/v9fs.c
fs/9p/vfs_inode.c
fs/9p/vfs_inode_dotl.c
fs/afs/dynroot.c
fs/afs/fs_operation.c
fs/autofs/dev-ioctl.c
fs/btrfs/backref.c
fs/btrfs/extent-io-tree.h
fs/btrfs/extent-tree.c
fs/btrfs/free-space-cache.c
fs/btrfs/inode.c
fs/btrfs/super.c
fs/btrfs/sysfs.c
fs/btrfs/tree-log.c
fs/btrfs/volumes.c
fs/buffer.c
fs/ceph/Kconfig
fs/ceph/addr.c
fs/ceph/caps.c
fs/ceph/debugfs.c
fs/ceph/dir.c
fs/ceph/file.c
fs/ceph/mds_client.c
fs/ceph/mds_client.h
fs/ceph/mdsmap.c
fs/ceph/metric.c
fs/ceph/metric.h
fs/ceph/super.c
fs/ceph/super.h
fs/ceph/xattr.c
fs/cifs/connect.c
fs/cifs/smb2inode.c
fs/cifs/smb2pdu.c
fs/coredump.c
fs/dax.c
fs/dcache.c
fs/exec.c
fs/exfat/balloc.c
fs/exfat/dir.c
fs/exfat/exfat_fs.h
fs/exfat/exfat_raw.h
fs/exfat/fatent.c
fs/exfat/file.c
fs/exfat/inode.c
fs/exfat/misc.c
fs/exfat/namei.c
fs/exfat/super.c
fs/ext4/Kconfig
fs/ext4/balloc.c
fs/ext4/block_validity.c
fs/ext4/ext4.h
fs/ext4/ext4_jbd2.c
fs/ext4/extents.c
fs/ext4/file.c
fs/ext4/hash.c
fs/ext4/indirect.c
fs/ext4/inline.c
fs/ext4/inode.c
fs/ext4/ioctl.c
fs/ext4/mballoc.c
fs/ext4/mballoc.h
fs/ext4/move_extent.c
fs/ext4/namei.c
fs/ext4/readpage.c
fs/ext4/super.c
fs/ext4/sysfs.c
fs/ext4/xattr.c
fs/f2fs/checkpoint.c
fs/f2fs/compress.c
fs/f2fs/data.c
fs/f2fs/debug.c
fs/f2fs/dir.c
fs/f2fs/extent_cache.c
fs/f2fs/f2fs.h
fs/f2fs/file.c
fs/f2fs/gc.c
fs/f2fs/inline.c
fs/f2fs/inode.c
fs/f2fs/namei.c
fs/f2fs/node.c
fs/f2fs/recovery.c
fs/f2fs/segment.c
fs/f2fs/segment.h
fs/f2fs/super.c
fs/f2fs/sysfs.c
fs/f2fs/verity.c
fs/f2fs/xattr.c
fs/fat/Kconfig
fs/fat/fatent.c
fs/fat/file.c
fs/fs_struct.c
fs/fuse/virtio_fs.c
fs/gfs2/bmap.c
fs/gfs2/file.c
fs/gfs2/glock.c
fs/gfs2/log.c
fs/gfs2/super.c
fs/gfs2/trans.c
fs/hugetlbfs/inode.c
fs/io_uring.c
fs/jbd2/journal.c
fs/jbd2/recovery.c
fs/jbd2/transaction.c
fs/jffs2/dir.c
fs/jffs2/scan.c
fs/locks.c
fs/minix/inode.c
fs/minix/itree_common.c
fs/minix/itree_v1.c
fs/minix/itree_v2.c
fs/minix/minix.h
fs/namei.c
fs/nfs/Makefile
fs/nfs/blocklayout/rpc_pipefs.c
fs/nfs/client.c
fs/nfs/dir.c
fs/nfs/direct.c
fs/nfs/file.c
fs/nfs/flexfilelayout/flexfilelayout.c
fs/nfs/fs_context.c
fs/nfs/inode.c
fs/nfs/nfs42.h
fs/nfs/nfs42proc.c
fs/nfs/nfs42xattr.c [new file with mode: 0644]
fs/nfs/nfs42xdr.c
fs/nfs/nfs4_fs.h
fs/nfs/nfs4client.c
fs/nfs/nfs4file.c
fs/nfs/nfs4proc.c
fs/nfs/nfs4state.c
fs/nfs/nfs4super.c
fs/nfs/nfs4trace.h
fs/nfs/nfs4xdr.c
fs/nfs/nfstrace.h
fs/nfs/pnfs.c
fs/nfs/pnfs.h
fs/nfsd/netns.h
fs/nfsd/nfs4idmap.c
fs/nfsd/nfs4proc.c
fs/nfsd/nfs4recover.c
fs/nfsd/nfs4state.c
fs/nfsd/nfs4xdr.c
fs/nfsd/nfsd.h
fs/nfsd/vfs.c
fs/nfsd/vfs.h
fs/nfsd/xdr4.h
fs/nilfs2/alloc.c
fs/nilfs2/btree.c
fs/nilfs2/cpfile.c
fs/nilfs2/dat.c
fs/nilfs2/direct.c
fs/nilfs2/gcinode.c
fs/nilfs2/ifile.c
fs/nilfs2/inode.c
fs/nilfs2/ioctl.c
fs/nilfs2/mdt.c
fs/nilfs2/namei.c
fs/nilfs2/nilfs.h
fs/nilfs2/page.c
fs/nilfs2/recovery.c
fs/nilfs2/segbuf.c
fs/nilfs2/segment.c
fs/nilfs2/sufile.c
fs/nilfs2/super.c
fs/nilfs2/sysfs.c
fs/nilfs2/the_nilfs.c
fs/open.c
fs/orangefs/acl.c
fs/orangefs/orangefs-mod.c
fs/proc/base.c
fs/proc/task_mmu.c
fs/romfs/storage.c
fs/signalfd.c
fs/squashfs/block.c
fs/ubifs/journal.c
fs/ubifs/misc.h
fs/ubifs/sb.c
fs/ubifs/super.c
fs/ubifs/ubifs.h
fs/ufs/super.c
fs/userfaultfd.c
fs/xattr.c
fs/xfs/libxfs/xfs_sb.c
fs/xfs/xfs_attr_list.c
fs/xfs/xfs_buf_item.c
fs/xfs/xfs_buf_item_recover.c
fs/xfs/xfs_dquot.c
fs/xfs/xfs_export.c
fs/xfs/xfs_inode.c
fs/xfs/xfs_inode_item.c
fs/xfs/xfs_iomap.c
fs/xfs/xfs_log_cil.c
fs/xfs/xfs_log_recover.c
fs/xfs/xfs_refcount_item.c
fs/xfs/xfs_reflink.c
fs/xfs/xfs_sysfs.h
fs/xfs/xfs_trans_ail.c
fs/zonefs/super.c
fs/zonefs/zonefs.h
include/asm-generic/io.h
include/asm-generic/iomap.h
include/asm-generic/mshyperv.h
include/asm-generic/pgalloc.h
include/asm-generic/sections.h
include/asm-generic/uaccess.h
include/asm-generic/vmlinux.lds.h
include/clocksource/timer-ti-dm.h
include/dt-bindings/clock/actions,s500-cmu.h
include/dt-bindings/clock/jz4780-cgu.h
include/dt-bindings/clock/qcom,gcc-sc7180.h
include/dt-bindings/clock/qcom,gcc-sdm660.h
include/dt-bindings/clock/qcom,gpucc-sm8150.h [new file with mode: 0644]
include/dt-bindings/clock/qcom,gpucc-sm8250.h [new file with mode: 0644]
include/dt-bindings/clock/qcom,lpasscorecc-sc7180.h [new file with mode: 0644]
include/dt-bindings/clock/x1000-cgu.h
include/dt-bindings/clock/x1830-cgu.h
include/dt-bindings/memory/mt6779-larb-port.h [new file with mode: 0644]
include/dt-bindings/pinctrl/mt6779-pinfunc.h [new file with mode: 0644]
include/dt-bindings/pinctrl/omap.h
include/dt-bindings/reset/actions,s500-reset.h [new file with mode: 0644]
include/linux/async_tx.h
include/linux/backlight.h
include/linux/bitfield.h
include/linux/bpf.h
include/linux/btree.h
include/linux/ceph/ceph_features.h
include/linux/ceph/ceph_fs.h
include/linux/ceph/libceph.h
include/linux/ceph/osd_client.h
include/linux/clk-provider.h
include/linux/clk/at91_pmc.h
include/linux/compaction.h
include/linux/compat.h
include/linux/compiler-clang.h
include/linux/compiler_types.h
include/linux/cpufreq.h
include/linux/cpuhotplug.h
include/linux/crash_core.h
include/linux/crush/crush.h
include/linux/dcache.h
include/linux/delay.h
include/linux/dma-debug.h
include/linux/dma-direct.h
include/linux/dma-mapping.h
include/linux/dma-resv.h
include/linux/dma/k3-psil.h
include/linux/dma/k3-udma-glue.h
include/linux/dma/ti-cppi5.h
include/linux/dmar.h
include/linux/dynamic_queue_limits.h
include/linux/exportfs.h
include/linux/fb.h
include/linux/frontswap.h
include/linux/fs.h
include/linux/fs_struct.h
include/linux/generic-radix-tree.h
include/linux/highmem.h
include/linux/hrtimer.h
include/linux/huge_mm.h
include/linux/hugetlb.h
include/linux/hw_breakpoint.h
include/linux/i2c.h
include/linux/intel-iommu.h
include/linux/io-64-nonatomic-hi-lo.h
include/linux/io-64-nonatomic-lo-hi.h
include/linux/io-pgtable.h
include/linux/iommu.h
include/linux/irqchip/irq-omap-intc.h
include/linux/jbd2.h
include/linux/jhash.h
include/linux/kernel.h
include/linux/ktime.h
include/linux/kvm_irqfd.h
include/linux/leds-ti-lmu-common.h
include/linux/libnvdimm.h
include/linux/lockdep.h
include/linux/lsm_hook_defs.h
include/linux/lsm_hooks.h
include/linux/memcontrol.h
include/linux/mempolicy.h
include/linux/mfd/core.h
include/linux/mfd/da9055/pdata.h
include/linux/mfd/da9063/core.h
include/linux/mfd/da9063/registers.h
include/linux/mfd/hi6421-pmic.h
include/linux/mfd/khadas-mcu.h [new file with mode: 0644]
include/linux/mfd/lp873x.h
include/linux/mfd/lp87565.h
include/linux/mfd/madera/pdata.h
include/linux/mfd/max77693-private.h
include/linux/mfd/sky81452.h
include/linux/mfd/smsc.h [deleted file]
include/linux/mfd/stm32-lptimer.h
include/linux/mfd/ti_am335x_tscadc.h
include/linux/mfd/tps65086.h
include/linux/mfd/tps65217.h
include/linux/mfd/tps65218.h
include/linux/mfd/tps65912.h
include/linux/migrate.h
include/linux/mm.h
include/linux/mm_inline.h
include/linux/mm_types.h
include/linux/mmzone.h
include/linux/module.h
include/linux/moduleparam.h
include/linux/mtd/hyperbus.h
include/linux/mtd/nand.h
include/linux/mtd/pfow.h
include/linux/mtd/rawnand.h
include/linux/mtd/spinand.h
include/linux/mutex.h
include/linux/netfilter_ipv6.h
include/linux/nfs4.h
include/linux/nfs_fs.h
include/linux/nfs_fs_sb.h
include/linux/nfs_xdr.h
include/linux/oom.h
include/linux/pagemap.h
include/linux/pci_ids.h
include/linux/pgtable.h
include/linux/phylink.h
include/linux/platform_data/clk-fch.h [moved from include/linux/platform_data/clk-st.h with 51% similarity]
include/linux/platform_data/cros_ec_commands.h
include/linux/platform_data/cros_ec_proto.h
include/linux/platform_data/davinci-cpufreq.h
include/linux/platform_data/davinci_asp.h
include/linux/platform_data/elm.h
include/linux/platform_data/gpio-davinci.h
include/linux/platform_data/gpmc-omap.h
include/linux/platform_data/mlxreg.h
include/linux/platform_data/mtd-davinci-aemif.h
include/linux/platform_data/omap-twl4030.h
include/linux/platform_data/sky81452-backlight.h [deleted file]
include/linux/platform_data/uio_pruss.h
include/linux/platform_data/usb-omap.h
include/linux/poison.h
include/linux/posix-timers.h
include/linux/pwm.h
include/linux/remoteproc.h
include/linux/remoteproc/qcom_q6v5_ipa_notify.h [deleted file]
include/linux/remoteproc/qcom_rproc.h
include/linux/rmi.h
include/linux/rtc.h
include/linux/sched.h
include/linux/sched/mm.h
include/linux/sched/task.h
include/linux/sched/user.h
include/linux/seqlock.h
include/linux/soc/ti/k3-ringacc.h
include/linux/soc/ti/knav_qmss.h
include/linux/soc/ti/ti-msgmgr.h
include/linux/sockptr.h
include/linux/sunrpc/rpc_rdma.h
include/linux/sunrpc/rpc_rdma_cid.h [new file with mode: 0644]
include/linux/sunrpc/svc_rdma.h
include/linux/sunrpc/xdr.h
include/linux/sunrpc/xprt.h
include/linux/suspend.h
include/linux/swap.h
include/linux/syscalls.h
include/linux/sysctl.h
include/linux/time.h
include/linux/uaccess.h
include/linux/vdpa.h
include/linux/vgaarb.h
include/linux/videodev2.h
include/linux/virtio_caif.h
include/linux/virtio_config.h
include/linux/virtio_ring.h
include/linux/vm_event_item.h
include/linux/watchdog.h
include/linux/wkup_m3_ipc.h
include/linux/ww_mutex.h
include/linux/xattr.h
include/linux/xxhash.h
include/linux/xz.h
include/linux/zlib.h
include/net/inet_connection_sock.h
include/net/netfilter/nf_conntrack.h
include/net/tcp.h
include/soc/arc/aux.h
include/target/iscsi/iscsi_transport.h
include/trace/events/ext4.h
include/trace/events/f2fs.h
include/trace/events/migrate.h
include/trace/events/random.h
include/trace/events/rpcgss.h
include/trace/events/rpcrdma.h
include/trace/events/sunrpc.h
include/uapi/drm/drm_fourcc.h
include/uapi/linux/auto_dev-ioctl.h
include/uapi/linux/bpf.h
include/uapi/linux/elf.h
include/uapi/linux/iommu.h
include/uapi/linux/map_to_7segment.h
include/uapi/linux/ndctl.h
include/uapi/linux/nfs4.h
include/uapi/linux/nfs_fs.h
include/uapi/linux/remoteproc_cdev.h [new file with mode: 0644]
include/uapi/linux/types.h
include/uapi/linux/usb/ch9.h
include/uapi/linux/vhost.h
include/uapi/linux/vhost_types.h
include/uapi/linux/virtio_9p.h
include/uapi/linux/virtio_balloon.h
include/uapi/linux/virtio_blk.h
include/uapi/linux/virtio_config.h
include/uapi/linux/virtio_console.h
include/uapi/linux/virtio_crypto.h
include/uapi/linux/virtio_fs.h
include/uapi/linux/virtio_gpu.h
include/uapi/linux/virtio_input.h
include/uapi/linux/virtio_iommu.h
include/uapi/linux/virtio_mem.h
include/uapi/linux/virtio_net.h
include/uapi/linux/virtio_pmem.h
include/uapi/linux/virtio_scsi.h
include/uapi/mtd/mtd-abi.h
include/vdso/datapage.h
include/vdso/vsyscall.h
include/xen/interface/io/displif.h
init/init_task.c
ipc/sem.c
ipc/shm.c
kernel/Makefile
kernel/bpf/bpf_iter.c
kernel/bpf/core.c
kernel/bpf/map_iter.c
kernel/bpf/stackmap.c
kernel/bpf/syscall.c
kernel/bpf/verifier.c
kernel/crash_core.c
kernel/dma/Kconfig
kernel/dma/debug.c
kernel/dma/direct.c
kernel/dma/pool.c
kernel/events/callchain.c
kernel/events/core.c
kernel/events/uprobes.c
kernel/exit.c
kernel/fork.c
kernel/futex.c
kernel/irq/manage.c
kernel/irq/pm.c
kernel/kcov.c
kernel/kexec_file.c
kernel/kmod.c
kernel/kthread.c
kernel/locking/lockdep_proc.c
kernel/module.c
kernel/panic.c
kernel/power/hibernate.c
kernel/relay.c
kernel/sched/core.c
kernel/sched/sched.h
kernel/signal.c
kernel/stacktrace.c
kernel/sys_ni.c
kernel/sysctl.c
kernel/sysctl_binary.c [deleted file]
kernel/task_work.c
kernel/time/Kconfig
kernel/time/alarmtimer.c
kernel/time/hrtimer.c
kernel/time/posix-cpu-timers.c
kernel/time/sched_clock.c
kernel/time/timekeeping.c
kernel/time/timekeeping_internal.h
kernel/time/timer.c
kernel/time/vsyscall.c
kernel/trace/Makefile
kernel/trace/bpf_trace.c
kernel/umh.c
kernel/watch_queue.c
lib/Kconfig.debug
lib/Makefile
lib/bitmap.c
lib/crc64.c
lib/decompress_bunzip2.c
lib/decompress_unlzma.c
lib/iomap.c
lib/kstrtox.c
lib/livepatch/Makefile
lib/lz4/lz4_compress.c
lib/lz4/lz4_decompress.c
lib/lz4/lz4defs.h
lib/lz4/lz4hc_compress.c
lib/math/rational.c
lib/random32.c
lib/rbtree.c
lib/test_bitmap.c
lib/test_bitops.c
lib/test_bits.c [new file with mode: 0644]
lib/test_kmod.c
lib/test_lockup.c
lib/ts_bm.c
lib/vdso/gettimeofday.c
lib/xxhash.c
lib/xz/xz_crc32.c
lib/xz/xz_dec_bcj.c
lib/xz/xz_dec_lzma2.c
lib/xz/xz_lzma2.h
lib/xz/xz_stream.h
mm/cma.c
mm/cma.h
mm/compaction.c
mm/filemap.c
mm/frontswap.c
mm/gup.c
mm/hmm.c
mm/huge_memory.c
mm/hugetlb.c
mm/hugetlb_cgroup.c
mm/internal.h
mm/kasan/Makefile
mm/khugepaged.c
mm/kmemleak.c
mm/ksm.c
mm/list_lru.c
mm/maccess.c
mm/memcontrol.c
mm/memory-failure.c
mm/memory.c
mm/memory_hotplug.c
mm/mempolicy.c
mm/mempool.c
mm/migrate.c
mm/mlock.c
mm/mmu_notifier.c
mm/nommu.c
mm/oom_kill.c
mm/page_alloc.c
mm/page_counter.c
mm/page_io.c
mm/page_isolation.c
mm/page_vma_mapped.c
mm/percpu-internal.h
mm/percpu-km.c
mm/percpu-stats.c
mm/percpu-vm.c
mm/percpu.c
mm/process_vm_access.c
mm/rmap.c
mm/rodata_test.c
mm/shmem.c
mm/slab_common.c
mm/swap.c
mm/swap_state.c
mm/swapfile.c
mm/usercopy.c
mm/userfaultfd.c
mm/vmalloc.c
mm/vmscan.c
mm/vmstat.c
mm/workingset.c
mm/zpool.c
mm/zsmalloc.c
net/9p/trans_fd.c
net/bridge/netfilter/ebtables.c
net/bridge/netfilter/nf_conntrack_bridge.c
net/can/j1939/socket.c
net/can/j1939/transport.c
net/ceph/Kconfig
net/ceph/ceph_hash.c
net/ceph/crush/hash.c
net/ceph/crush/mapper.c
net/ceph/debugfs.c
net/ceph/osd_client.c
net/core/bpf_sk_storage.c
net/core/dev.c
net/core/devlink.c
net/core/filter.c
net/core/net-sysfs.c
net/core/skbuff.c
net/core/sock.c
net/ipv4/bpfilter/sockopt.c
net/ipv4/inet_connection_sock.c
net/ipv4/inet_hashtables.c
net/ipv4/sysctl_net_ipv4.c
net/ipv4/tcp.c
net/ipv4/tcp_fastopen.c
net/ipv6/netfilter.c
net/mptcp/protocol.c
net/mptcp/subflow.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_tables_api.c
net/netfilter/nft_compat.c
net/netfilter/nft_exthdr.c
net/netfilter/nft_set_rbtree.c
net/nfc/rawsock.c
net/openvswitch/datapath.c
net/openvswitch/flow_table.c
net/openvswitch/flow_table.h
net/packet/af_packet.c
net/qrtr/qrtr.c
net/socket.c
net/sunrpc/auth_gss/gss_krb5_wrap.c
net/sunrpc/auth_gss/svcauth_gss.c
net/sunrpc/auth_gss/trace.c
net/sunrpc/rpc_pipe.c
net/sunrpc/xprt.c
net/sunrpc/xprtrdma/frwr_ops.c
net/sunrpc/xprtrdma/rpc_rdma.c
net/sunrpc/xprtrdma/svc_rdma_backchannel.c
net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
net/sunrpc/xprtrdma/svc_rdma_rw.c
net/sunrpc/xprtrdma/svc_rdma_sendto.c
net/sunrpc/xprtrdma/svc_rdma_transport.c
net/tipc/Kconfig
net/tipc/netlink_compat.c
net/tls/tls_device.c
net/tls/tls_sw.c
net/vmw_vsock/af_vsock.c
net/xfrm/xfrm_policy.c
samples/auxdisplay/Makefile
samples/binderfs/Makefile
samples/connector/Makefile
samples/hidraw/Makefile
samples/mei/Makefile
samples/pidfd/Makefile
samples/seccomp/Makefile
samples/timers/Makefile
samples/uhid/Makefile
samples/vfs/Makefile
samples/watch_queue/Makefile
samples/watchdog/Makefile
scripts/Kbuild.include
scripts/Makefile
scripts/Makefile.build
scripts/Makefile.clean
scripts/Makefile.host
scripts/Makefile.kcov
scripts/Makefile.kcsan
scripts/Makefile.lib
scripts/Makefile.ubsan
scripts/basic/Makefile
scripts/checkkconfigsymbols.py
scripts/checkpatch.pl
scripts/dtc/Makefile
scripts/gcc-plugins/Makefile
scripts/gdb/linux/rbtree.py
scripts/genksyms/Makefile
scripts/kconfig/images.c
scripts/kconfig/images.h
scripts/kconfig/lexer.l
scripts/kconfig/qconf.cc
scripts/kconfig/qconf.h
scripts/kconfig/symbol.c
scripts/link-vmlinux.sh
scripts/mod/Makefile
scripts/package/buildtar
scripts/package/mkdebian
scripts/package/mkspec
scripts/recordmcount.c
scripts/selinux/genheaders/Makefile
scripts/selinux/mdp/Makefile
security/Kconfig
security/apparmor/Kconfig
security/integrity/ima/Kconfig
security/integrity/ima/ima_template.c
security/integrity/ima/ima_template_lib.c
security/integrity/ima/ima_template_lib.h
security/keys/encrypted-keys/ecryptfs_format.c
security/keys/encrypted-keys/ecryptfs_format.h
security/keys/encrypted-keys/encrypted.c
security/keys/encrypted-keys/masterkey_trusted.c
security/tomoyo/domain.c
sound/hda/hdac_bus.c
sound/hda/hdac_controller.c
sound/isa/sscape.c
sound/pci/echoaudio/echoaudio.c
sound/pci/echoaudio/echoaudio.h
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/soc/amd/acp3x-rt5682-max9836.c
sound/soc/amd/renoir/acp3x-pdm-dma.c
sound/soc/codecs/msm8916-wcd-analog.c
sound/soc/codecs/wm8958-dsp2.c
sound/soc/codecs/wm8962.c
sound/soc/codecs/wm8994.c
sound/soc/fsl/fsl-asoc-card.c
sound/soc/fsl/mpc5200_dma.c
sound/soc/intel/atom/sst-mfld-platform-pcm.c
sound/soc/qcom/qdsp6/q6afe-dai.c
sound/soc/qcom/qdsp6/q6routing.c
sound/soc/soc-component.c
sound/soc/tegra/tegra186_dspk.c
sound/soc/tegra/tegra210_admaif.c
sound/soc/tegra/tegra210_ahub.c
sound/soc/tegra/tegra210_dmic.c
sound/soc/tegra/tegra210_i2s.c
sound/usb/card.h
sound/usb/mixer.c
sound/usb/mixer_maps.c
sound/usb/mixer_quirks.c
sound/usb/mixer_us16x08.c
sound/usb/pcm.c
sound/usb/quirks-table.h
sound/usb/quirks.c
sound/usb/stream.c
tools/arch/powerpc/include/uapi/asm/perf_regs.h
tools/arch/s390/include/uapi/asm/kvm.h
tools/arch/x86/include/asm/cpufeatures.h
tools/arch/x86/include/asm/msr-index.h
tools/bpf/bpftool/btf_dumper.c
tools/bpf/bpftool/gen.c
tools/bpf/bpftool/iter.c
tools/bpf/bpftool/link.c
tools/bpf/bpftool/main.h
tools/bpf/bpftool/prog.c
tools/bpf/resolve_btfids/main.c
tools/build/Makefile.feature
tools/build/feature/Makefile
tools/build/feature/test-libdebuginfod.c [new file with mode: 0644]
tools/include/uapi/asm-generic/unistd.h
tools/include/uapi/drm/i915_drm.h
tools/include/uapi/linux/bpf.h
tools/include/uapi/linux/in.h
tools/include/uapi/linux/kvm.h
tools/include/uapi/linux/perf_event.h
tools/include/uapi/linux/vhost.h
tools/lib/api/fd/array.c
tools/lib/api/fd/array.h
tools/lib/bpf/bpf.c
tools/lib/bpf/bpf.h
tools/lib/bpf/bpf_helpers.h
tools/lib/bpf/btf.c
tools/lib/bpf/btf.h
tools/lib/bpf/btf_dump.c
tools/lib/bpf/libbpf.c
tools/lib/bpf/libbpf.h
tools/lib/bpf/libbpf.map
tools/lib/perf/Documentation/libperf-counting.txt
tools/lib/perf/Documentation/libperf-sampling.txt
tools/lib/perf/Documentation/libperf.txt
tools/lib/perf/evlist.c
tools/lib/perf/include/internal/evlist.h
tools/lib/perf/include/perf/event.h
tools/lib/traceevent/Documentation/libtraceevent-plugins.txt
tools/lib/traceevent/event-parse-local.h
tools/lib/traceevent/event-parse.c
tools/lib/traceevent/event-parse.h
tools/lib/traceevent/event-plugin.c
tools/lib/traceevent/kbuffer.h
tools/lib/traceevent/plugins/Build
tools/lib/traceevent/plugins/Makefile
tools/lib/traceevent/plugins/plugin_function.c
tools/lib/traceevent/plugins/plugin_futex.c [new file with mode: 0644]
tools/lib/traceevent/plugins/plugin_hrtimer.c
tools/lib/traceevent/plugins/plugin_jbd2.c
tools/lib/traceevent/plugins/plugin_kmem.c
tools/lib/traceevent/plugins/plugin_kvm.c
tools/lib/traceevent/plugins/plugin_mac80211.c
tools/lib/traceevent/plugins/plugin_sched_switch.c
tools/lib/traceevent/plugins/plugin_tlb.c [new file with mode: 0644]
tools/perf/Documentation/itrace.txt
tools/perf/Documentation/perf-bench.txt
tools/perf/Documentation/perf-config.txt
tools/perf/Documentation/perf-data.txt
tools/perf/Documentation/perf-ftrace.txt
tools/perf/Documentation/perf-intel-pt.txt
tools/perf/Documentation/perf-list.txt
tools/perf/Documentation/perf-record.txt
tools/perf/Documentation/perf-script.txt
tools/perf/Documentation/perf-stat.txt
tools/perf/Documentation/perf.data-file-format.txt
tools/perf/Makefile.config
tools/perf/Makefile.perf
tools/perf/arch/arm/util/auxtrace.c
tools/perf/arch/arm/util/cs-etm.c
tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
tools/perf/arch/powerpc/include/perf_regs.h
tools/perf/arch/powerpc/util/header.c
tools/perf/arch/powerpc/util/perf_regs.c
tools/perf/arch/powerpc/util/utils_header.h [new file with mode: 0644]
tools/perf/arch/s390/entry/syscalls/syscall.tbl
tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
tools/perf/arch/x86/util/intel-pt.c
tools/perf/bench/Build
tools/perf/bench/bench.h
tools/perf/bench/find-bit-bench.c [new file with mode: 0644]
tools/perf/bench/mem-functions.c
tools/perf/bench/numa.c
tools/perf/bench/syscall.c [new file with mode: 0644]
tools/perf/builtin-bench.c
tools/perf/builtin-c2c.c
tools/perf/builtin-data.c
tools/perf/builtin-ftrace.c
tools/perf/builtin-inject.c
tools/perf/builtin-kmem.c
tools/perf/builtin-kvm.c
tools/perf/builtin-record.c
tools/perf/builtin-report.c
tools/perf/builtin-sched.c
tools/perf/builtin-script.c
tools/perf/builtin-stat.c
tools/perf/builtin-top.c
tools/perf/builtin-trace.c
tools/perf/check-headers.sh
tools/perf/pmu-events/arch/powerpc/power9/metrics.json
tools/perf/pmu-events/arch/powerpc/power9/nest_metrics.json
tools/perf/tests/Build
tools/perf/tests/attr/README
tools/perf/tests/attr/test-record-pfm-period [new file with mode: 0644]
tools/perf/tests/builtin-test.c
tools/perf/tests/code-reading.c
tools/perf/tests/expr.c
tools/perf/tests/fdarray.c
tools/perf/tests/parse-events.c
tools/perf/tests/parse-metric.c [new file with mode: 0644]
tools/perf/tests/perf-record.c
tools/perf/tests/pmu-events.c
tools/perf/tests/shell/record+script_probe_vfs_getname.sh
tools/perf/tests/tests.h
tools/perf/trace/beauty/include/linux/socket.h [new file with mode: 0644]
tools/perf/trace/beauty/sockaddr.c
tools/perf/trace/beauty/socket.sh [new file with mode: 0755]
tools/perf/ui/browsers/annotate.c
tools/perf/util/Build
tools/perf/util/annotate.c
tools/perf/util/auxtrace.c
tools/perf/util/auxtrace.h
tools/perf/util/build-id.c
tools/perf/util/clockid.c [new file with mode: 0644]
tools/perf/util/clockid.h [new file with mode: 0644]
tools/perf/util/data-convert-bt.c
tools/perf/util/data-convert.h
tools/perf/util/debug.c
tools/perf/util/dso.c
tools/perf/util/dso.h
tools/perf/util/env.h
tools/perf/util/event.c
tools/perf/util/event.h
tools/perf/util/evlist.c
tools/perf/util/evlist.h
tools/perf/util/evsel.c
tools/perf/util/expr.c
tools/perf/util/expr.h
tools/perf/util/expr.l
tools/perf/util/expr.y
tools/perf/util/header.c
tools/perf/util/header.h
tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
tools/perf/util/intel-pt.c
tools/perf/util/jitdump.c
tools/perf/util/machine.c
tools/perf/util/machine.h
tools/perf/util/map.c
tools/perf/util/map.h
tools/perf/util/metricgroup.c
tools/perf/util/metricgroup.h
tools/perf/util/parse-events.c
tools/perf/util/parse-events.h
tools/perf/util/parse-events.l
tools/perf/util/parse-events.y
tools/perf/util/parse-sublevel-options.c [new file with mode: 0644]
tools/perf/util/parse-sublevel-options.h [new file with mode: 0644]
tools/perf/util/perf_api_probe.c
tools/perf/util/perf_api_probe.h
tools/perf/util/perf_event_attr_fprintf.c
tools/perf/util/pmu.c
tools/perf/util/pmu.h
tools/perf/util/probe-event.c
tools/perf/util/probe-finder.c
tools/perf/util/record.h
tools/perf/util/session.c
tools/perf/util/stat-shadow.c
tools/perf/util/stat.h
tools/perf/util/symbol-elf.c
tools/perf/util/symbol.c
tools/perf/util/tool.h
tools/testing/ktest/ktest.pl
tools/testing/ktest/sample.conf
tools/testing/nvdimm/test/nfit.c
tools/testing/selftests/bpf/Makefile
tools/testing/selftests/bpf/prog_tests/bpf_iter.c
tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c
tools/testing/selftests/bpf/prog_tests/btf_dump.c
tools/testing/selftests/bpf/prog_tests/core_extern.c
tools/testing/selftests/bpf/prog_tests/core_reloc.c
tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c
tools/testing/selftests/bpf/prog_tests/flow_dissector.c
tools/testing/selftests/bpf/prog_tests/global_data.c
tools/testing/selftests/bpf/prog_tests/mmap.c
tools/testing/selftests/bpf/prog_tests/prog_run_xattr.c
tools/testing/selftests/bpf/prog_tests/send_signal.c
tools/testing/selftests/bpf/prog_tests/sk_lookup.c
tools/testing/selftests/bpf/prog_tests/skb_ctx.c
tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c
tools/testing/selftests/bpf/prog_tests/varlen.c
tools/testing/selftests/bpf/progs/core_reloc_types.h
tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c
tools/testing/selftests/bpf/progs/test_varlen.c
tools/testing/selftests/bpf/settings [new file with mode: 0644]
tools/testing/selftests/bpf/test_btf.c
tools/testing/selftests/bpf/test_progs.h
tools/testing/selftests/bpf/test_tcpnotify_user.c
tools/testing/selftests/cgroup/test_kmem.c
tools/testing/selftests/exec/.gitignore
tools/testing/selftests/exec/Makefile
tools/testing/selftests/exec/non-regular.c [new file with mode: 0644]
tools/testing/selftests/kmod/kmod.sh
tools/testing/selftests/net/icmp_redirect.sh
tools/testing/selftests/net/mptcp/config
tools/testing/selftests/net/mptcp/mptcp_connect.c
tools/testing/selftests/netfilter/nft_flowtable.sh
tools/testing/selftests/seccomp/seccomp_bpf.c
tools/testing/selftests/vm/hmm-tests.c
tools/virtio/linux/virtio_config.h
virt/kvm/async_pf.c
virt/kvm/eventfd.c
virt/kvm/kvm_main.c
virt/lib/irqbypass.c

index a96f1b3..332c783 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -2,38 +2,44 @@
 # This list is used by git-shortlog to fix a few botched name translations
 # in the git archive, either because the author's full name was messed up
 # and/or not always written the same way, making contributions from the
-# same person appearing not to be so or badly displayed.
+# same person appearing not to be so or badly displayed. Also allows for
+# old email addresses to map to new email addresses.
 #
+# For format details, see "MAPPING AUTHORS" in "man git-shortlog".
+#
+# Please keep this list dictionary sorted.
+#
+# This comment is parsed by git-shortlog:
 # repo-abbrev: /pub/scm/linux/kernel/git/
 #
-
 Aaron Durbin <adurbin@google.com>
 Adam Oldham <oldhamca@gmail.com>
 Adam Radford <aradford@gmail.com>
-Adrian Bunk <bunk@stusta.de>
 Adriana Reus <adi.reus@gmail.com> <adriana.reus@intel.com>
+Adrian Bunk <bunk@stusta.de>
 Alan Cox <alan@lxorguk.ukuu.org.uk>
 Alan Cox <root@hraefn.swansea.linux.org.uk>
-Aleksey Gorelov <aleksey_gorelov@phoenix.com>
 Aleksandar Markovic <aleksandar.markovic@mips.com> <aleksandar.markovic@imgtec.com>
-Alex Shi <alex.shi@linux.alibaba.com> <alex.shi@intel.com>
-Alex Shi <alex.shi@linux.alibaba.com> <alex.shi@linaro.org>
+Aleksey Gorelov <aleksey_gorelov@phoenix.com>
 Alexander Lobakin <alobakin@pm.me> <alobakin@dlink.ru>
 Alexander Lobakin <alobakin@pm.me> <alobakin@marvell.com>
 Alexander Lobakin <alobakin@pm.me> <bloodyreaper@yandex.ru>
 Alexandre Belloni <alexandre.belloni@bootlin.com> <alexandre.belloni@free-electrons.com>
-Alexei Starovoitov <ast@kernel.org> <ast@plumgrid.com>
 Alexei Starovoitov <ast@kernel.org> <alexei.starovoitov@gmail.com>
 Alexei Starovoitov <ast@kernel.org> <ast@fb.com>
+Alexei Starovoitov <ast@kernel.org> <ast@plumgrid.com>
+Alex Shi <alex.shi@linux.alibaba.com> <alex.shi@intel.com>
+Alex Shi <alex.shi@linux.alibaba.com> <alex.shi@linaro.org>
 Al Viro <viro@ftp.linux.org.uk>
 Al Viro <viro@zenIV.linux.org.uk>
+Andi Kleen <ak@linux.intel.com> <ak@suse.de>
 Andi Shyti <andi@etezian.org> <andi.shyti@samsung.com>
 Andreas Herrmann <aherrman@de.ibm.com>
-Andrey Ryabinin <ryabinin.a.a@gmail.com> <a.ryabinin@samsung.com>
 Andrew Morton <akpm@linux-foundation.org>
-Andrew Murray <amurray@thegoodpenguin.co.uk> <andrew.murray@arm.com>
 Andrew Murray <amurray@thegoodpenguin.co.uk> <amurray@embedded-bits.co.uk>
+Andrew Murray <amurray@thegoodpenguin.co.uk> <andrew.murray@arm.com>
 Andrew Vasquez <andrew.vasquez@qlogic.com>
+Andrey Ryabinin <ryabinin.a.a@gmail.com> <a.ryabinin@samsung.com>
 Andy Adamson <andros@citi.umich.edu>
 Antoine Tenart <antoine.tenart@free-electrons.com>
 Antonio Ospite <ao2@ao2.it> <ao2@amarulasolutions.com>
@@ -43,40 +49,42 @@ Arnaud Patard <arnaud.patard@rtp-net.org>
 Arnd Bergmann <arnd@arndb.de>
 Axel Dyks <xl@xlsigned.net>
 Axel Lin <axel.lin@gmail.com>
-Bart Van Assche <bvanassche@acm.org> <bart.vanassche@wdc.com>
 Bart Van Assche <bvanassche@acm.org> <bart.vanassche@sandisk.com>
+Bart Van Assche <bvanassche@acm.org> <bart.vanassche@wdc.com>
 Ben Gardner <bgardner@wabtec.com>
 Ben M Cahill <ben.m.cahill@intel.com>
 Björn Steinbrink <B.Steinbrink@gmx.de>
-Boris Brezillon <bbrezillon@kernel.org> <boris.brezillon@bootlin.com>
-Boris Brezillon <bbrezillon@kernel.org> <boris.brezillon@free-electrons.com>
 Boris Brezillon <bbrezillon@kernel.org> <b.brezillon.dev@gmail.com>
 Boris Brezillon <bbrezillon@kernel.org> <b.brezillon@overkiz.com>
+Boris Brezillon <bbrezillon@kernel.org> <boris.brezillon@bootlin.com>
+Boris Brezillon <bbrezillon@kernel.org> <boris.brezillon@free-electrons.com>
 Brian Avery <b.avery@hp.com>
 Brian King <brking@us.ibm.com>
+Changbin Du <changbin.du@intel.com> <changbin.du@gmail.com>
+Changbin Du <changbin.du@intel.com> <changbin.du@intel.com>
 Chao Yu <chao@kernel.org> <chao2.yu@samsung.com>
 Chao Yu <chao@kernel.org> <yuchao0@huawei.com>
-Christoph Hellwig <hch@lst.de>
 Christophe Ricard <christophe.ricard@gmail.com>
+Christoph Hellwig <hch@lst.de>
 Corey Minyard <minyard@acm.org>
 Damian Hobson-Garcia <dhobsong@igel.co.jp>
-Daniel Borkmann <daniel@iogearbox.net> <dborkman@redhat.com>
-Daniel Borkmann <daniel@iogearbox.net> <dborkmann@redhat.com>
+Daniel Borkmann <daniel@iogearbox.net> <danborkmann@googlemail.com>
 Daniel Borkmann <daniel@iogearbox.net> <danborkmann@iogearbox.net>
 Daniel Borkmann <daniel@iogearbox.net> <daniel.borkmann@tik.ee.ethz.ch>
-Daniel Borkmann <daniel@iogearbox.net> <danborkmann@googlemail.com>
+Daniel Borkmann <daniel@iogearbox.net> <dborkmann@redhat.com>
+Daniel Borkmann <daniel@iogearbox.net> <dborkman@redhat.com>
 Daniel Borkmann <daniel@iogearbox.net> <dxchgb@gmail.com>
 David Brownell <david-b@pacbell.net>
 David Woodhouse <dwmw2@shinybook.infradead.org>
-Dengcheng Zhu <dzhu@wavecomp.com> <dengcheng.zhu@mips.com>
-Dengcheng Zhu <dzhu@wavecomp.com> <dengcheng.zhu@imgtec.com>
 Dengcheng Zhu <dzhu@wavecomp.com> <dczhu@mips.com>
 Dengcheng Zhu <dzhu@wavecomp.com> <dengcheng.zhu@gmail.com>
+Dengcheng Zhu <dzhu@wavecomp.com> <dengcheng.zhu@imgtec.com>
+Dengcheng Zhu <dzhu@wavecomp.com> <dengcheng.zhu@mips.com>
 <dev.kurt@vandijck-laurijssen.be> <kurt.van.dijck@eia.be>
 Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
-Dmitry Safonov <0x7f454c46@gmail.com> <dsafonov@virtuozzo.com>
-Dmitry Safonov <0x7f454c46@gmail.com> <d.safonov@partner.samsung.com>
 Dmitry Safonov <0x7f454c46@gmail.com> <dima@arista.com>
+Dmitry Safonov <0x7f454c46@gmail.com> <d.safonov@partner.samsung.com>
+Dmitry Safonov <0x7f454c46@gmail.com> <dsafonov@virtuozzo.com>
 Domen Puncer <domen@coderock.org>
 Douglas Gilbert <dougg@torque.net>
 Ed L. Cashin <ecashin@coraid.com>
@@ -87,19 +95,22 @@ Felix Kuhling <fxkuehl@gmx.de>
 Felix Moeller <felix@derklecks.de>
 Filipe Lautert <filipe@icewall.org>
 Franck Bui-Huu <vagabon.xyz@gmail.com>
-Frank Rowand <frowand.list@gmail.com> <frowand@mvista.com>
 Frank Rowand <frowand.list@gmail.com> <frank.rowand@am.sony.com>
 Frank Rowand <frowand.list@gmail.com> <frank.rowand@sonymobile.com>
+Frank Rowand <frowand.list@gmail.com> <frowand@mvista.com>
 Frank Zago <fzago@systemfabricworks.com>
 Gao Xiang <xiang@kernel.org> <gaoxiang25@huawei.com>
 Gao Xiang <xiang@kernel.org> <hsiangkao@aol.com>
-Gerald Schaefer <gerald.schaefer@linux.ibm.com> <gerald.schaefer@de.ibm.com>
 Gerald Schaefer <gerald.schaefer@linux.ibm.com> <geraldsc@de.ibm.com>
+Gerald Schaefer <gerald.schaefer@linux.ibm.com> <gerald.schaefer@de.ibm.com>
 Gerald Schaefer <gerald.schaefer@linux.ibm.com> <geraldsc@linux.vnet.ibm.com>
 Greg Kroah-Hartman <greg@echidna.(none)>
 Greg Kroah-Hartman <gregkh@suse.de>
 Greg Kroah-Hartman <greg@kroah.com>
+Greg Kurz <groug@kaod.org> <gkurz@linux.vnet.ibm.com>
 Gregory CLEMENT <gregory.clement@bootlin.com> <gregory.clement@free-electrons.com>
+Gustavo Padovan <gustavo@las.ic.unicamp.br>
+Gustavo Padovan <padovan@profusion.mobi>
 Hanjun Guo <guohanjun@huawei.com> <hanjun.guo@linaro.org>
 Heiko Carstens <hca@linux.ibm.com> <h.carstens@de.ibm.com>
 Heiko Carstens <hca@linux.ibm.com> <heiko.carstens@de.ibm.com>
@@ -109,32 +120,33 @@ Henrik Rydberg <rydberg@bitmath.org>
 Herbert Xu <herbert@gondor.apana.org.au>
 Jacob Shin <Jacob.Shin@amd.com>
 Jaegeuk Kim <jaegeuk@kernel.org> <jaegeuk@google.com>
-Jaegeuk Kim <jaegeuk@kernel.org> <jaegeuk@motorola.com>
 Jaegeuk Kim <jaegeuk@kernel.org> <jaegeuk.kim@samsung.com>
+Jaegeuk Kim <jaegeuk@kernel.org> <jaegeuk@motorola.com>
 Jakub Kicinski <kuba@kernel.org> <jakub.kicinski@netronome.com>
 James Bottomley <jejb@mulgrave.(none)>
 James Bottomley <jejb@titanic.il.steeleye.com>
 James E Wilson <wilson@specifix.com>
-James Hogan <jhogan@kernel.org> <james.hogan@imgtec.com>
 James Hogan <jhogan@kernel.org> <james@albanarts.com>
+James Hogan <jhogan@kernel.org> <james.hogan@imgtec.com>
 James Ketrenos <jketreno@io.(none)>
 Jan Glauber <jan.glauber@gmail.com> <jang@de.ibm.com>
 Jan Glauber <jan.glauber@gmail.com> <jang@linux.vnet.ibm.com>
 Jan Glauber <jan.glauber@gmail.com> <jglauber@cavium.com>
 Jason Gunthorpe <jgg@ziepe.ca> <jgg@mellanox.com>
+Jason Gunthorpe <jgg@ziepe.ca> <jgg@nvidia.com>
 Jason Gunthorpe <jgg@ziepe.ca> <jgunthorpe@obsidianresearch.com>
-Javi Merino <javi.merino@kernel.org> <javi.merino@arm.com>
 <javier@osg.samsung.com> <javier.martinez@collabora.co.uk>
+Javi Merino <javi.merino@kernel.org> <javi.merino@arm.com>
 Jayachandran C <c.jayachandran@gmail.com> <jayachandranc@netlogicmicro.com>
 Jayachandran C <c.jayachandran@gmail.com> <jchandra@broadcom.com>
 Jayachandran C <c.jayachandran@gmail.com> <jchandra@digeo.com>
 Jayachandran C <c.jayachandran@gmail.com> <jnair@caviumnetworks.com>
-Jean Tourrilhes <jt@hpl.hp.com>
 <jean-philippe@linaro.org> <jean-philippe.brucker@arm.com>
+Jean Tourrilhes <jt@hpl.hp.com>
 Jeff Garzik <jgarzik@pretzel.yyz.us>
-Jeff Layton <jlayton@kernel.org> <jlayton@redhat.com>
 Jeff Layton <jlayton@kernel.org> <jlayton@poochiereds.net>
 Jeff Layton <jlayton@kernel.org> <jlayton@primarydata.com>
+Jeff Layton <jlayton@kernel.org> <jlayton@redhat.com>
 Jens Axboe <axboe@suse.de>
 Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
 Jiri Slaby <jirislaby@kernel.org> <jirislaby@gmail.com>
@@ -158,30 +170,31 @@ Julien Thierry <julien.thierry.kdev@gmail.com> <julien.thierry@arm.com>
 Kamil Konieczny <k.konieczny@samsung.com> <k.konieczny@partner.samsung.com>
 Kay Sievers <kay.sievers@vrfy.org>
 Kenneth W Chen <kenneth.w.chen@intel.com>
-Konstantin Khlebnikov <koct9i@gmail.com> <k.khlebnikov@samsung.com>
 Konstantin Khlebnikov <koct9i@gmail.com> <khlebnikov@yandex-team.ru>
+Konstantin Khlebnikov <koct9i@gmail.com> <k.khlebnikov@samsung.com>
 Koushik <raghavendra.koushik@neterion.com>
-Krzysztof Kozlowski <krzk@kernel.org> <k.kozlowski@samsung.com>
 Krzysztof Kozlowski <krzk@kernel.org> <k.kozlowski.k@gmail.com>
+Krzysztof Kozlowski <krzk@kernel.org> <k.kozlowski@samsung.com>
 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
-Leon Romanovsky <leon@kernel.org> <leon@leon.nu>
-Leon Romanovsky <leon@kernel.org> <leonro@mellanox.com>
 Leonardo Bras <leobras.c@gmail.com> <leonardo@linux.ibm.com>
 Leonid I Ananiev <leonid.i.ananiev@intel.com>
+Leon Romanovsky <leon@kernel.org> <leon@leon.nu>
+Leon Romanovsky <leon@kernel.org> <leonro@mellanox.com>
+Leon Romanovsky <leon@kernel.org> <leonro@nvidia.com>
 Linas Vepstas <linas@austin.ibm.com>
-Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@web.de>
 Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@ascom.ch>
-Li Yang <leoyang.li@nxp.com> <leo@zh-kernel.org>
+Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@web.de>
 Li Yang <leoyang.li@nxp.com> <leoli@freescale.com>
+Li Yang <leoyang.li@nxp.com> <leo@zh-kernel.org>
 Lukasz Luba <lukasz.luba@arm.com> <l.luba@partner.samsung.com>
 Maciej W. Rozycki <macro@mips.com> <macro@imgtec.com>
-Marc Zyngier <maz@kernel.org> <marc.zyngier@arm.com>
 Marcin Nowakowski <marcin.nowakowski@mips.com> <marcin.nowakowski@imgtec.com>
+Marc Zyngier <maz@kernel.org> <marc.zyngier@arm.com>
 Mark Brown <broonie@sirena.org.uk>
 Mark Yao <markyao0591@gmail.com> <mark.yao@rock-chips.com>
-Martin Kepplinger <martink@posteo.de> <martin.kepplinger@theobroma-systems.com>
 Martin Kepplinger <martink@posteo.de> <martin.kepplinger@ginzinger.com>
 Martin Kepplinger <martink@posteo.de> <martin.kepplinger@puri.sm>
+Martin Kepplinger <martink@posteo.de> <martin.kepplinger@theobroma-systems.com>
 Mathieu Othacehe <m.othacehe@gmail.com>
 Matthew Wilcox <willy@infradead.org> <matthew.r.wilcox@intel.com>
 Matthew Wilcox <willy@infradead.org> <matthew@wil.cx>
@@ -191,17 +204,17 @@ Matthew Wilcox <willy@infradead.org> <willy@debian.org>
 Matthew Wilcox <willy@infradead.org> <willy@linux.intel.com>
 Matthew Wilcox <willy@infradead.org> <willy@parisc-linux.org>
 Matthieu CASTET <castet.matthieu@free.fr>
-Mauro Carvalho Chehab <mchehab@kernel.org> <mchehab@brturbo.com.br>
+Matt Ranostay <matt.ranostay@konsulko.com> <matt@ranostay.consulting>
+Matt Ranostay <mranostay@gmail.com> Matthew Ranostay <mranostay@embeddedalley.com>
+Matt Ranostay <mranostay@gmail.com> <matt.ranostay@intel.com>
+Matt Redfearn <matt.redfearn@mips.com> <matt.redfearn@imgtec.com>
 Mauro Carvalho Chehab <mchehab@kernel.org> <maurochehab@gmail.com>
+Mauro Carvalho Chehab <mchehab@kernel.org> <mchehab@brturbo.com.br>
 Mauro Carvalho Chehab <mchehab@kernel.org> <mchehab@infradead.org>
+Mauro Carvalho Chehab <mchehab@kernel.org> <mchehab@osg.samsung.com>
 Mauro Carvalho Chehab <mchehab@kernel.org> <mchehab@redhat.com>
 Mauro Carvalho Chehab <mchehab@kernel.org> <m.chehab@samsung.com>
-Mauro Carvalho Chehab <mchehab@kernel.org> <mchehab@osg.samsung.com>
 Mauro Carvalho Chehab <mchehab@kernel.org> <mchehab@s-opensource.com>
-Matt Ranostay <mranostay@gmail.com> Matthew Ranostay <mranostay@embeddedalley.com>
-Matt Ranostay <mranostay@gmail.com> <matt.ranostay@intel.com>
-Matt Ranostay <matt.ranostay@konsulko.com> <matt@ranostay.consulting>
-Matt Redfearn <matt.redfearn@mips.com> <matt.redfearn@imgtec.com>
 Maxime Ripard <mripard@kernel.org> <maxime.ripard@bootlin.com>
 Maxime Ripard <mripard@kernel.org> <maxime.ripard@free-electrons.com>
 Mayuresh Janorkar <mayur@ti.com>
@@ -233,13 +246,13 @@ Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
 Patrick Mochel <mochel@digitalimplant.org>
 Paul Burton <paulburton@kernel.org> <paul.burton@imgtec.com>
 Paul Burton <paulburton@kernel.org> <paul.burton@mips.com>
+Paul E. McKenney <paulmck@kernel.org> <paul.mckenney@linaro.org>
 Paul E. McKenney <paulmck@kernel.org> <paulmck@linux.ibm.com>
 Paul E. McKenney <paulmck@kernel.org> <paulmck@linux.vnet.ibm.com>
-Paul E. McKenney <paulmck@kernel.org> <paul.mckenney@linaro.org>
 Paul E. McKenney <paulmck@kernel.org> <paulmck@us.ibm.com>
 Peter A Jonsson <pj@ludd.ltu.se>
-Peter Oruba <peter@oruba.de>
 Peter Oruba <peter.oruba@amd.com>
+Peter Oruba <peter@oruba.de>
 Pratyush Anand <pratyush.anand@gmail.com> <pratyush.anand@st.com>
 Praveen BP <praveenbp@ti.com>
 Punit Agrawal <punitagrawal@gmail.com> <punit.agrawal@arm.com>
@@ -252,23 +265,23 @@ Ralf Baechle <ralf@linux-mips.org>
 Ralf Wildenhues <Ralf.Wildenhues@gmx.de>
 Randy Dunlap <rdunlap@infradead.org> <rdunlap@xenotime.net>
 Rémi Denis-Courmont <rdenis@simphalempin.com>
-Ricardo Ribalda <ribalda@kernel.org> <ricardo.ribalda@gmail.com>
 Ricardo Ribalda <ribalda@kernel.org> <ricardo@ribalda.com>
 Ricardo Ribalda <ribalda@kernel.org> Ricardo Ribalda Delgado <ribalda@kernel.org>
+Ricardo Ribalda <ribalda@kernel.org> <ricardo.ribalda@gmail.com>
 Ross Zwisler <zwisler@kernel.org> <ross.zwisler@linux.intel.com>
 Rudolf Marek <R.Marek@sh.cvut.cz>
 Rui Saraiva <rmps@joel.ist.utl.pt>
 Sachin P Sant <ssant@in.ibm.com>
-Sarangdhar Joshi <spjoshi@codeaurora.org>
+Sakari Ailus <sakari.ailus@linux.intel.com> <sakari.ailus@iki.fi>
 Sam Ravnborg <sam@mars.ravnborg.org>
-Santosh Shilimkar <ssantosh@kernel.org>
 Santosh Shilimkar <santosh.shilimkar@oracle.org>
+Santosh Shilimkar <ssantosh@kernel.org>
+Sarangdhar Joshi <spjoshi@codeaurora.org>
 Sascha Hauer <s.hauer@pengutronix.de>
 S.Çağlar Onur <caglar@pardus.org.tr>
-Sakari Ailus <sakari.ailus@linux.intel.com> <sakari.ailus@iki.fi>
 Sean Nyekjaer <sean@geanix.com> <sean.nyekjaer@prevas.dk>
-Sebastian Reichel <sre@kernel.org> <sre@debian.org>
 Sebastian Reichel <sre@kernel.org> <sebastian.reichel@collabora.co.uk>
+Sebastian Reichel <sre@kernel.org> <sre@debian.org>
 Sedat Dilek <sedat.dilek@gmail.com> <sedat.dilek@credativ.de>
 Shiraz Hashim <shiraz.linux.kernel@gmail.com> <shiraz.hashim@st.com>
 Shuah Khan <shuah@kernel.org> <shuahkhan@gmail.com>
@@ -279,18 +292,21 @@ Simon Arlott <simon@octiron.net> <simon@fire.lp0.eu>
 Simon Kelley <simon@thekelleys.org.uk>
 Stéphane Witzmann <stephane.witzmann@ubpmes.univ-bpclermont.fr>
 Stephen Hemminger <shemminger@osdl.org>
+Steve Wise <larrystevenwise@gmail.com> <swise@chelsio.com>
+Steve Wise <larrystevenwise@gmail.com> <swise@opengridcomputing.com>
 Subash Abhinov Kasiviswanathan <subashab@codeaurora.org>
 Subhash Jadavani <subhashj@codeaurora.org>
 Sudeep Holla <sudeep.holla@arm.com> Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
 Sumit Semwal <sumit.semwal@ti.com>
+Takashi YOSHII <takashi.yoshii.zj@renesas.com>
 Tejun Heo <htejun@gmail.com>
 Thomas Graf <tgraf@suug.ch>
 Thomas Pedersen <twp@codeaurora.org>
 Tiezhu Yang <yangtiezhu@loongson.cn> <kernelpatch@126.com>
 Todor Tomov <todor.too@gmail.com> <todor.tomov@linaro.org>
 Tony Luck <tony.luck@intel.com>
-TripleX Chung <xxx.phy@gmail.com> <zhongyu@18mail.cn>
 TripleX Chung <xxx.phy@gmail.com> <triplex@zh-kernel.org>
+TripleX Chung <xxx.phy@gmail.com> <zhongyu@18mail.cn>
 Tsuneo Yoshioka <Tsuneo.Yoshioka@f-secure.com>
 Uwe Kleine-König <ukleinek@informatik.uni-freiburg.de>
 Uwe Kleine-König <ukl@pengutronix.de>
@@ -299,22 +315,16 @@ Valdis Kletnieks <Valdis.Kletnieks@vt.edu>
 Vinod Koul <vkoul@kernel.org> <vinod.koul@intel.com>
 Vinod Koul <vkoul@kernel.org> <vinod.koul@linux.intel.com>
 Vinod Koul <vkoul@kernel.org> <vkoul@infradead.org>
+Viresh Kumar <vireshk@kernel.org> <viresh.kumar2@arm.com>
 Viresh Kumar <vireshk@kernel.org> <viresh.kumar@st.com>
 Viresh Kumar <vireshk@kernel.org> <viresh.linux@gmail.com>
-Viresh Kumar <vireshk@kernel.org> <viresh.kumar2@arm.com>
 Vivien Didelot <vivien.didelot@gmail.com> <vivien.didelot@savoirfairelinux.com>
 Vlad Dogaru <ddvlad@gmail.com> <vlad.dogaru@intel.com>
-Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@virtuozzo.com>
 Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@parallels.com>
-Takashi YOSHII <takashi.yoshii.zj@renesas.com>
+Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@virtuozzo.com>
+WeiXiong Liao <gmpy.liaowx@gmail.com> <liaoweixiong@allwinnertech.com>
 Will Deacon <will@kernel.org> <will.deacon@arm.com>
-Wolfram Sang <wsa@kernel.org> <wsa@the-dreams.de>
 Wolfram Sang <wsa@kernel.org> <w.sang@pengutronix.de>
+Wolfram Sang <wsa@kernel.org> <wsa@the-dreams.de>
 Yakir Yang <kuankuan.y@gmail.com> <ykk@rock-chips.com>
 Yusuke Goda <goda.yusuke@renesas.com>
-Gustavo Padovan <gustavo@las.ic.unicamp.br>
-Gustavo Padovan <padovan@profusion.mobi>
-Changbin Du <changbin.du@intel.com> <changbin.du@intel.com>
-Changbin Du <changbin.du@intel.com> <changbin.du@gmail.com>
-Steve Wise <larrystevenwise@gmail.com> <swise@chelsio.com>
-Steve Wise <larrystevenwise@gmail.com> <swise@opengridcomputing.com>
index a1cb44d..e4f76e7 100644 (file)
@@ -202,6 +202,25 @@ Description:
                functions. See the section named 'NVDIMM Root Device _DSMs' in
                the ACPI specification.
 
+What:          /sys/bus/nd/devices/ndbusX/nfit/firmware_activate_noidle
+Date:          Apr, 2020
+KernelVersion: v5.8
+Contact:       linux-nvdimm@lists.01.org
+Description:
+               (RW) The Intel platform implementation of firmware activate
+               support exposes an option to let the platform force idle devices in
+               the system over the activation event, or trust that the OS will
+               do it. The safe default is to let the platform force idle
+               devices since the kernel is already in a suspend state, and on
+               the chance that a driver does not properly quiesce bus-mastering
+               after a suspend callback the platform will handle it.  However,
+               the activation might abort if, for example, platform firmware
+               determines that the activation time exceeds the max PCI-E
+               completion timeout. Since the platform does not know whether the
+               OS is running the activation from a suspend context it aborts,
+               but if the system owner trusts the driver suspend callback to be
+               sufficient then 'firmware_activate_noidle' can be
+               enabled to bypass the activation abort.
 
 What:          /sys/bus/nd/devices/regionX/nfit/range_index
 Date:          Jun, 2015
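
A minimal user-space sketch of enabling the override described above, assuming a bus instance named ndbus0 and the usual boolean sysfs syntax:

/*
 * Illustrative sketch only: turn on firmware_activate_noidle for one
 * libnvdimm bus.  "ndbus0" is an assumption; enumerate
 * /sys/bus/nd/devices/ to find the bus present on a given system.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        const char *path =
                "/sys/bus/nd/devices/ndbus0/nfit/firmware_activate_noidle";
        int fd = open(path, O_WRONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (write(fd, "1", 1) != 1) {   /* "1" enables the bypass */
                perror("write");
                close(fd);
                return 1;
        }
        close(fd);
        return 0;
}
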
diff --git a/Documentation/ABI/testing/sysfs-bus-nvdimm b/Documentation/ABI/testing/sysfs-bus-nvdimm
new file mode 100644 (file)
index 0000000..d643802
--- /dev/null
@@ -0,0 +1,2 @@
+The libnvdimm sub-system implements a common sysfs interface for
+platform nvdimm resources. See Documentation/driver-api/nvdimm/.
diff --git a/Documentation/ABI/testing/sysfs-driver-input-exc3000 b/Documentation/ABI/testing/sysfs-driver-input-exc3000
new file mode 100644 (file)
index 0000000..3d316d5
--- /dev/null
@@ -0,0 +1,15 @@
+What:          /sys/bus/i2c/devices/xxx/fw_version
+Date:          Aug 2020
+Contact:       linux-input@vger.kernel.org
+Description:    Reports the firmware version provided by the touchscreen, for example "00_T6" on an EXC80H60
+
+               Access: Read
+               Valid values: Represented as string
+
+What:          /sys/bus/i2c/devices/xxx/model
+Date:          Aug 2020
+Contact:       linux-input@vger.kernel.org
+Description:    Reports the model identification provided by the touchscreen, for example "Orion_1320" on an EXC80H60
+
+               Access: Read
+               Valid values: Represented as string
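
A minimal sketch of reading both attributes, with the hypothetical I2C instance 3-002a standing in for the xxx placeholder above:

/*
 * Illustrative sketch only: print fw_version and model for an EXC3000
 * touchscreen.  The instance name "3-002a" is a made-up placeholder;
 * the real name depends on the I2C bus and client address on a board.
 */
#include <stdio.h>

static void print_attr(const char *name)
{
        char path[256], buf[64];
        FILE *f;

        snprintf(path, sizeof(path),
                 "/sys/bus/i2c/devices/3-002a/%s", name);
        f = fopen(path, "r");
        if (!f) {
                perror(path);
                return;
        }
        if (fgets(buf, sizeof(buf), f))
                printf("%s: %s", name, buf);
        fclose(f);
}

int main(void)
{
        print_attr("fw_version");
        print_attr("model");
        return 0;
}
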
index 4bb93a0..7f730c4 100644 (file)
@@ -229,7 +229,9 @@ Date:               August 2017
 Contact:       "Jaegeuk Kim" <jaegeuk@kernel.org>
 Description:   Do background GC aggressively when set. When gc_urgent = 1,
                background thread starts to do GC by given gc_urgent_sleep_time
-               interval. It is set to 0 by default.
+               interval. When gc_urgent = 2, F2FS will lower the bar of
+               checking idle in order to process outstanding discard commands
+               and GC a little bit aggressively. It is set to 0 by default.
 
 What:          /sys/fs/f2fs/<disk>/gc_urgent_sleep_time
 Date:          August 2017
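
A minimal sketch of selecting the new gc_urgent = 2 behaviour, with sda1 standing in for the <disk> placeholder above:

/*
 * Illustrative sketch only: request the gc_urgent = 2 mode described
 * above.  "sda1" is an assumed F2FS volume name; replace it with the
 * entry that actually appears under /sys/fs/f2fs/.
 */
#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/sys/fs/f2fs/sda1/gc_urgent", "w");

        if (!f) {
                perror("gc_urgent");
                return 1;
        }
        fprintf(f, "2\n");              /* 0 = off, 1 = urgent, 2 = lower idle bar */
        return fclose(f) ? 1 : 0;
}
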
index fa4018a..6be4378 100644 (file)
@@ -1274,6 +1274,10 @@ PAGE_SIZE multiple when read back.
                Amount of memory used for storing in-kernel data
                structures.
 
+         percpu
+               Amount of memory used for storing per-cpu kernel
+               data structures.
+
          sock
                Amount of memory used in network transmission buffers
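
A minimal sketch of reading the new percpu entry from a cgroup's memory.stat file, assuming an existing cgroup at /sys/fs/cgroup/system.slice:

/*
 * Illustrative sketch only: print the "percpu" line from a cgroup v2
 * memory.stat file.  The cgroup path is an assumption; point it at any
 * cgroup present on the system.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
        const char *path = "/sys/fs/cgroup/system.slice/memory.stat";
        char line[256];
        FILE *f = fopen(path, "r");

        if (!f) {
                perror(path);
                return 1;
        }
        while (fgets(line, sizeof(line), f)) {
                if (strncmp(line, "percpu ", 7) == 0) {
                        printf("per-cpu kernel memory: %s", line + 7);
                        break;
                }
        }
        fclose(f);
        return 0;
}
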
 
index a683976..d2795ca 100644 (file)
@@ -489,6 +489,9 @@ Files in /sys/fs/ext4/<devname>:
         multiple of this tuning parameter if the stripe size is not set in the
         ext4 superblock
 
+  mb_max_inode_prealloc
+        The maximum length of per-inode ext4_prealloc_space list.
+
   mb_max_to_scan
         The maximum number of extents the multiblock allocator will search to
         find the best extent.
@@ -529,21 +532,21 @@ Files in /sys/fs/ext4/<devname>:
 Ioctls
 ======
 
-There is some Ext4 specific functionality which can be accessed by applications
-through the system call interfaces. The list of all Ext4 specific ioctls are
-shown in the table below.
+Ext4 implements various ioctls which can be used by applications to access
+ext4-specific functionality. An incomplete list of these ioctls is shown in the
+table below. This list includes truly ext4-specific ioctls (``EXT4_IOC_*``) as
+well as ioctls that may have been ext4-specific originally but are now supported
+by some other filesystem(s) too (``FS_IOC_*``).
 
-Table of Ext4 specific ioctls
+Table of Ext4 ioctls
 
-  EXT4_IOC_GETFLAGS
+  FS_IOC_GETFLAGS
         Get additional attributes associated with inode.  The ioctl argument is
-        an integer bitfield, with bit values described in ext4.h. This ioctl is
-        an alias for FS_IOC_GETFLAGS.
+        an integer bitfield, with bit values described in ext4.h.
 
-  EXT4_IOC_SETFLAGS
+  FS_IOC_SETFLAGS
         Set additional attributes associated with inode.  The ioctl argument is
-        an integer bitfield, with bit values described in ext4.h. This ioctl is
-        an alias for FS_IOC_SETFLAGS.
+        an integer bitfield, with bit values described in ext4.h.
 
   EXT4_IOC_GETVERSION, EXT4_IOC_GETVERSION_OLD
         Get the inode i_generation number stored for each inode. The
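
A minimal sketch of the FS_IOC_GETFLAGS/FS_IOC_SETFLAGS pair from the table above, setting the no-atime flag on an arbitrary example file; the path is a placeholder, and the caller must own the file (or have CAP_FOWNER) for the set to succeed:

/*
 * Illustrative sketch only: read the inode attribute bitfield and set
 * FS_NOATIME_FL on an example file.  Flag constants live in
 * <linux/fs.h>; /tmp/example-file is an assumed path that must exist.
 */
#include <fcntl.h>
#include <linux/fs.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
        int attr;
        int fd = open("/tmp/example-file", O_RDONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (ioctl(fd, FS_IOC_GETFLAGS, &attr) < 0) {
                perror("FS_IOC_GETFLAGS");
                close(fd);
                return 1;
        }
        attr |= FS_NOATIME_FL;          /* bit values described in linux/fs.h */
        if (ioctl(fd, FS_IOC_SETFLAGS, &attr) < 0)
                perror("FS_IOC_SETFLAGS");
        close(fd);
        return 0;
}
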
index ba9988d..140e4ce 100644 (file)
@@ -80,6 +80,10 @@ The possible values in this file are:
        - The processor is not vulnerable.
      * - KVM: Mitigation: Split huge pages
        - Software changes mitigate this issue.
+     * - KVM: Mitigation: VMX unsupported
+       - KVM is not vulnerable because Virtual Machine Extensions (VMX) is not supported.
+     * - KVM: Mitigation: VMX disabled
+       - KVM is not vulnerable because Virtual Machine Extensions (VMX) is disabled.
      * - KVM: Vulnerable
        - The processor is vulnerable, but no mitigation enabled
 
index 98ea67f..bdc1f33 100644 (file)
                        memory region [offset, offset + size] for that kernel
                        image. If '@offset' is omitted, then a suitable offset
                        is selected automatically.
-                       [KNL, x86_64] select a region under 4G first, and
+                       [KNL, X86-64] Select a region under 4G first, and
                        fall back to reserve region above 4G when '@offset'
                        hasn't been specified.
                        See Documentation/admin-guide/kdump/kdump.rst for further details.
                        Documentation/admin-guide/kdump/kdump.rst for an example.
 
        crashkernel=size[KMG],high
-                       [KNL, x86_64] range could be above 4G. Allow kernel
+                       [KNL, X86-64] range could be above 4G. Allow kernel
                        to allocate physical memory region from top, so could
                        be above 4G if system have more than 4G ram installed.
                        Otherwise memory region will be allocated below 4G, if
                        available.
                        It will be ignored if crashkernel=X is specified.
        crashkernel=size[KMG],low
-                       [KNL, x86_64] range under 4G. When crashkernel=X,high
+                       [KNL, X86-64] range under 4G. When crashkernel=X,high
                        is passed, kernel could allocate physical memory region
                        above 4G, that cause second kernel crash on system
                        that require some amount of low memory, e.g. swiotlb
 
        gamma=          [HW,DRM]
 
-       gart_fix_e820=  [X86_64] disable the fix e820 for K8 GART
+       gart_fix_e820=  [X86-64] disable the fix e820 for K8 GART
                        Format: off | on
                        default: on
 
                        Format: 0 | 1
                        Default set by CONFIG_INIT_ON_FREE_DEFAULT_ON.
 
-       init_pkru=      [x86] Specify the default memory protection keys rights
+       init_pkru=      [X86] Specify the default memory protection keys rights
                        register contents for all processes.  0x55555554 by
                        default (disallow access to all but pkey 0).  Can
                        override in debugfs after boot.
        inport.irq=     [HW] Inport (ATI XL and Microsoft) busmouse driver
                        Format: <irq>
 
-       int_pln_enable  [x86] Enable power limit notification interrupt
+       int_pln_enable  [X86] Enable power limit notification interrupt
 
        integrity_audit=[IMA]
                        Format: { "0" | "1" }
                        bypassed by not enabling DMAR with this option. In
                        this case, gfx device will use physical address for
                        DMA.
-               forcedac [x86_64]
+               forcedac [X86-64]
                        With this option iommu will not optimize to look
                        for io virtual address below 32-bit forcing dual
                        address cycle on pci bus for cards supporting greater
                strict  regions from userspace.
                relaxed
 
-       iommu=          [x86]
+       iommu=          [X86]
                off
                force
                noforce
                merge
                nomerge
                soft
-               pt              [x86]
-               nopt            [x86]
+               pt              [X86]
+               nopt            [X86]
                nobypass        [PPC/POWERNV]
                        Disable IOMMU bypass, using IOMMU for PCI devices.
 
 
        iucv=           [HW,NET]
 
-       ivrs_ioapic     [HW,X86_64]
+       ivrs_ioapic     [HW,X86-64]
                        Provide an override to the IOAPIC-ID<->DEVICE-ID
                        mapping provided in the IVRS ACPI table. For
                        example, to map IOAPIC-ID decimal 10 to
                        PCI device 00:14.0 write the parameter as:
                                ivrs_ioapic[10]=00:14.0
 
-       ivrs_hpet       [HW,X86_64]
+       ivrs_hpet       [HW,X86-64]
                        Provide an override to the HPET-ID<->DEVICE-ID
                        mapping provided in the IVRS ACPI table. For
                        example, to map HPET-ID decimal 0 to
                        PCI device 00:14.0 write the parameter as:
                                ivrs_hpet[0]=00:14.0
 
-       ivrs_acpihid    [HW,X86_64]
+       ivrs_acpihid    [HW,X86-64]
                        Provide an override to the ACPI-HID:UID<->DEVICE-ID
                        mapping provided in the IVRS ACPI table. For
                        example, to map UART-HID:UID AMD0020:0 to
        lapic           [X86-32,APIC] Enable the local APIC even if BIOS
                        disabled it.
 
-       lapic=          [x86,APIC] "notscdeadline" Do not use TSC deadline
+       lapic=          [X86,APIC] "notscdeadline" Do not use TSC deadline
                        value for LAPIC timer one-shot implementation. Default
                        back to the programmable timer unit in the LAPIC.
 
                        register save and restore. The kernel will only save
                        legacy floating-point registers on task switch.
 
-       nohugeiomap     [KNL,x86,PPC] Disable kernel huge I/O mappings.
+       nohugeiomap     [KNL,X86,PPC] Disable kernel huge I/O mappings.
 
        nosmt           [KNL,S390] Disable symmetric multithreading (SMT).
                        Equivalent to smt=1.
 
-                       [KNL,x86] Disable symmetric multithreading (SMT).
+                       [KNL,X86] Disable symmetric multithreading (SMT).
                        nosmt=force: Force disable SMT, cannot be undone
                                     via the sysfs control file.
 
        pt.             [PARIDE]
                        See Documentation/admin-guide/blockdev/paride.rst.
 
-       pti=            [X86_64] Control Page Table Isolation of user and
+       pti=            [X86-64] Control Page Table Isolation of user and
                        kernel address spaces.  Disabling this feature
                        removes hardening, but improves performance of
                        system calls and interrupts.
 
                        Not specifying this option is equivalent to pti=auto.
 
-       nopti           [X86_64]
+       nopti           [X86-64]
                        Equivalent to pti=off
 
        pty.legacy_count=
index 9db9249..7adef96 100644 (file)
@@ -54,10 +54,13 @@ registered (see `below <status_attr_>`_).
 Operation Modes
 ===============
 
-``intel_pstate`` can operate in three different modes: in the active mode with
-or without hardware-managed P-states support and in the passive mode.  Which of
-them will be in effect depends on what kernel command line options are used and
-on the capabilities of the processor.
+``intel_pstate`` can operate in two different modes, active or passive.  In the
+active mode, it uses its own internal performance scaling governor algorithm or
+allows the hardware to do performance scaling by itself, while in the passive
+mode it responds to requests made by a generic ``CPUFreq`` governor implementing
+a certain performance scaling algorithm.  Which of them will be in effect
+depends on what kernel command line options are used and on the capabilities of
+the processor.
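
As a quick way to confirm which mode ended up in effect on a running system,
the driver's ``status`` attribute can be read from user space.  A minimal
sketch in C (assuming the usual ``/sys/devices/system/cpu/intel_pstate/status``
path)::

    #include <stdio.h>

    int main(void)
    {
            /* Prints "off", "active" or "passive", depending on the kernel
             * command line options used and on the processor's capabilities. */
            char mode[16] = "";
            FILE *f = fopen("/sys/devices/system/cpu/intel_pstate/status", "r");

            if (!f) {
                    perror("intel_pstate status");
                    return 1;
            }
            if (fgets(mode, sizeof(mode), f))
                    printf("intel_pstate mode: %s", mode);
            fclose(f);
            return 0;
    }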
 
 Active Mode
 -----------
@@ -194,10 +197,11 @@ This is the default operation mode of ``intel_pstate`` for processors without
 hardware-managed P-states (HWP) support.  It is always used if the
 ``intel_pstate=passive`` argument is passed to the kernel in the command line
 regardless of whether or not the given processor supports HWP.  [Note that the
-``intel_pstate=no_hwp`` setting implies ``intel_pstate=passive`` if it is used
-without ``intel_pstate=active``.]  Like in the active mode without HWP support,
-in this mode ``intel_pstate`` may refuse to work with processors that are not
-recognized by it.
+``intel_pstate=no_hwp`` setting causes the driver to start in the passive mode
+if it is not combined with ``intel_pstate=active``.]  Like in the active mode
+without HWP support, in this mode ``intel_pstate`` may refuse to work with
+processors that are not recognized by it if HWP is prevented from being enabled
+through the kernel command line.
 
 If the driver works in this mode, the ``scaling_driver`` policy attribute in
 ``sysfs`` for all ``CPUFreq`` policies contains the string "intel_cpufreq".
@@ -318,10 +322,9 @@ manuals need to be consulted to get to it too.
 
 For this reason, there is a list of supported processors in ``intel_pstate`` and
 the driver initialization will fail if the detected processor is not in that
-list, unless it supports the `HWP feature <Active Mode_>`_.  [The interface to
-obtain all of the information listed above is the same for all of the processors
-supporting the HWP feature, which is why they all are supported by
-``intel_pstate``.]
+list, unless it supports the HWP feature.  [The interface to obtain all of the
+information listed above is the same for all of the processors supporting the
+HWP feature, which is why ``intel_pstate`` works with all of them.]
 
 
 User Space Interface in ``sysfs``
@@ -425,22 +428,16 @@ argument is passed to the kernel in the command line.
        as well as the per-policy ones) are then reset to their default
        values, possibly depending on the target operation mode.]
 
-       That only is supported in some configurations, though (for example, if
-       the `HWP feature is enabled in the processor <Active Mode With HWP_>`_,
-       the operation mode of the driver cannot be changed), and if it is not
-       supported in the current configuration, writes to this attribute will
-       fail with an appropriate error.
-
 ``energy_efficiency``
-       This attribute is only present on platforms, which have CPUs matching
-       Kaby Lake or Coffee Lake desktop CPU model. By default
-       energy efficiency optimizations are disabled on these CPU models in HWP
-       mode by this driver. Enabling energy efficiency may limit maximum
-       operating frequency in both HWP and non HWP mode. In non HWP mode,
-       optimizations are done only in the turbo frequency range. In HWP mode,
-       optimizations are done in the entire frequency range. Setting this
-       attribute to "1" enables energy efficiency optimizations and setting
-       to "0" disables energy efficiency optimizations.
+       This attribute is only present on platforms with CPUs matching the Kaby
+       Lake or Coffee Lake desktop CPU model. By default, energy-efficiency
+       optimizations are disabled on these CPU models if HWP is enabled.
+       Enabling energy-efficiency optimizations may limit maximum operating
+       frequency with or without the HWP feature.  With HWP enabled, the
+       optimizations are done only in the turbo frequency range.  Without it,
+       they are done in the entire available frequency range.  Setting this
+       attribute to "1" enables the energy-efficiency optimizations and setting
+       to "0" disables them.
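
The attribute can be toggled from user space with a few lines of C; a sketch,
assuming it is exposed as ``/sys/devices/system/cpu/intel_pstate/energy_efficiency``
and that the caller has root privileges::

    #include <stdio.h>

    int main(void)
    {
            /* Write "1" to enable the energy-efficiency optimizations,
             * or "0" to disable them. */
            FILE *f = fopen("/sys/devices/system/cpu/intel_pstate/energy_efficiency", "w");

            if (!f) {
                    perror("energy_efficiency");
                    return 1;
            }
            fputs("1\n", f);
            fclose(f);
            return 0;
    }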
 
 Interpretation of Policy Attributes
 -----------------------------------
@@ -484,8 +481,8 @@ Next, the following policy attributes have special meaning if
        policy for the time interval between the last two invocations of the
        driver's utilization update callback by the CPU scheduler for that CPU.
 
-One more policy attribute is present if the `HWP feature is enabled in the
-processor <Active Mode With HWP_>`_:
+One more policy attribute is present if the HWP feature is enabled in the
+processor:
 
 ``base_frequency``
        Shows the base frequency of the CPU. Any frequency above this will be
@@ -526,11 +523,11 @@ on the following rules, regardless of the current operation mode of the driver:
 
  3. The global and per-policy limits can be set independently.
 
-If the `HWP feature is enabled in the processor <Active Mode With HWP_>`_, the
-resulting effective values are written into its registers whenever the limits
-change in order to request its internal P-state selection logic to always set
-P-states within these limits.  Otherwise, the limits are taken into account by
-scaling governors (in the `passive mode <Passive Mode_>`_) and by the driver
+In the `active mode with the HWP feature enabled <Active Mode With HWP_>`_, the
+resulting effective values are written into hardware registers whenever the
+limits change in order to request its internal P-state selection logic to always
+set P-states within these limits.  Otherwise, the limits are taken into account
+by scaling governors (in the `passive mode <Passive Mode_>`_) and by the driver
 every time before setting a new P-state for a CPU.
 
 Additionally, if the ``intel_pstate=per_cpu_perf_limits`` command line argument
@@ -541,12 +538,11 @@ at all and the only way to set the limits is by using the policy attributes.
 Energy vs Performance Hints
 ---------------------------
 
-If ``intel_pstate`` works in the `active mode with the HWP feature enabled
-<Active Mode With HWP_>`_ in the processor, additional attributes are present
-in every ``CPUFreq`` policy directory in ``sysfs``.  They are intended to allow
-user space to help ``intel_pstate`` to adjust the processor's internal P-state
-selection logic by focusing it on performance or on energy-efficiency, or
-somewhere between the two extremes:
+If the hardware-managed P-states (HWP) feature is enabled in the processor,
+additional attributes, intended to allow user space to help ``intel_pstate``
+to adjust the processor's internal P-state selection logic by focusing it on
+performance or on energy-efficiency, or somewhere between the two extremes,
+are present in every ``CPUFreq`` policy directory in ``sysfs``.  They are:
 
 ``energy_performance_preference``
        Current value of the energy vs performance hint for the given policy
@@ -650,12 +646,14 @@ of them have to be prepended with the ``intel_pstate=`` prefix.
        Do not register ``intel_pstate`` as the scaling driver even if the
        processor is supported by it.
 
+``active``
+       Register ``intel_pstate`` in the `active mode <Active Mode_>`_ to start
+       with.
+
 ``passive``
        Register ``intel_pstate`` in the `passive mode <Passive Mode_>`_ to
        start with.
 
-       This option implies the ``no_hwp`` one described below.
-
 ``force``
        Register ``intel_pstate`` as the scaling driver instead of
        ``acpi-cpufreq`` even if the latter is preferred on the given system.
@@ -670,13 +668,12 @@ of them have to be prepended with the ``intel_pstate=`` prefix.
        driver is used instead of ``acpi-cpufreq``.
 
 ``no_hwp``
-       Do not enable the `hardware-managed P-states (HWP) feature
-       <Active Mode With HWP_>`_ even if it is supported by the processor.
+       Do not enable the hardware-managed P-states (HWP) feature even if it is
+       supported by the processor.
 
 ``hwp_only``
        Register ``intel_pstate`` as the scaling driver only if the
-       `hardware-managed P-states (HWP) feature <Active Mode With HWP_>`_ is
-       supported by the processor.
+       hardware-managed P-states (HWP) feature is supported by the processor.
 
 ``support_acpi_ppc``
        Take ACPI ``_PPC`` performance limits into account.
index 2ae9669..d4b32cc 100644 (file)
@@ -164,7 +164,8 @@ core_pattern
        %s              signal number
        %t              UNIX time of dump
        %h              hostname
-       %e              executable filename (may be shortened)
+       %e              executable filename (may be shortened; can be changed via prctl etc.)
+       %f              executable filename
        %E              executable path
        %c              maximum size of core file by resource limit RLIMIT_CORE
        %<OTHER>        both are dropped
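
The practical difference between %e and %f is easiest to see with a small test
program: after prctl(PR_SET_NAME) the comm value reported through %e changes,
while %f still expands to the on-disk executable name.  A sketch (the task name
used below is arbitrary)::

    #include <stdlib.h>
    #include <sys/prctl.h>

    int main(void)
    {
            /* Rename the task, then abort() to trigger a core dump so the
             * two core_pattern specifiers can be compared. */
            prctl(PR_SET_NAME, "renamed-task", 0, 0, 0);
            abort();
    }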
index d997cc3..4b9d2e8 100644 (file)
@@ -119,6 +119,21 @@ all zones are compacted such that free memory is available in contiguous
 blocks where possible. This can be important for example in the allocation of
 huge pages although processes will also directly compact memory as required.
 
+compaction_proactiveness
+========================
+
+This tunable takes a value in the range [0, 100] with a default value of
+20. It determines how aggressively compaction is done in the
+background. Setting it to 0 disables proactive compaction.
+
+Note that compaction has a non-trivial system-wide impact as pages
+belonging to different processes are moved around, which could also lead
+to latency spikes in unsuspecting applications. The kernel employs
+various heuristics to avoid wasting CPU cycles if it detects that
+proactive compaction is not being effective.
+
+Be careful when setting it to extreme values like 100, as that may
+cause excessive background compaction activity.
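
The tunable is exposed as ``/proc/sys/vm/compaction_proactiveness`` and can be
written like any other sysctl; a sketch in C that clamps the requested value to
the documented [0, 100] range (root privileges required)::

    #include <stdio.h>
    #include <stdlib.h>

    int main(int argc, char **argv)
    {
            int val = (argc > 1) ? atoi(argv[1]) : 20;  /* 20 is the default */
            FILE *f;

            if (val < 0)
                    val = 0;
            if (val > 100)
                    val = 100;

            f = fopen("/proc/sys/vm/compaction_proactiveness", "w");
            if (!f) {
                    perror("compaction_proactiveness");
                    return 1;
            }
            fprintf(f, "%d\n", val);
            fclose(f);
            return 0;
    }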
 
 compact_unevictable_allowed
 ===========================
index 3f7c3a7..d358780 100644 (file)
@@ -125,6 +125,9 @@ stable kernels.
 | Cavium         | ThunderX2 Core  | #219            | CAVIUM_TX2_ERRATUM_219      |
 +----------------+-----------------+-----------------+-----------------------------+
 +----------------+-----------------+-----------------+-----------------------------+
+| Marvell        | ARM-MMU-500     | #582743         | N/A                         |
++----------------+-----------------+-----------------+-----------------------------+
++----------------+-----------------+-----------------+-----------------------------+
 | Freescale/NXP  | LS2080A/LS1043A | A-008585        | FSL_ERRATUM_A008585         |
 +----------------+-----------------+-----------------+-----------------------------+
 +----------------+-----------------+-----------------+-----------------------------+
index 12a246f..2df7b06 100644 (file)
@@ -246,17 +246,6 @@ program is loaded the kernel will print warning message, so
 this helper is only useful for experiments and prototypes.
 Tracing BPF programs are root only.
 
-Q: bpf_trace_printk() helper warning
-------------------------------------
-Q: When bpf_trace_printk() helper is used the kernel prints nasty
-warning message. Why is that?
-
-A: This is done to nudge program authors into better interfaces when
-programs need to pass data to user space. Like bpf_perf_event_output()
-can be used to efficiently stream data via perf ring buffer.
-BPF maps can be used for asynchronous data sharing between kernel
-and user space. bpf_trace_printk() should only be used for debugging.
-
 Q: New functionality via kernel modules?
 ----------------------------------------
 Q: Can BPF functionality such as new program or map types, new
index d46429b..7df2465 100644 (file)
@@ -36,6 +36,12 @@ Two sets of Questions and Answers (Q&A) are maintained.
    bpf_devel_QA
 
 
+Helper functions
+================
+
+* `bpf-helpers(7)`_ maintains a list of helpers available to eBPF programs.
+
+
 Program types
 =============
 
@@ -79,4 +85,5 @@ Other
 .. _networking-filter: ../networking/filter.rst
 .. _man-pages: https://www.kernel.org/doc/man-pages/
 .. _bpf(2): https://man7.org/linux/man-pages/man2/bpf.2.html
+.. _bpf-helpers(7): https://man7.org/linux/man-pages/man7/bpf-helpers.7.html
 .. _BPF and XDP Reference Guide: https://docs.cilium.io/en/latest/bpf/
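
To give a feel for how the helpers listed in bpf-helpers(7) are called, here is
a minimal tracing program; it is only a sketch and assumes clang with
``-target bpf`` plus libbpf's ``bpf_helpers.h``::

    // SPDX-License-Identifier: GPL-2.0
    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    /* Logs a line to the trace_pipe on every execve() entry, using the
     * bpf_trace_printk() helper described in bpf-helpers(7). */
    SEC("tracepoint/syscalls/sys_enter_execve")
    int log_execve(void *ctx)
    {
            char msg[] = "execve entered\n";

            bpf_trace_printk(msg, sizeof(msg));
            return 0;
    }

    char LICENSE[] SEC("license") = "GPL";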
index 2de9058..70500b1 100644 (file)
@@ -557,7 +557,7 @@ phase. Currently, the capabilities are any of::
        CDC_DRIVE_STATUS        /* driver implements drive status */
 
 The capability flag is declared *const*, to prevent drivers from
-accidentally tampering with the contents. The capability fags actually
+accidentally tampering with the contents. The capability flags actually
 inform `cdrom.c` of what the driver can do. If the drive found
 by the driver does not have the capability, it can be masked out by
 the *cdrom_device_info* variable *mask*. For instance, the SCSI CD-ROM
@@ -736,7 +736,7 @@ Description of routines in `cdrom.c`
 
 Only a few routines in `cdrom.c` are exported to the drivers. In this
 new section we will discuss these, as well as the functions that `take
-over' the CD-ROM interface to the kernel. The header file belonging
+over` the CD-ROM interface to the kernel. The header file belonging
 to `cdrom.c` is called `cdrom.h`. Formerly, some of the contents of this
 file were placed in the file `ucdrom.h`, but this file has now been
 merged back into `cdrom.h`.
index a273805..2eb5afd 100644 (file)
@@ -20,48 +20,48 @@ only ID allocation, and as a result is much more memory-efficient.
 IDR usage
 =========
 
-Start by initialising an IDR, either with :c:func:`DEFINE_IDR`
-for statically allocated IDRs or :c:func:`idr_init` for dynamically
+Start by initialising an IDR, either with DEFINE_IDR()
+for statically allocated IDRs or idr_init() for dynamically
 allocated IDRs.
 
-You can call :c:func:`idr_alloc` to allocate an unused ID.  Look up
-the pointer you associated with the ID by calling :c:func:`idr_find`
-and free the ID by calling :c:func:`idr_remove`.
+You can call idr_alloc() to allocate an unused ID.  Look up
+the pointer you associated with the ID by calling idr_find()
+and free the ID by calling idr_remove().
 
 If you need to change the pointer associated with an ID, you can call
-:c:func:`idr_replace`.  One common reason to do this is to reserve an
+idr_replace().  One common reason to do this is to reserve an
 ID by passing a ``NULL`` pointer to the allocation function; initialise the
 object with the reserved ID and finally insert the initialised object
 into the IDR.
 
 Some users need to allocate IDs larger than ``INT_MAX``.  So far all of
 these users have been content with a ``UINT_MAX`` limit, and they use
-:c:func:`idr_alloc_u32`.  If you need IDs that will not fit in a u32,
+idr_alloc_u32().  If you need IDs that will not fit in a u32,
 we will work with you to address your needs.
 
 If you need to allocate IDs sequentially, you can use
-:c:func:`idr_alloc_cyclic`.  The IDR becomes less efficient when dealing
+idr_alloc_cyclic().  The IDR becomes less efficient when dealing
 with larger IDs, so using this function comes at a slight cost.
 
 To perform an action on all pointers used by the IDR, you can
-either use the callback-based :c:func:`idr_for_each` or the
-iterator-style :c:func:`idr_for_each_entry`.  You may need to use
-:c:func:`idr_for_each_entry_continue` to continue an iteration.  You can
-also use :c:func:`idr_get_next` if the iterator doesn't fit your needs.
+either use the callback-based idr_for_each() or the
+iterator-style idr_for_each_entry().  You may need to use
+idr_for_each_entry_continue() to continue an iteration.  You can
+also use idr_get_next() if the iterator doesn't fit your needs.
 
-When you have finished using an IDR, you can call :c:func:`idr_destroy`
+When you have finished using an IDR, you can call idr_destroy()
 to release the memory used by the IDR.  This will not free the objects
 pointed to from the IDR; if you want to do that, use one of the iterators
 to do it.
 
-You can use :c:func:`idr_is_empty` to find out whether there are any
+You can use idr_is_empty() to find out whether there are any
 IDs currently allocated.
 
 If you need to take a lock while allocating a new ID from the IDR,
 you may need to pass a restrictive set of GFP flags, which can lead
 to the IDR being unable to allocate memory.  To work around this,
-you can call :c:func:`idr_preload` before taking the lock, and then
-:c:func:`idr_preload_end` after the allocation.
+you can call idr_preload() before taking the lock, and then
+idr_preload_end() after the allocation.
 
 .. kernel-doc:: include/linux/idr.h
    :doc: idr sync
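
Tying the calls above together, a short in-kernel sketch (hypothetical driver
code, with most error handling trimmed)::

    #include <linux/idr.h>
    #include <linux/kernel.h>

    static DEFINE_IDR(widget_idr);

    /* Allocate an ID for @widget, look it up again, then release it.
     * idr_preload()/idr_preload_end() bracket the allocation as described
     * above for callers that must hold a lock around idr_alloc(). */
    static int widget_example(void *widget)
    {
            int id;

            idr_preload(GFP_KERNEL);
            id = idr_alloc(&widget_idr, widget, 0, 0, GFP_NOWAIT);
            idr_preload_end();
            if (id < 0)
                    return id;

            if (idr_find(&widget_idr, id) != widget)
                    pr_warn("unexpected IDR lookup result\n");

            idr_remove(&widget_idr, id);
            return 0;
    }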
index 6c791af..74c5e6a 100644 (file)
@@ -175,13 +175,20 @@ For example, to check drivers/net/wireless/ one may write::
     make coccicheck M=drivers/net/wireless/
 
 To apply Coccinelle on a file basis, instead of a directory basis, the
-following command may be used::
+C variable is used by the makefile to select which files to work with.
+This variable can be used to run scripts for the entire kernel, a
+specific directory, or a single file.
 
-    make C=1 CHECK="scripts/coccicheck"
+For example, to check drivers/bluetooth/bfusb.c, the value 1 is
+passed to the C variable to check files that make considers
+need to be compiled.::
 
-To check only newly edited code, use the value 2 for the C flag, i.e.::
+    make C=1 CHECK=scripts/coccicheck drivers/bluetooth/bfusb.o
 
-    make C=2 CHECK="scripts/coccicheck"
+The value 2 is passed to the C variable to check files regardless of
+whether they need to be compiled or not.::
+
+    make C=2 CHECK=scripts/coccicheck drivers/bluetooth/bfusb.o
 
 In these modes, which work on a file basis, there is no information
 about semantic patches displayed, and no commit message proposed.
index 0e52e96..c908ef4 100644 (file)
@@ -316,7 +316,7 @@ driver as a loadable kernel module kgdbwait will not do anything.
 Kernel parameter: ``kgdbcon``
 -----------------------------
 
-The ``kgdbcon`` feature allows you to see :c:func:`printk` messages inside gdb
+The ``kgdbcon`` feature allows you to see printk() messages inside gdb
 while gdb is connected to the kernel. Kdb does not make use of the kgdbcon
 feature.
 
@@ -432,7 +432,7 @@ This is a quick example of how to use kdb.
    ``ps``      Displays only the active processes
    ``ps A``    Shows all the processes
    ``summary`` Shows kernel version info and memory usage
-   ``bt``      Get a backtrace of the current process using :c:func:`dump_stack`
+   ``bt``      Get a backtrace of the current process using dump_stack()
    ``dmesg``   View the kernel syslog buffer
    ``go``      Continue the system
    =========== =================================================================
@@ -724,7 +724,7 @@ The kernel debugger is organized into a number of components:
    The arch-specific portion implements:
 
    -  contains an arch-specific trap catcher which invokes
-      :c:func:`kgdb_handle_exception` to start kgdb about doing its work
+      kgdb_handle_exception() to start kgdb about doing its work
 
    -  translation to and from gdb specific packet format to :c:type:`pt_regs`
 
@@ -769,7 +769,7 @@ The kernel debugger is organized into a number of components:
          config. Later run ``modprobe kdb_hello`` and the next time you
          enter the kdb shell, you can run the ``hello`` command.
 
-   -  The implementation for :c:func:`kdb_printf` which emits messages directly
+   -  The implementation for kdb_printf() which emits messages directly
       to I/O drivers, bypassing the kernel log.
 
    -  SW / HW breakpoint management for the kdb shell
@@ -875,7 +875,7 @@ kernel when ``CONFIG_KDB_KEYBOARD=y`` is set in the kernel configuration.
 The core polled keyboard driver for PS/2 type keyboards is in
 ``drivers/char/kdb_keyboard.c``. This driver is hooked into the debug core
 when kgdboc populates the callback in the array called
-:c:type:`kdb_poll_funcs[]`. The :c:func:`kdb_get_kbd_char` is the top-level
+:c:type:`kdb_poll_funcs[]`. The kdb_get_kbd_char() is the top-level
 function which polls hardware for single character input.
 
 kgdboc and kms
@@ -887,10 +887,10 @@ that you have a video driver which has a frame buffer console and atomic
 kernel mode setting support.
 
 Every time the kernel debugger is entered it calls
-:c:func:`kgdboc_pre_exp_handler` which in turn calls :c:func:`con_debug_enter`
+kgdboc_pre_exp_handler() which in turn calls con_debug_enter()
 in the virtual console layer. On resuming kernel execution, the kernel
-debugger calls :c:func:`kgdboc_post_exp_handler` which in turn calls
-:c:func:`con_debug_leave`.
+debugger calls kgdboc_post_exp_handler() which in turn calls
+con_debug_leave().
 
 Any video driver that wants to be compatible with the kernel debugger
 and the atomic kms callbacks must implement the ``mode_set_base_atomic``,
index 192ded4..f0daf99 100644 (file)
@@ -67,9 +67,9 @@ patternProperties:
       compatible:
         items:
           - enum:
-            - arm,integrator-ap-syscon
-            - arm,integrator-cp-syscon
-            - arm,integrator-sp-syscon
+              - arm,integrator-ap-syscon
+              - arm,integrator-cp-syscon
+              - arm,integrator-sp-syscon
           - const: syscon
       reg:
         maxItems: 1
index d6e85d1..1d0b4e2 100644 (file)
@@ -55,20 +55,20 @@ properties:
       compatible:
         oneOf:
           - items:
-            - const: arm,realview-eb-soc
-            - const: simple-bus
+              - const: arm,realview-eb-soc
+              - const: simple-bus
           - items:
-            - const: arm,realview-pb1176-soc
-            - const: simple-bus
+              - const: arm,realview-pb1176-soc
+              - const: simple-bus
           - items:
-            - const: arm,realview-pb11mp-soc
-            - const: simple-bus
+              - const: arm,realview-pb11mp-soc
+              - const: simple-bus
           - items:
-            - const: arm,realview-pba8-soc
-            - const: simple-bus
+              - const: arm,realview-pba8-soc
+              - const: simple-bus
           - items:
-            - const: arm,realview-pbx-soc
-            - const: simple-bus
+              - const: arm,realview-pbx-soc
+              - const: simple-bus
 
     patternProperties:
       "^.*syscon@[0-9a-f]+$":
@@ -79,35 +79,35 @@ properties:
           compatible:
             oneOf:
               - items:
-                - const: arm,realview-eb11mp-revb-syscon
-                - const: arm,realview-eb-syscon
-                - const: syscon
-                - const: simple-mfd
+                  - const: arm,realview-eb11mp-revb-syscon
+                  - const: arm,realview-eb-syscon
+                  - const: syscon
+                  - const: simple-mfd
               - items:
-                - const: arm,realview-eb11mp-revc-syscon
-                - const: arm,realview-eb-syscon
-                - const: syscon
-                - const: simple-mfd
+                  - const: arm,realview-eb11mp-revc-syscon
+                  - const: arm,realview-eb-syscon
+                  - const: syscon
+                  - const: simple-mfd
               - items:
-                - const: arm,realview-eb-syscon
-                - const: syscon
-                - const: simple-mfd
+                  - const: arm,realview-eb-syscon
+                  - const: syscon
+                  - const: simple-mfd
               - items:
-                - const: arm,realview-pb1176-syscon
-                - const: syscon
-                - const: simple-mfd
+                  - const: arm,realview-pb1176-syscon
+                  - const: syscon
+                  - const: simple-mfd
               - items:
-                - const: arm,realview-pb11mp-syscon
-                - const: syscon
-                - const: simple-mfd
+                  - const: arm,realview-pb11mp-syscon
+                  - const: syscon
+                  - const: simple-mfd
               - items:
-                - const: arm,realview-pba8-syscon
-                - const: syscon
-                - const: simple-mfd
+                  - const: arm,realview-pba8-syscon
+                  - const: syscon
+                  - const: simple-mfd
               - items:
-                - const: arm,realview-pbx-syscon
-                - const: syscon
-                - const: simple-mfd
+                  - const: arm,realview-pbx-syscon
+                  - const: syscon
+                  - const: simple-mfd
 
         required:
           - compatible
index a3420c8..26829a8 100644 (file)
@@ -165,10 +165,10 @@ patternProperties:
       compatible:
         oneOf:
           - items:
-            - enum:
-              - arm,vexpress,v2m-p1
-              - arm,vexpress,v2p-p1
-            - const: simple-bus
+              - enum:
+                  - arm,vexpress,v2m-p1
+                  - arm,vexpress,v2p-p1
+              - const: simple-bus
           - const: simple-bus
       motherboard:
         type: object
@@ -186,8 +186,8 @@ patternProperties:
           compatible:
             items:
               - enum:
-                - arm,vexpress,v2m-p1
-                - arm,vexpress,v2p-p1
+                  - arm,vexpress,v2m-p1
+                  - arm,vexpress,v2p-p1
               - const: simple-bus
           arm,v2m-memory-map:
             description: This describes the memory map type.
index b5ef266..497600a 100644 (file)
@@ -15,7 +15,7 @@ properties:
   compatible:
     items:
       - enum:
-        - brcm,bcm28155-ap
+          - brcm,bcm28155-ap
       - const: brcm,bcm11351
 
 ...
index aafbd6a..e0ee931 100644 (file)
@@ -15,7 +15,7 @@ properties:
   compatible:
     items:
       - enum:
-        - brcm,bcm21664-garnet
+          - brcm,bcm21664-garnet
       - const: brcm,bcm21664
 
 ...
index c4b4efd..40d12ea 100644 (file)
@@ -15,7 +15,7 @@ properties:
   compatible:
     items:
       - enum:
-        - brcm,bcm23550-sparrow
+          - brcm,bcm23550-sparrow
       - const: brcm,bcm23550
 
 ...
index fe111e7..9ba7b16 100644 (file)
@@ -7,8 +7,8 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Broadcom Cygnus device tree bindings
 
 maintainers:
-   - Ray Jui <rjui@broadcom.com>
-   - Scott Branden <sbranden@broadcom.com>
+  - Ray Jui <rjui@broadcom.com>
+  - Scott Branden <sbranden@broadcom.com>
 
 properties:
   $nodename:
@@ -16,14 +16,14 @@ properties:
   compatible:
     items:
       - enum:
-        - brcm,bcm11300
-        - brcm,bcm11320
-        - brcm,bcm11350
-        - brcm,bcm11360
-        - brcm,bcm58300
-        - brcm,bcm58302
-        - brcm,bcm58303
-        - brcm,bcm58305
+          - brcm,bcm11300
+          - brcm,bcm11320
+          - brcm,bcm11350
+          - brcm,bcm11360
+          - brcm,bcm58300
+          - brcm,bcm58302
+          - brcm,bcm58303
+          - brcm,bcm58305
       - const: brcm,cygnus
 
 ...
index 1158f49..ae614b6 100644 (file)
@@ -21,7 +21,7 @@ properties:
   compatible:
     items:
       - enum:
-        - ubnt,unifi-switch8
+          - ubnt,unifi-switch8
       - const: brcm,bcm53342
       - const: brcm,hr2
 
index 2451704..0749adf 100644 (file)
@@ -16,8 +16,8 @@ properties:
   compatible:
     items:
       - enum:
-        - brcm,ns2-svk
-        - brcm,ns2-xmc
+          - brcm,ns2-svk
+          - brcm,ns2-xmc
       - const: brcm,ns2
 
 ...
index fe364ce..8c2cacb 100644 (file)
@@ -24,13 +24,13 @@ properties:
   compatible:
     items:
       - enum:
-        - brcm,bcm58522
-        - brcm,bcm58525
-        - brcm,bcm58535
-        - brcm,bcm58622
-        - brcm,bcm58623
-        - brcm,bcm58625
-        - brcm,bcm88312
+          - brcm,bcm58522
+          - brcm,bcm58525
+          - brcm,bcm58535
+          - brcm,bcm58622
+          - brcm,bcm58623
+          - brcm,bcm58625
+          - brcm,bcm88312
       - const: brcm,nsp
 
 ...
index 4ad2b21..c13cb96 100644 (file)
@@ -16,9 +16,9 @@ properties:
   compatible:
     items:
       - enum:
-        - brcm,bcm958742k
-        - brcm,bcm958742t
-        - brcm,bcm958802a802x
+          - brcm,bcm958742k
+          - brcm,bcm958742t
+          - brcm,bcm958802a802x
       - const: brcm,stingray
 
 ...
index c5b6f31..ccdf9f9 100644 (file)
@@ -15,8 +15,8 @@ properties:
   compatible:
     items:
       - enum:
-        - brcm,vulcan-eval
-        - cavium,thunderx2-cn9900
+          - brcm,vulcan-eval
+          - cavium,thunderx2-cn9900
       - const: brcm,vulcan-soc
 
 ...
index b48ed87..17e4f20 100644 (file)
@@ -10,6 +10,15 @@ maintainers:
   - Eric Anholt <eric@anholt.net>
   - Stefan Wahren <wahrenst@gmx.net>
 
+select:
+  properties:
+    compatible:
+      contains:
+        const: raspberrypi,bcm2835-firmware
+
+  required:
+    - compatible
+
 properties:
   compatible:
     items:
index 17df5cd..e42ff69 100644 (file)
@@ -82,12 +82,12 @@ properties:
   compatible:
     oneOf:
       - items:
-        - const: arm,coresight-cti
-        - const: arm,primecell
+          - const: arm,coresight-cti
+          - const: arm,primecell
       - items:
-        - const: arm,coresight-cti-v8-arch
-        - const: arm,coresight-cti
-        - const: arm,primecell
+          - const: arm,coresight-cti-v8-arch
+          - const: arm,coresight-cti
+          - const: arm,primecell
 
   reg:
     maxItems: 1
@@ -191,16 +191,16 @@ patternProperties:
 
     anyOf:
       - required:
-        - arm,trig-in-sigs
+          - arm,trig-in-sigs
       - required:
-        - arm,trig-out-sigs
+          - arm,trig-out-sigs
     oneOf:
       - required:
-        - arm,trig-conn-name
+          - arm,trig-conn-name
       - required:
-        - cpu
+          - cpu
       - required:
-        - arm,cs-dev-assoc
+          - arm,cs-dev-assoc
     required:
       - reg
 
index 40f692c..1222bf1 100644 (file)
@@ -330,8 +330,8 @@ if:
     - enable-method
 
 then:
-   required:
-     - secondary-boot-reg
+  required:
+    - secondary-boot-reg
 
 required:
   - device_type
index f63895c..6da9d73 100644 (file)
@@ -273,8 +273,8 @@ properties:
               - fsl,imx6ull-14x14-evk     # i.MX6 UltraLiteLite 14x14 EVK Board
               - kontron,imx6ull-n6411-som # Kontron N6411 SOM
               - myir,imx6ull-mys-6ulx-eval # MYiR Tech iMX6ULL Evaluation Board
-              - toradex,colibri-imx6ull-eval            # Colibri iMX6ULL Module on Colibri Evaluation Board
-              - toradex,colibri-imx6ull-wifi-eval       # Colibri iMX6ULL Wi-Fi / Bluetooth Module on Colibri Evaluation Board
+              - toradex,colibri-imx6ull-eval      # Colibri iMX6ULL Module on Colibri Eval Board
+              - toradex,colibri-imx6ull-wifi-eval # Colibri iMX6ULL Wi-Fi / BT Module on Colibri Eval Board
           - const: fsl,imx6ull
 
       - description: Kontron N6411 S Board
@@ -312,9 +312,12 @@ properties:
               - toradex,colibri-imx7d                   # Colibri iMX7 Dual Module
               - toradex,colibri-imx7d-aster             # Colibri iMX7 Dual Module on Aster Carrier Board
               - toradex,colibri-imx7d-emmc              # Colibri iMX7 Dual 1GB (eMMC) Module
-              - toradex,colibri-imx7d-emmc-aster        # Colibri iMX7 Dual 1GB (eMMC) Module on Aster Carrier Board
-              - toradex,colibri-imx7d-emmc-eval-v3      # Colibri iMX7 Dual 1GB (eMMC) Module on Colibri Evaluation Board V3
-              - toradex,colibri-imx7d-eval-v3           # Colibri iMX7 Dual Module on Colibri Evaluation Board V3
+              - toradex,colibri-imx7d-emmc-aster        # Colibri iMX7 Dual 1GB (eMMC) Module on
+                                                        #  Aster Carrier Board
+              - toradex,colibri-imx7d-emmc-eval-v3      # Colibri iMX7 Dual 1GB (eMMC) Module on
+                                                        #  Colibri Evaluation Board V3
+              - toradex,colibri-imx7d-eval-v3           # Colibri iMX7 Dual Module on
+                                                        #  Colibri Evaluation Board V3
               - tq,imx7d-mba7             # i.MX7D TQ MBa7 with TQMa7D SoM
               - zii,imx7d-rmu2            # ZII RMU2 Board
               - zii,imx7d-rpu2            # ZII RPU2 Board
index 4d92578..06a7b05 100644 (file)
@@ -14,6 +14,6 @@ properties:
   compatible:
     items:
       - enum:
-        - intel,keembay-evm
+          - intel,keembay-evm
       - const: intel,keembay
 ...
diff --git a/Documentation/devicetree/bindings/arm/keystone/ti,k3-sci-common.yaml b/Documentation/devicetree/bindings/arm/keystone/ti,k3-sci-common.yaml
new file mode 100644 (file)
index 0000000..7597bc9
--- /dev/null
@@ -0,0 +1,44 @@
+# SPDX-License-Identifier: (GPL-2.0-only or BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/arm/keystone/ti,k3-sci-common.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Common K3 TI-SCI bindings
+
+maintainers:
+  - Nishanth Menon <nm@ti.com>
+
+description: |
+  The TI K3 family of SoCs usually have a central System Controller Processor
+  that is responsible for managing various SoC-level resources like clocks,
+  resets, interrupts etc. The communication with that processor is performed
+  through the TI-SCI protocol.
+
+  Each specific device management node like a clock controller node, a reset
+  controller node or an interrupt-controller node should define a common set
+  of properties that enables them to implement the corresponding functionality
+  over the TI-SCI protocol. The following are some of the common properties
+  needed by such individual nodes. The required properties for each device
+  management node are defined in the respective binding.
+
+properties:
+  ti,sci:
+    $ref: /schemas/types.yaml#/definitions/phandle
+    description:
+      Should be a phandle to the TI-SCI System Controller node
+
+  ti,sci-dev-id:
+    $ref: /schemas/types.yaml#/definitions/uint32
+    description: |
+      Should contain the TI-SCI device id corresponding to the device. Please
+      refer to the corresponding System Controller documentation for valid
+      values for the desired device.
+
+  ti,sci-proc-ids:
+    description: Should contain a single tuple of <proc_id host_id>.
+    $ref: /schemas/types.yaml#/definitions/uint32-array
+    items:
+      - description: TI-SCI processor id for the remote processor device
+      - description: TI-SCI host id to which processor control ownership
+                     should be transferred
index e271c46..1af3017 100644 (file)
@@ -17,22 +17,22 @@ properties:
   compatible:
     oneOf:
       - items:
-        - enum:
-          - mediatek,mt2701-pericfg
-          - mediatek,mt2712-pericfg
-          - mediatek,mt6765-pericfg
-          - mediatek,mt7622-pericfg
-          - mediatek,mt7629-pericfg
-          - mediatek,mt8135-pericfg
-          - mediatek,mt8173-pericfg
-          - mediatek,mt8183-pericfg
-          - mediatek,mt8516-pericfg
-        - const: syscon
+          - enum:
+              - mediatek,mt2701-pericfg
+              - mediatek,mt2712-pericfg
+              - mediatek,mt6765-pericfg
+              - mediatek,mt7622-pericfg
+              - mediatek,mt7629-pericfg
+              - mediatek,mt8135-pericfg
+              - mediatek,mt8173-pericfg
+              - mediatek,mt8183-pericfg
+              - mediatek,mt8516-pericfg
+          - const: syscon
       - items:
-        # Special case for mt7623 for backward compatibility
-        - const: mediatek,mt7623-pericfg
-        - const: mediatek,mt2701-pericfg
-        - const: syscon
+          # Special case for mt7623 for backward compatibility
+          - const: mediatek,mt7623-pericfg
+          - const: mediatek,mt2701-pericfg
+          - const: syscon
 
   reg:
     maxItems: 1
index 68b0131..37ba333 100644 (file)
@@ -19,7 +19,7 @@ description: |
   reported to the APB terminator (APB Errors Handler Block).
 
 allOf:
- - $ref: /schemas/simple-bus.yaml#
 - $ref: /schemas/simple-bus.yaml#
 
 properties:
   compatible:
index 29e1aae..0bee469 100644 (file)
@@ -23,7 +23,7 @@ description: |
   accessible by means of the Baikal-T1 System Controller.
 
 allOf:
- - $ref: /schemas/simple-bus.yaml#
 - $ref: /schemas/simple-bus.yaml#
 
 properties:
   compatible:
diff --git a/Documentation/devicetree/bindings/clock/idt,versaclock5.txt b/Documentation/devicetree/bindings/clock/idt,versaclock5.txt
deleted file mode 100644 (file)
index 6165b6d..0000000
+++ /dev/null
@@ -1,125 +0,0 @@
-Binding for IDT VersaClock 5,6 programmable i2c clock generators.
-
-The IDT VersaClock 5 and VersaClock 6 are programmable i2c clock
-generators providing from 3 to 12 output clocks.
-
-==I2C device node==
-
-Required properties:
-- compatible:  shall be one of
-               "idt,5p49v5923"
-               "idt,5p49v5925"
-               "idt,5p49v5933"
-               "idt,5p49v5935"
-               "idt,5p49v6901"
-               "idt,5p49v6965"
-- reg:         i2c device address, shall be 0x68 or 0x6a.
-- #clock-cells:        from common clock binding; shall be set to 1.
-- clocks:      from common clock binding; list of parent clock handles,
-               - 5p49v5923 and
-                 5p49v5925 and
-                 5p49v6901: (required) either or both of XTAL or CLKIN
-                                       reference clock.
-               - 5p49v5933 and
-               - 5p49v5935: (optional) property not present (internal
-                                       Xtal used) or CLKIN reference
-                                       clock.
-- clock-names: from common clock binding; clock input names, can be
-               - 5p49v5923 and
-                 5p49v5925 and
-                 5p49v6901: (required) either or both of "xin", "clkin".
-               - 5p49v5933 and
-               - 5p49v5935: (optional) property not present or "clkin".
-
-For all output ports, a corresponding, optional child node named OUT1,
-OUT2, etc. can represent a each output, and the node can be used to
-specify the following:
-
-- itd,mode: can be one of the following:
-                 - VC5_LVPECL
-                 - VC5_CMOS
-                 - VC5_HCSL33
-                 - VC5_LVDS
-                 - VC5_CMOS2
-                 - VC5_CMOSD
-                 - VC5_HCSL25
-
-- idt,voltage-microvolts:  can be one of the following
-                 - 1800000
-                 - 2500000
-                 - 3300000
--  idt,slew-percent: Percent of normal, can be one of
-                 - 80
-                 - 85
-                 - 90
-                 - 100
-
-==Mapping between clock specifier and physical pins==
-
-When referencing the provided clock in the DT using phandle and
-clock specifier, the following mapping applies:
-
-5P49V5923:
-       0 -- OUT0_SEL_I2CB
-       1 -- OUT1
-       2 -- OUT2
-
-5P49V5933:
-       0 -- OUT0_SEL_I2CB
-       1 -- OUT1
-       2 -- OUT4
-
-5P49V5925 and
-5P49V5935:
-       0 -- OUT0_SEL_I2CB
-       1 -- OUT1
-       2 -- OUT2
-       3 -- OUT3
-       4 -- OUT4
-
-5P49V6901:
-       0 -- OUT0_SEL_I2CB
-       1 -- OUT1
-       2 -- OUT2
-       3 -- OUT3
-       4 -- OUT4
-
-==Example==
-
-/* 25MHz reference crystal */
-ref25: ref25m {
-       compatible = "fixed-clock";
-       #clock-cells = <0>;
-       clock-frequency = <25000000>;
-};
-
-i2c-master-node {
-
-       /* IDT 5P49V5923 i2c clock generator */
-       vc5: clock-generator@6a {
-               compatible = "idt,5p49v5923";
-               reg = <0x6a>;
-               #clock-cells = <1>;
-
-               /* Connect XIN input to 25MHz reference */
-               clocks = <&ref25m>;
-               clock-names = "xin";
-
-               OUT1 {
-                       itd,mode = <VC5_CMOS>;
-                       idt,voltage-microvolts = <1800000>;
-                       idt,slew-percent = <80>;
-               };
-               OUT2 {
-                       ...
-               };
-               ...
-       };
-};
-
-/* Consumer referencing the 5P49V5923 pin OUT1 */
-consumer {
-       ...
-       clocks = <&vc5 1>;
-       ...
-}
diff --git a/Documentation/devicetree/bindings/clock/idt,versaclock5.yaml b/Documentation/devicetree/bindings/clock/idt,versaclock5.yaml
new file mode 100644 (file)
index 0000000..28c6461
--- /dev/null
@@ -0,0 +1,154 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/idt,versaclock5.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Binding for IDT VersaClock 5 and 6 programmable I2C clock generators
+
+description: |
+  The IDT VersaClock 5 and VersaClock 6 are programmable I2C
+  clock generators providing from 3 to 12 output clocks.
+
+  When referencing the provided clock in the DT using phandle and clock
+  specifier, the following mapping applies:
+
+  - 5P49V5923:
+    0 -- OUT0_SEL_I2CB
+    1 -- OUT1
+    2 -- OUT2
+
+  - 5P49V5933:
+    0 -- OUT0_SEL_I2CB
+    1 -- OUT1
+    2 -- OUT4
+
+  - other parts:
+    0 -- OUT0_SEL_I2CB
+    1 -- OUT1
+    2 -- OUT2
+    3 -- OUT3
+    4 -- OUT4
+
+maintainers:
+  - Luca Ceresoli <luca@lucaceresoli.net>
+
+properties:
+  compatible:
+    enum:
+      - idt,5p49v5923
+      - idt,5p49v5925
+      - idt,5p49v5933
+      - idt,5p49v5935
+      - idt,5p49v6901
+      - idt,5p49v6965
+
+  reg:
+    description: I2C device address
+    enum: [ 0x68, 0x6a ]
+
+  '#clock-cells':
+    const: 1
+
+patternProperties:
+  "^OUT[1-4]$":
+    type: object
+    description:
+      Description of one of the outputs (OUT1..OUT4). See "Clock1 Output
+      Configuration" in the Versaclock 5/6/6E Family Register Description
+      and Programming Guide.
+    properties:
+      idt,mode:
+        description:
+          The output drive mode. Values defined in dt-bindings/clk/versaclock.h
+        $ref: /schemas/types.yaml#/definitions/uint32
+        minimum: 0
+        maximum: 6
+      idt,voltage-microvolt:
+        description: The output drive voltage.
+        enum: [ 1800000, 2500000, 3300000 ]
+      idt,slew-percent:
+        description: The Slew rate control for CMOS single-ended.
+        $ref: /schemas/types.yaml#/definitions/uint32
+        enum: [ 80, 85, 90, 100 ]
+
+required:
+  - compatible
+  - reg
+  - '#clock-cells'
+
+allOf:
+  - if:
+      properties:
+        compatible:
+          enum:
+            - idt,5p49v5933
+            - idt,5p49v5935
+    then:
+      # Devices with builtin crystal + optional external input
+      properties:
+        clock-names:
+          const: clkin
+        clocks:
+          maxItems: 1
+    else:
+      # Devices without builtin crystal
+      properties:
+        clock-names:
+          minItems: 1
+          maxItems: 2
+          items:
+            enum: [ xin, clkin ]
+        clocks:
+          minItems: 1
+          maxItems: 2
+      required:
+        - clock-names
+        - clocks
+
+examples:
+  - |
+    #include <dt-bindings/clk/versaclock.h>
+
+    /* 25MHz reference crystal */
+    ref25: ref25m {
+        compatible = "fixed-clock";
+        #clock-cells = <0>;
+        clock-frequency = <25000000>;
+    };
+
+    i2c@0 {
+        reg = <0x0 0x100>;
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        /* IDT 5P49V5923 I2C clock generator */
+        vc5: clock-generator@6a {
+            compatible = "idt,5p49v5923";
+            reg = <0x6a>;
+            #clock-cells = <1>;
+
+            /* Connect XIN input to 25MHz reference */
+            clocks = <&ref25m>;
+            clock-names = "xin";
+
+            OUT1 {
+                idt,mode = <VC5_CMOSD>;
+                idt,voltage-microvolt = <1800000>;
+                idt,slew-percent = <80>;
+            };
+
+            OUT4 {
+                idt,mode = <VC5_LVDS>;
+            };
+        };
+    };
+
+    /* Consumer referencing the 5P49V5923 pin OUT1 */
+    consumer {
+        /* ... */
+        clocks = <&vc5 1>;
+        /* ... */
+    };
+
+...
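
For completeness (this is not part of the binding itself), a consumer driver
would normally pick up one of these outputs through the common clock framework;
a hedged sketch with a made-up probe function::

    #include <linux/clk.h>
    #include <linux/err.h>
    #include <linux/platform_device.h>

    /* Hypothetical consumer: acquires the clock referenced by its DT node
     * (e.g. <&vc5 1> for OUT1 in the example above) and enables it. */
    static int consumer_probe(struct platform_device *pdev)
    {
            struct clk *clk = devm_clk_get(&pdev->dev, NULL);

            if (IS_ERR(clk))
                    return PTR_ERR(clk);

            return clk_prepare_enable(clk);
    }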
index 66cb238..ad21899 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Clock bindings for Freescale i.MX23
 
 maintainers:
-  - Shawn Guo <shawn.guo@linaro.org>
+  - Shawn Guo <shawnguo@kernel.org>
 
 description: |
   The clock consumer should specify the desired clock by having the clock
index 72328d5..f1af110 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Clock bindings for Freescale i.MX28
 
 maintainers:
-  - Shawn Guo <shawn.guo@linaro.org>
+  - Shawn Guo <shawnguo@kernel.org>
 
 description: |
   The clock consumer should specify the desired clock by having the clock
index a952d58..5dd7ea8 100644 (file)
@@ -47,12 +47,12 @@ properties:
   compatible:
     items:
       - enum:
-        - ingenic,jz4740-cgu
-        - ingenic,jz4725b-cgu
-        - ingenic,jz4770-cgu
-        - ingenic,jz4780-cgu
-        - ingenic,x1000-cgu
-        - ingenic,x1830-cgu
+          - ingenic,jz4740-cgu
+          - ingenic,jz4725b-cgu
+          - ingenic,jz4770-cgu
+          - ingenic,jz4780-cgu
+          - ingenic,x1000-cgu
+          - ingenic,x1830-cgu
       - const: simple-mfd
     minItems: 1
 
@@ -68,8 +68,8 @@ properties:
     items:
       - const: ext
       - enum:
-        - rtc
-        - osc32k # Different name, same clock
+          - rtc
+          - osc32k # Different name, same clock
 
   assigned-clocks:
     minItems: 1
@@ -1,23 +1,31 @@
 # SPDX-License-Identifier: GPL-2.0-only
 %YAML 1.2
 ---
-$id: http://devicetree.org/schemas/clock/qcom,sdm845-gpucc.yaml#
+$id: http://devicetree.org/schemas/clock/qcom,gpucc.yaml#
 $schema: http://devicetree.org/meta-schemas/core.yaml#
 
-title: Qualcomm Graphics Clock & Reset Controller Binding for SDM845
+title: Qualcomm Graphics Clock & Reset Controller Binding
 
 maintainers:
   - Taniya Das <tdas@codeaurora.org>
 
 description: |
   Qualcomm graphics clock control module which supports the clocks, resets and
-  power domains on SDM845.
+  power domains on SDM845/SC7180/SM8150/SM8250.
 
-  See also dt-bindings/clock/qcom,gpucc-sdm845.h.
+  See also:
+    dt-bindings/clock/qcom,gpucc-sdm845.h
+    dt-bindings/clock/qcom,gpucc-sc7180.h
+    dt-bindings/clock/qcom,gpucc-sm8150.h
+    dt-bindings/clock/qcom,gpucc-sm8250.h
 
 properties:
   compatible:
-    const: qcom,sdm845-gpucc
+    enum:
+      - qcom,sdm845-gpucc
+      - qcom,sc7180-gpucc
+      - qcom,sm8150-gpucc
+      - qcom,sm8250-gpucc
 
   clocks:
     items:
index 1b16a86..af32dee 100644 (file)
@@ -65,7 +65,7 @@ properties:
 
   protected-clocks:
     description:
-       Protected clock specifier list as per common clock binding
+      Protected clock specifier list as per common clock binding
 
   vdd-gfx-supply:
     description:
index d673ede..a20cb10 100644 (file)
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0-only
 %YAML 1.2
 ---
-$id: http://devicetree.org/schemas/clock/qcom,kryocc.yaml#
+$id: http://devicetree.org/schemas/clock/qcom,msm8996-apcc.yaml#
 $schema: http://devicetree.org/meta-schemas/core.yaml#
 
 title: Qualcomm clock controller for MSM8996 CPUs
@@ -46,11 +46,9 @@ required:
 additionalProperties: false
 
 examples:
-  # Example for msm8996
   - |
     kryocc: clock-controller@6400000 {
         compatible = "qcom,msm8996-apcc";
         reg = <0x6400000 0x90000>;
         #clock-cells = <1>;
-  };
-...
+    };
diff --git a/Documentation/devicetree/bindings/clock/qcom,sc7180-gpucc.yaml b/Documentation/devicetree/bindings/clock/qcom,sc7180-gpucc.yaml
deleted file mode 100644 (file)
index fe08461..0000000
+++ /dev/null
@@ -1,74 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-%YAML 1.2
----
-$id: http://devicetree.org/schemas/clock/qcom,sc7180-gpucc.yaml#
-$schema: http://devicetree.org/meta-schemas/core.yaml#
-
-title: Qualcomm Graphics Clock & Reset Controller Binding for SC7180
-
-maintainers:
-  - Taniya Das <tdas@codeaurora.org>
-
-description: |
-  Qualcomm graphics clock control module which supports the clocks, resets and
-  power domains on SC7180.
-
-  See also dt-bindings/clock/qcom,gpucc-sc7180.h.
-
-properties:
-  compatible:
-    const: qcom,sc7180-gpucc
-
-  clocks:
-    items:
-      - description: Board XO source
-      - description: GPLL0 main branch source
-      - description: GPLL0 div branch source
-
-  clock-names:
-    items:
-      - const: bi_tcxo
-      - const: gcc_gpu_gpll0_clk_src
-      - const: gcc_gpu_gpll0_div_clk_src
-
-  '#clock-cells':
-    const: 1
-
-  '#reset-cells':
-    const: 1
-
-  '#power-domain-cells':
-    const: 1
-
-  reg:
-    maxItems: 1
-
-required:
-  - compatible
-  - reg
-  - clocks
-  - clock-names
-  - '#clock-cells'
-  - '#reset-cells'
-  - '#power-domain-cells'
-
-additionalProperties: false
-
-examples:
-  - |
-    #include <dt-bindings/clock/qcom,gcc-sc7180.h>
-    #include <dt-bindings/clock/qcom,rpmh.h>
-    clock-controller@5090000 {
-      compatible = "qcom,sc7180-gpucc";
-      reg = <0x05090000 0x9000>;
-      clocks = <&rpmhcc RPMH_CXO_CLK>,
-               <&gcc GCC_GPU_GPLL0_CLK_SRC>,
-               <&gcc GCC_GPU_GPLL0_DIV_CLK_SRC>;
-      clock-names = "bi_tcxo",
-                    "gcc_gpu_gpll0_clk_src",
-                    "gcc_gpu_gpll0_div_clk_src";
-      #clock-cells = <1>;
-      #reset-cells = <1>;
-      #power-domain-cells = <1>;
-    };
-...
diff --git a/Documentation/devicetree/bindings/clock/qcom,sc7180-lpasscorecc.yaml b/Documentation/devicetree/bindings/clock/qcom,sc7180-lpasscorecc.yaml
new file mode 100644 (file)
index 0000000..c54172f
--- /dev/null
@@ -0,0 +1,108 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/qcom,sc7180-lpasscorecc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm LPASS Core Clock Controller Binding for SC7180
+
+maintainers:
+  - Taniya Das <tdas@codeaurora.org>
+
+description: |
+  Qualcomm LPASS core clock control module which supports the clocks and
+  power domains on SC7180.
+
+  See also:
+  - dt-bindings/clock/qcom,lpasscorecc-sc7180.h
+
+properties:
+  compatible:
+    enum:
+      - qcom,sc7180-lpasshm
+      - qcom,sc7180-lpasscorecc
+
+  clocks:
+    items:
+      - description: gcc_lpass_sway clock from GCC
+      - description: Board XO source
+
+  clock-names:
+    items:
+      - const: iface
+      - const: bi_tcxo
+
+  power-domains:
+    maxItems: 1
+
+  '#clock-cells':
+    const: 1
+
+  '#power-domain-cells':
+    const: 1
+
+  reg:
+    minItems: 1
+    items:
+      - description: lpass core cc register
+      - description: lpass audio cc register
+
+  reg-names:
+    items:
+      - const: lpass_core_cc
+      - const: lpass_audio_cc
+
+if:
+  properties:
+    compatible:
+      contains:
+        const: qcom,sc7180-lpasshm
+then:
+  properties:
+    reg:
+      maxItems: 1
+
+else:
+  properties:
+    reg:
+      minItems: 2
+
+required:
+  - compatible
+  - reg
+  - clocks
+  - clock-names
+  - '#clock-cells'
+  - '#power-domain-cells'
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/clock/qcom,rpmh.h>
+    #include <dt-bindings/clock/qcom,gcc-sc7180.h>
+    #include <dt-bindings/clock/qcom,lpasscorecc-sc7180.h>
+    clock-controller@63000000 {
+      compatible = "qcom,sc7180-lpasshm";
+      reg = <0x63000000 0x28>;
+      clocks = <&gcc GCC_LPASS_CFG_NOC_SWAY_CLK>, <&rpmhcc RPMH_CXO_CLK>;
+      clock-names = "iface", "bi_tcxo";
+      #clock-cells = <1>;
+      #power-domain-cells = <1>;
+    };
+
+  - |
+    #include <dt-bindings/clock/qcom,rpmh.h>
+    #include <dt-bindings/clock/qcom,gcc-sc7180.h>
+    #include <dt-bindings/clock/qcom,lpasscorecc-sc7180.h>
+    clock-controller@62d00000 {
+      compatible = "qcom,sc7180-lpasscorecc";
+      reg = <0x62d00000 0x50000>, <0x62780000 0x30000>;
+      reg-names = "lpass_core_cc", "lpass_audio_cc";
+      clocks = <&gcc GCC_LPASS_CFG_NOC_SWAY_CLK>, <&rpmhcc RPMH_CXO_CLK>;
+      clock-names = "iface", "bi_tcxo";
+      power-domains = <&lpass_hm LPASS_CORE_HM_GDSCR>;
+      #clock-cells = <1>;
+      #power-domain-cells = <1>;
+    };
+...
index b83f413..9185d10 100644 (file)
@@ -19,15 +19,15 @@ description:
 properties:
   compatible:
     oneOf:
-      - const: renesas,r8a73a4-cpg-clocks # R-Mobile APE6
-      - const: renesas,r8a7740-cpg-clocks # R-Mobile A1
-      - const: renesas,r8a7778-cpg-clocks # R-Car M1
-      - const: renesas,r8a7779-cpg-clocks # R-Car H1
+      - const: renesas,r8a73a4-cpg-clocks     # R-Mobile APE6
+      - const: renesas,r8a7740-cpg-clocks     # R-Mobile A1
+      - const: renesas,r8a7778-cpg-clocks     # R-Car M1
+      - const: renesas,r8a7779-cpg-clocks     # R-Car H1
       - items:
-        - enum:
-            - renesas,r7s72100-cpg-clocks # RZ/A1H
-        - const: renesas,rz-cpg-clocks    # RZ/A1
-      - const: renesas,sh73a0-cpg-clocks  # SH-Mobile AG5
+          - enum:
+              - renesas,r7s72100-cpg-clocks   # RZ/A1H
+          - const: renesas,rz-cpg-clocks      # RZ/A1
+      - const: renesas,sh73a0-cpg-clocks      # SH-Mobile AG5
 
   reg:
     maxItems: 1
index 8cb47c3..bf3a9ec 100644 (file)
@@ -4,9 +4,15 @@ The RK3288 clock controller generates and supplies clock to various
 controllers within the SoC and also implements a reset controller for SoC
 peripherals.
 
+A revision of this SoC is available: rk3288w. Its clock tree is slightly
+different, so a separate dt-compatible is provided. Note that the rk3288w
+compatible only describes the differences; there is no automatic revision
+detection, so it has to be performed by the bootloader.
+
 Required Properties:
 
-- compatible: should be "rockchip,rk3288-cru"
+- compatible: should be "rockchip,rk3288-cru", or "rockchip,rk3288w-cru" for
+  the rk3288w revision of the SoC.
 - reg: physical base address of the controller and length of memory mapped
   region.
 - #clock-cells: should be 1.
index 2981387..c6d0915 100644 (file)
@@ -16,7 +16,7 @@ properties:
   "#clock-cells":
     const: 1
 
-  compatible :
+  compatible:
     enum:
       - sprd,sc9863a-ap-clk
       - sprd,sc9863a-aon-clk
index 52b3cda..f54b4e4 100644 (file)
@@ -32,8 +32,7 @@ properties:
       - const: hdmi
 
   ddc:
-    allOf:
-      - $ref: /schemas/types.yaml#/definitions/phandle
+    $ref: /schemas/types.yaml#/definitions/phandle
     description: >
       Phandle of the I2C controller used for DDC EDID probing
 
index 2c4c34e..04099f5 100644 (file)
@@ -162,14 +162,13 @@ required:
 additionalProperties: false
 
 examples:
- - |
+  - |
+    #include <dt-bindings/clock/imx8mq-clock.h>
+    #include <dt-bindings/gpio/gpio.h>
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+    #include <dt-bindings/reset/imx8mq-reset.h>
 
-   #include <dt-bindings/clock/imx8mq-clock.h>
-   #include <dt-bindings/gpio/gpio.h>
-   #include <dt-bindings/interrupt-controller/arm-gic.h>
-   #include <dt-bindings/reset/imx8mq-reset.h>
-
-   mipi_dsi: mipi_dsi@30a00000 {
+    mipi_dsi: mipi_dsi@30a00000 {
               #address-cells = <1>;
               #size-cells = <0>;
               compatible = "fsl,imx8mq-nwl-dsi";
@@ -224,4 +223,4 @@ examples:
                            };
                     };
               };
-      };
+    };
index 98c7330..baaf2a2 100644 (file)
@@ -119,17 +119,17 @@ then:
         # The LVDS encoder can use the EXTAL or DU_DOTCLKINx clocks.
         # These clocks are optional.
         - enum:
-          - extal
-          - dclkin.0
-          - dclkin.1
+            - extal
+            - dclkin.0
+            - dclkin.1
         - enum:
-          - extal
-          - dclkin.0
-          - dclkin.1
+            - extal
+            - dclkin.0
+            - dclkin.1
         - enum:
-          - extal
-          - dclkin.0
-          - dclkin.1
+            - extal
+            - dclkin.0
+            - dclkin.1
 
   required:
     - clock-names
index 0880cbf..3ddb35f 100644 (file)
@@ -18,16 +18,16 @@ properties:
   compatible:
     oneOf:
       - items:
-        - enum:
-          - ti,ths8134a
-          - ti,ths8134b
-        - const: ti,ths8134
+          - enum:
+              - ti,ths8134a
+              - ti,ths8134b
+          - const: ti,ths8134
       - enum:
-        - adi,adv7123
-        - dumb-vga-dac
-        - ti,opa362
-        - ti,ths8134
-        - ti,ths8135
+          - adi,adv7123
+          - dumb-vga-dac
+          - ti,opa362
+          - ti,ths8134
+          - ti,ths8135
 
   ports:
     type: object
index 85b71b1..a02039e 100644 (file)
@@ -55,11 +55,11 @@ patternProperties:
       clock-master:
         type: boolean
         description:
-           Should be enabled if the host is being used in conjunction with
-           another DSI host to drive the same peripheral. Hardware supporting
-           such a configuration generally requires the data on both the busses
-           to be driven by the same clock. Only the DSI host instance
-           controlling this clock should contain this property.
+          Should be enabled if the host is being used in conjunction with
+          another DSI host to drive the same peripheral. Hardware supporting
+          such a configuration generally requires the data on both the busses
+          to be driven by the same clock. Only the DSI host instance
+          controlling this clock should contain this property.
 
       enforce-video-mode:
         type: boolean
index 66e93e5..aecff34 100644 (file)
@@ -21,9 +21,9 @@ properties:
     items:
       - enum:
           # Waveshare 3.5" 320x480 Color TFT LCD
-        - waveshare,rpi-lcd-35
+          - waveshare,rpi-lcd-35
           # Ozzmaker 3.5" 320x480 Color TFT LCD
-        - ozzmaker,piscreen
+          - ozzmaker,piscreen
       - const: ilitek,ili9486
 
   spi-max-frequency:
index 5bfc33e..12064a8 100644 (file)
@@ -13,11 +13,11 @@ properties:
   compatible:
     oneOf:
       - enum:
-        - ingenic,jz4725b-ipu
-        - ingenic,jz4760-ipu
+          - ingenic,jz4725b-ipu
+          - ingenic,jz4760-ipu
       - items:
-        - const: ingenic,jz4770-ipu
-        - const: ingenic,jz4760-ipu
+          - const: ingenic,jz4770-ipu
+          - const: ingenic,jz4760-ipu
 
   reg:
     maxItems: 1
index d56db18..768050f 100644 (file)
@@ -58,11 +58,11 @@ properties:
       - port@0
 
 required:
-    - compatible
-    - reg
-    - interrupts
-    - clocks
-    - clock-names
+  - compatible
+  - reg
+  - interrupts
+  - clocks
+  - clock-names
 
 if:
   properties:
index 0b8736a..53056dd 100644 (file)
@@ -38,10 +38,10 @@ properties:
 
   clocks:
     items:
-     - description: GMU clock
-     - description: GPU CX clock
-     - description: GPU AXI clock
-     - description: GPU MEMNOC clock
+      - description: GMU clock
+      - description: GPU CX clock
+      - description: GPU AXI clock
+      - description: GPU MEMNOC clock
 
   clock-names:
     items:
@@ -52,8 +52,8 @@ properties:
 
   interrupts:
     items:
-     - description: GMU HFI interrupt
-     - description: GMU interrupt
+      - description: GMU HFI interrupt
+      - description: GMU interrupt
 
 
   interrupt-names:
@@ -62,14 +62,14 @@ properties:
       - const: gmu
 
   power-domains:
-     items:
-       - description: CX power domain
-       - description: GX power domain
+    items:
+      - description: CX power domain
+      - description: GX power domain
 
   power-domain-names:
-     items:
-       - const: cx
-       - const: gx
+    items:
+      - const: cx
+      - const: gx
 
   iommus:
     maxItems: 1
@@ -90,13 +90,13 @@ required:
   - operating-points-v2
 
 examples:
- - |
-   #include <dt-bindings/clock/qcom,gpucc-sdm845.h>
-   #include <dt-bindings/clock/qcom,gcc-sdm845.h>
-   #include <dt-bindings/interrupt-controller/irq.h>
-   #include <dt-bindings/interrupt-controller/arm-gic.h>
+  - |
+    #include <dt-bindings/clock/qcom,gpucc-sdm845.h>
+    #include <dt-bindings/clock/qcom,gcc-sdm845.h>
+    #include <dt-bindings/interrupt-controller/irq.h>
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
 
-   gmu: gmu@506a000 {
+    gmu: gmu@506a000 {
         compatible="qcom,adreno-gmu-630.2", "qcom,adreno-gmu";
 
         reg = <0x506a000 0x30000>,
@@ -120,4 +120,4 @@ examples:
 
         iommus = <&adreno_smmu 5>;
         operating-points-v2 = <&gmu_opp_table>;
-   };
+    };
index 083d2b9..75a09df 100644 (file)
@@ -24,9 +24,9 @@ properties:
   reg: true
   reset-gpios: true
   vdd-supply:
-     description: core voltage supply
+    description: core voltage supply
   vddio-supply:
-     description: vddio supply
+    description: vddio supply
 
 required:
   - compatible
index 7f5df58..38bc1d1 100644 (file)
@@ -48,12 +48,12 @@ properties:
   port: true
 
 required:
- - compatible
- - reg
- - enable-gpios
- - pp1800-supply
- - avdd-supply
- - avee-supply
+  - compatible
+  - reg
+  - enable-gpios
+  - pp1800-supply
+  - avdd-supply
+  - avee-supply
 
 additionalProperties: false
 
index aa761f6..7adb83e 100644 (file)
@@ -19,9 +19,9 @@ properties:
   backlight: true
   reset-gpios: true
   iovcc-supply:
-     description: regulator that supplies the iovcc voltage
+    description: regulator that supplies the iovcc voltage
   vdd-supply:
-     description: regulator that supplies the vdd voltage
+    description: regulator that supplies the vdd voltage
 
 required:
   - compatible
index 927f1ee..81adb82 100644 (file)
@@ -19,11 +19,11 @@ properties:
   backlight: true
   reset-gpios: true
   avdd-supply:
-     description: regulator that supplies the AVDD voltage
+    description: regulator that supplies the AVDD voltage
   dvdd-supply:
-     description: regulator that supplies the DVDD voltage
+    description: regulator that supplies the DVDD voltage
   cvdd-supply:
-     description: regulator that supplies the CVDD voltage
+    description: regulator that supplies the CVDD voltage
 
 required:
   - compatible
index 177d48c..e89c1ea 100644 (file)
@@ -25,8 +25,7 @@ properties:
   compatible:
     items:
       - enum:
-        - dlink,dir-685-panel
-
+          - dlink,dir-685-panel
       - const: ilitek,ili9322
 
   reset-gpios: true
index a393322..76a9068 100644 (file)
@@ -13,8 +13,7 @@ properties:
   compatible:
     items:
       - enum:
-        - bananapi,lhr050h41
-
+          - bananapi,lhr050h41
       - const: ilitek,ili9881c
 
   backlight: true
index a372bdc..3715882 100644 (file)
@@ -21,9 +21,9 @@ properties:
   backlight: true
   reset-gpios: true
   iovcc-supply:
-     description: regulator that supplies the iovcc voltage
+    description: regulator that supplies the iovcc voltage
   vci-supply:
-     description: regulator that supplies the vci voltage
+    description: regulator that supplies the vci voltage
 
 required:
   - compatible
index b900973..c5944b4 100644 (file)
@@ -19,9 +19,9 @@ properties:
   backlight: true
   reset-gpios: true
   iovcc-supply:
-     description: regulator that supplies the iovcc voltage
+    description: regulator that supplies the iovcc voltage
   vcc-supply:
-     description: regulator that supplies the vcc voltage
+    description: regulator that supplies the vcc voltage
 
 required:
   - compatible
index 73d2ff3..bc92928 100644 (file)
@@ -25,9 +25,9 @@ properties:
   reg: true
   reset-gpios: true
   vdd-supply:
-     description: regulator that supplies the vdd voltage
+    description: regulator that supplies the vdd voltage
   vddi-supply:
-     description: regulator that supplies the vddi voltage
+    description: regulator that supplies the vddi voltage
   backlight: true
 
 required:
index d766c94..4a36aa6 100644 (file)
@@ -27,10 +27,10 @@ properties:
   compatible:
     items:
       - enum:
-        - motorola,droid4-panel        # Panel from Motorola Droid4 phone
-        - nokia,himalaya               # Panel from Nokia N950 phone
-        - tpo,taal                     # Panel from OMAP4 SDP board
-      - const: panel-dsi-cm            # Generic DSI command mode panel compatible fallback
+          - motorola,droid4-panel        # Panel from Motorola Droid4 phone
+          - nokia,himalaya               # Panel from Nokia N950 phone
+          - tpo,taal                     # Panel from OMAP4 SDP board
+      - const: panel-dsi-cm              # Generic DSI command mode panel compatible fallback
 
   reg:
     maxItems: 1
index 182c19c..9bf592d 100644 (file)
@@ -59,7 +59,7 @@ description: |
 properties:
 
   clock-frequency:
-   description: Panel clock in Hz
+    description: Panel clock in Hz
 
   hactive:
     $ref: /schemas/types.yaml#/definitions/uint32
@@ -200,15 +200,15 @@ properties:
     description: Enable double clock mode
 
 required:
- - clock-frequency
- - hactive
- - vactive
- - hfront-porch
- - hback-porch
- - hsync-len
- - vfront-porch
- - vback-porch
- - vsync-len
+  - clock-frequency
+  - hactive
+  - vactive
+  - hfront-porch
+  - hback-porch
+  - hsync-len
+  - vfront-porch
+  - vback-porch
+  - vsync-len
 
 additionalProperties: false
 
index a35ba16..3947779 100644 (file)
@@ -10,8 +10,8 @@ maintainers:
   - Philippe CORNU <philippe.cornu@st.com>
 
 description: |
-             The Raydium Semiconductor Corporation RM68200 is a 5.5" 720x1280 TFT LCD
-             panel connected using a MIPI-DSI video interface.
+  The Raydium Semiconductor Corporation RM68200 is a 5.5" 720x1280 TFT LCD
+  panel connected using a MIPI-DSI video interface.
 
 allOf:
   - $ref: panel-common.yaml#
index 7a685d0..44ce98f 100644 (file)
@@ -18,9 +18,9 @@ properties:
   reg: true
   reset-gpios: true
   vdd3-supply:
-     description: core voltage supply
+    description: core voltage supply
   vci-supply:
-     description: voltage supply for analog circuits
+    description: voltage supply for analog circuits
 
 required:
   - compatible
index b36f39f..076b057 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Visionox model RM69299 Panels Device Tree Bindings.
 
 maintainers:
- - Harigovindan P <harigovi@codeaurora.org>
+  - Harigovindan P <harigovi@codeaurora.org>
 
 description: |
   This binding is for display panels using a Visionox RM692999 panel.
index 3be76d1..69cc7e8 100644 (file)
@@ -45,7 +45,7 @@ properties:
 
   phy-dsi-supply:
     description:
-        Phandle of the regulator that provides the supply voltage.
+      Phandle of the regulator that provides the supply voltage.
 
   ports:
     type: object
@@ -147,4 +147,3 @@ examples:
 
 ...
 
-
index bbd7659..173730d 100644 (file)
@@ -78,7 +78,7 @@ properties:
       - const: vp4
 
   interrupts:
-     items:
+    items:
       - description: common_m DSS Master common
       - description: common_s0 DSS Shared common 0
       - description: common_s1 DSS Shared common 1
index 3bbe952..4cc0112 100644 (file)
@@ -56,8 +56,8 @@ properties:
 
   memory-region:
     description:
-       phandle to a node describing reserved memory (System RAM memory)
-       used by DSP (see bindings/reserved-memory/reserved-memory.txt)
+      phandle to a node describing reserved memory (System RAM memory)
+      used by DSP (see bindings/reserved-memory/reserved-memory.txt)
     maxItems: 1
 
 required:
index c9534d2..822975d 100644 (file)
@@ -177,10 +177,10 @@ properties:
 dependencies:
   # 'vendor,bool-property' is only allowed when 'vendor,string-array-property'
   # is present
-  vendor,bool-property: [ vendor,string-array-property ]
+  vendor,bool-property: [ 'vendor,string-array-property' ]
   # Expressing 2 properties in both orders means all of the set of properties
   # must be present or none of them.
-  vendor,string-array-property: [ vendor,bool-property ]
+  vendor,string-array-property: [ 'vendor,bool-property' ]
 
 required:
   - compatible
index 893d81e..b26d4b4 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: IBM FSI-attached SPI controllers
 
 maintainers:
- - Eddie James <eajames@linux.ibm.com>
+  - Eddie James <eajames@linux.ibm.com>
 
 description: |
   This binding describes an FSI CFAM engine called the FSI2SPI. Therefore this
index 4f2cbd8..c213cb9 100644 (file)
@@ -19,10 +19,8 @@ properties:
 
   reg:
     items:
-      - description: the I/O address containing the GPIO controller
-                     registers.
-      - description: the I/O address containing the Chip Common A interrupt
-                     registers.
+      - description: the I/O address containing the GPIO controller registers.
+      - description: the I/O address containing the Chip Common A interrupt registers.
 
   gpio-controller: true
 
index ccf5b50..dfa1133 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Freescale MXS GPIO controller
 
 maintainers:
-  - Shawn Guo <shawn.guo@linaro.org>
+  - Shawn Guo <shawnguo@kernel.org>
   - Anson Huang <Anson.Huang@nxp.com>
 
 description: |
index 397d938..3ad2293 100644 (file)
@@ -13,39 +13,39 @@ properties:
   compatible:
     oneOf:
       - items:
-         - enum:
-             - renesas,gpio-r8a7778      # R-Car M1
-             - renesas,gpio-r8a7779      # R-Car H1
-         - const: renesas,rcar-gen1-gpio # R-Car Gen1
+          - enum:
+              - renesas,gpio-r8a7778      # R-Car M1
+              - renesas,gpio-r8a7779      # R-Car H1
+          - const: renesas,rcar-gen1-gpio # R-Car Gen1
 
       - items:
-         - enum:
-             - renesas,gpio-r8a7742      # RZ/G1H
-             - renesas,gpio-r8a7743      # RZ/G1M
-             - renesas,gpio-r8a7744      # RZ/G1N
-             - renesas,gpio-r8a7745      # RZ/G1E
-             - renesas,gpio-r8a77470     # RZ/G1C
-             - renesas,gpio-r8a7790      # R-Car H2
-             - renesas,gpio-r8a7791      # R-Car M2-W
-             - renesas,gpio-r8a7792      # R-Car V2H
-             - renesas,gpio-r8a7793      # R-Car M2-N
-             - renesas,gpio-r8a7794      # R-Car E2
-         - const: renesas,rcar-gen2-gpio # R-Car Gen2 or RZ/G1
+          - enum:
+              - renesas,gpio-r8a7742      # RZ/G1H
+              - renesas,gpio-r8a7743      # RZ/G1M
+              - renesas,gpio-r8a7744      # RZ/G1N
+              - renesas,gpio-r8a7745      # RZ/G1E
+              - renesas,gpio-r8a77470     # RZ/G1C
+              - renesas,gpio-r8a7790      # R-Car H2
+              - renesas,gpio-r8a7791      # R-Car M2-W
+              - renesas,gpio-r8a7792      # R-Car V2H
+              - renesas,gpio-r8a7793      # R-Car M2-N
+              - renesas,gpio-r8a7794      # R-Car E2
+          - const: renesas,rcar-gen2-gpio # R-Car Gen2 or RZ/G1
 
       - items:
-         - enum:
-             - renesas,gpio-r8a774a1     # RZ/G2M
-             - renesas,gpio-r8a774b1     # RZ/G2N
-             - renesas,gpio-r8a774c0     # RZ/G2E
-             - renesas,gpio-r8a7795      # R-Car H3
-             - renesas,gpio-r8a7796      # R-Car M3-W
-             - renesas,gpio-r8a77961     # R-Car M3-W+
-             - renesas,gpio-r8a77965     # R-Car M3-N
-             - renesas,gpio-r8a77970     # R-Car V3M
-             - renesas,gpio-r8a77980     # R-Car V3H
-             - renesas,gpio-r8a77990     # R-Car E3
-             - renesas,gpio-r8a77995     # R-Car D3
-         - const: renesas,rcar-gen3-gpio # R-Car Gen3 or RZ/G2
+          - enum:
+              - renesas,gpio-r8a774a1     # RZ/G2M
+              - renesas,gpio-r8a774b1     # RZ/G2N
+              - renesas,gpio-r8a774c0     # RZ/G2E
+              - renesas,gpio-r8a7795      # R-Car H3
+              - renesas,gpio-r8a7796      # R-Car M3-W
+              - renesas,gpio-r8a77961     # R-Car M3-W+
+              - renesas,gpio-r8a77965     # R-Car M3-N
+              - renesas,gpio-r8a77970     # R-Car V3M
+              - renesas,gpio-r8a77980     # R-Car V3H
+              - renesas,gpio-r8a77990     # R-Car E3
+              - renesas,gpio-r8a77995     # R-Car D3
+          - const: renesas,rcar-gen3-gpio # R-Car Gen3 or RZ/G2
 
   reg:
     maxItems: 1
index e1ac6ff..4843df1 100644 (file)
@@ -26,7 +26,8 @@ properties:
       - description: AXI/master interface clock
       - description: GPU core clock
       - description: Shader clock (only required if GPU has feature PIPE_3D)
-      - description: AHB/slave interface clock (only required if GPU can gate slave interface independently)
+      - description: AHB/slave interface clock (only required if GPU can gate 
+          slave interface independently)
     minItems: 1
     maxItems: 4
 
diff --git a/Documentation/devicetree/bindings/hwlock/qcom-hwspinlock.txt b/Documentation/devicetree/bindings/hwlock/qcom-hwspinlock.txt
deleted file mode 100644 (file)
index 4563f52..0000000
+++ /dev/null
@@ -1,39 +0,0 @@
-Qualcomm Hardware Mutex Block:
-
-The hardware block provides mutexes utilized between different processors on
-the SoC as part of the communication protocol used by these processors.
-
-- compatible:
-       Usage: required
-       Value type: <string>
-       Definition: must be one of:
-                   "qcom,sfpb-mutex",
-                   "qcom,tcsr-mutex"
-
-- syscon:
-       Usage: required
-       Value type: <prop-encoded-array>
-       Definition: one cell containing:
-                   syscon phandle
-                   offset of the hwmutex block within the syscon
-                   stride of the hwmutex registers
-
-- #hwlock-cells:
-       Usage: required
-       Value type: <u32>
-       Definition: must be 1, the specified cell represent the lock id
-                   (hwlock standard property, see hwlock.txt)
-
-Example:
-
-       tcsr_mutex_block: syscon@fd484000 {
-               compatible = "syscon";
-               reg = <0xfd484000 0x2000>;
-       };
-
-       hwlock@fd484000 {
-               compatible = "qcom,tcsr-mutex";
-               syscon = <&tcsr_mutex_block 0 0x80>;
-
-               #hwlock-cells = <1>;
-       };
diff --git a/Documentation/devicetree/bindings/hwlock/qcom-hwspinlock.yaml b/Documentation/devicetree/bindings/hwlock/qcom-hwspinlock.yaml
new file mode 100644 (file)
index 0000000..1c7149f
--- /dev/null
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/hwlock/qcom-hwspinlock.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Hardware Mutex Block
+
+maintainers:
+  - Bjorn Andersson <bjorn.andersson@linaro.org>
+
+description:
+  The hardware block provides mutexes utilized between different processors on
+  the SoC as part of the communication protocol used by these processors.
+
+properties:
+  compatible:
+    enum:
+      - qcom,sfpb-mutex
+      - qcom,tcsr-mutex
+
+  reg:
+    maxItems: 1
+
+  '#hwlock-cells':
+    const: 1
+
+required:
+  - compatible
+  - reg
+  - '#hwlock-cells'
+
+additionalProperties: false
+
+examples:
+  - |
+        tcsr_mutex: hwlock@1f40000 {
+                compatible = "qcom,tcsr-mutex";
+                reg = <0x01f40000 0x40000>;
+                #hwlock-cells = <1>;
+        };
+...
index af35b77..7898b9d 100644 (file)
@@ -19,7 +19,7 @@ description: |+
 properties:
   compatible:
     enum:
-        - adi,axi-fan-control-1.00.a
+      - adi,axi-fan-control-1.00.a
 
   reg:
     maxItems: 1
index 869f2ae..8105369 100644 (file)
@@ -20,22 +20,22 @@ properties:
           - const: fsl,imx1-i2c
       - items:
           - enum:
-            - fsl,imx25-i2c
-            - fsl,imx27-i2c
-            - fsl,imx31-i2c
-            - fsl,imx50-i2c
-            - fsl,imx51-i2c
-            - fsl,imx53-i2c
-            - fsl,imx6q-i2c
-            - fsl,imx6sl-i2c
-            - fsl,imx6sx-i2c
-            - fsl,imx6sll-i2c
-            - fsl,imx6ul-i2c
-            - fsl,imx7s-i2c
-            - fsl,imx8mq-i2c
-            - fsl,imx8mm-i2c
-            - fsl,imx8mn-i2c
-            - fsl,imx8mp-i2c
+              - fsl,imx25-i2c
+              - fsl,imx27-i2c
+              - fsl,imx31-i2c
+              - fsl,imx50-i2c
+              - fsl,imx51-i2c
+              - fsl,imx53-i2c
+              - fsl,imx6q-i2c
+              - fsl,imx6sl-i2c
+              - fsl,imx6sx-i2c
+              - fsl,imx6sll-i2c
+              - fsl,imx6ul-i2c
+              - fsl,imx7s-i2c
+              - fsl,imx8mq-i2c
+              - fsl,imx8mm-i2c
+              - fsl,imx8mn-i2c
+              - fsl,imx8mp-i2c
           - const: fsl,imx21-i2c
 
   reg:
index 88b71c1..7f0194f 100644 (file)
@@ -14,6 +14,7 @@ Required properties:
       "mediatek,mt7629-i2c", "mediatek,mt2712-i2c": for MediaTek MT7629
       "mediatek,mt8173-i2c": for MediaTek MT8173
       "mediatek,mt8183-i2c": for MediaTek MT8183
+      "mediatek,mt8192-i2c": for MediaTek MT8192
       "mediatek,mt8516-i2c", "mediatek,mt2712-i2c": for MediaTek MT8516
   - reg: physical base address of the controller and dma base, length of memory
     mapped region.
index d3134ed..21ae7bc 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Freescale MXS Inter IC (I2C) Controller
 
 maintainers:
-  - Shawn Guo <shawn.guo@linaro.org>
+  - Shawn Guo <shawnguo@kernel.org>
 
 properties:
   compatible:
index da6e8bd..015885d 100644 (file)
@@ -16,8 +16,8 @@ allOf:
         required:
           - mrvl,i2c-polling
     then:
-        required:
-          - interrupts
+      required:
+        - interrupts
 
 properties:
   compatible:
index 438ae12..a21c359 100644 (file)
@@ -72,6 +72,16 @@ wants to support one of the below features, it should adapt these bindings.
        this information to adapt power management to keep the arbitration awake
        all the time, for example. Can not be combined with 'single-master'.
 
+- pinctrl
+       add extra pinctrl to configure SCL/SDA pins to GPIO function for bus
+       recovery, call it "gpio" or "recovery" (deprecated) state
+
+- scl-gpios
+       specify the gpio related to SCL pin. Used for GPIO bus recovery.
+
+- sda-gpios
+       specify the gpio related to SDA pin. Optional for GPIO bus recovery.
+
 - single-master
        states that there is no other master active on this bus. The OS can use
        this information to detect a stalled bus more reliably, for example.
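A hedged sketch of how the GPIO bus-recovery properties above are typically
wired on a controller node (the pinctrl labels, GPIO controller phandle and
pin numbers are placeholders, not taken from this document):

	i2c1: i2c@21a0000 {
		/* ... usual controller properties ... */
		pinctrl-names = "default", "gpio";
		pinctrl-0 = <&pinctrl_i2c1>;      /* SCL/SDA in I2C function */
		pinctrl-1 = <&pinctrl_i2c1_gpio>; /* SCL/SDA as GPIOs for recovery */
		scl-gpios = <&gpio1 28 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
		sda-gpios = <&gpio1 29 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
	};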
index 2ceb05b..5b5ae40 100644 (file)
@@ -27,6 +27,9 @@ properties:
           - const: allwinner,sun50i-a64-i2c
           - const: allwinner,sun6i-a31-i2c
       - items:
+          - const: allwinner,sun50i-a100-i2c
+          - const: allwinner,sun6i-a31-i2c
+      - items:
           - const: allwinner,sun50i-h6-i2c
           - const: allwinner,sun6i-a31-i2c
 
index a03f9f5..96d869a 100644 (file)
@@ -10,6 +10,7 @@ Required properties:
        "renesas,i2c-r8a774a1" if the device is a part of a R8A774A1 SoC.
        "renesas,i2c-r8a774b1" if the device is a part of a R8A774B1 SoC.
        "renesas,i2c-r8a774c0" if the device is a part of a R8A774C0 SoC.
+       "renesas,i2c-r8a774e1" if the device is a part of a R8A774E1 SoC.
        "renesas,i2c-r8a7778" if the device is a part of a R8A7778 SoC.
        "renesas,i2c-r8a7779" if the device is a part of a R8A7779 SoC.
        "renesas,i2c-r8a7790" if the device is a part of a R8A7790 SoC.
index 89facb0..93d4128 100644 (file)
@@ -11,6 +11,7 @@ Required properties:
                        - "renesas,iic-r8a774a1" (RZ/G2M)
                        - "renesas,iic-r8a774b1" (RZ/G2N)
                        - "renesas,iic-r8a774c0" (RZ/G2E)
+                       - "renesas,iic-r8a774e1" (RZ/G2H)
                        - "renesas,iic-r8a7790" (R-Car H2)
                        - "renesas,iic-r8a7791" (R-Car M2-W)
                        - "renesas,iic-r8a7792" (R-Car V2H)
index 5117ad6..cbb8819 100644 (file)
@@ -53,10 +53,10 @@ properties:
 
   standby-gpios:
     description:
-       Must be the device tree identifier of the STBY pin. This pin is used
-       to place the AD7606 into one of two power-down modes, Standby mode or
-       Shutdown mode. As the line is active low, it should be marked
-       GPIO_ACTIVE_LOW.
+      Must be the device tree identifier of the STBY pin. This pin is used
+      to place the AD7606 into one of two power-down modes, Standby mode or
+      Shutdown mode. As the line is active low, it should be marked
+      GPIO_ACTIVE_LOW.
     maxItems: 1
 
   adi,first-data-gpios:
index a0ebb46..cccd303 100644 (file)
@@ -10,7 +10,7 @@ maintainers:
   - Jonathan Cameron <jic23@kernel.org>
 
 description: |
-   Family of simple ADCs with i2c inteface and internal references.
+  Family of simple ADCs with i2c interface and internal references.
 
 properties:
   compatible:
index e6263b6..0ca9924 100644 (file)
@@ -24,11 +24,11 @@ properties:
           - const: qcom,spmi-adc-rev2
 
       - items:
-        - enum:
-          - qcom,spmi-vadc
-          - qcom,spmi-adc5
-          - qcom,spmi-adc-rev2
-          - qcom,spmi-adc7
+          - enum:
+              - qcom,spmi-vadc
+              - qcom,spmi-adc5
+              - qcom,spmi-adc-rev2
+              - qcom,spmi-adc7
 
   reg:
     description: VADC base address in the SPMI PMIC register map
@@ -97,16 +97,14 @@ patternProperties:
             input signal is multiplied. For example, <1 3> indicates the signal is scaled
             down to 1/3 of its value before ADC measurement.
             If property is not found default value depending on chip will be used.
-        allOf:
-          - $ref: /schemas/types.yaml#/definitions/uint32-array
+        $ref: /schemas/types.yaml#/definitions/uint32-array
         oneOf:
           - items:
-            - const: 1
-            - enum: [ 1, 3, 4, 6, 20, 8, 10 ]
-
+              - const: 1
+              - enum: [ 1, 3, 4, 6, 20, 8, 10 ]
           - items:
-            - const: 10
-            - const: 81
+              - const: 10
+              - const: 81
 
       qcom,ratiometric:
         description: |
index bcff82a..1bb7619 100644 (file)
@@ -17,10 +17,10 @@ properties:
       - const: rockchip,rk3399-saradc
       - items:
           - enum:
-            - rockchip,px30-saradc
-            - rockchip,rk3308-saradc
-            - rockchip,rk3328-saradc
-            - rockchip,rv1108-saradc
+              - rockchip,px30-saradc
+              - rockchip,rk3308-saradc
+              - rockchip,rk3328-saradc
+              - rockchip,rv1108-saradc
           - const: rockchip,rk3399-saradc
 
   reg:
index 1c6d496..5342360 100644 (file)
@@ -7,8 +7,8 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: HMC425A 6-bit Digital Step Attenuator
 
 maintainers:
-- Michael Hennerich <michael.hennerich@analog.com>
-- Beniamin Bia <beniamin.bia@analog.com>
+  - Michael Hennerich <michael.hennerich@analog.com>
+  - Beniamin Bia <beniamin.bia@analog.com>
 
 description: |
   Digital Step Attenuator IIO device with gpio interface.
index 69e8931..9a89b34 100644 (file)
@@ -31,10 +31,10 @@ properties:
       - atlas,co2-ezo
 
   reg:
-     maxItems: 1
+    maxItems: 1
 
   interrupts:
-     maxItems: 1
+    maxItems: 1
 
 required:
   - compatible
index 58d81ca..82424e0 100644 (file)
@@ -61,17 +61,17 @@ properties:
         const: 0
 
       adi,range-microamp:
-          description: Output range of the channel.
-          oneOf:
-            - items:
-                - const: 0
-                - const: 300000
-            - items:
-                - const: -60000
-                - const: 0
-            - items:
-                - const: -60000
-                - const: 300000
+        description: Output range of the channel.
+        oneOf:
+          - items:
+              - const: 0
+              - const: 300000
+          - items:
+              - const: -60000
+              - const: 0
+          - items:
+              - const: -60000
+              - const: 300000
 
   channel@1:
     description: Represents an external channel which are
@@ -84,10 +84,10 @@ properties:
         const: 1
 
       adi,range-microamp:
-          description: Output range of the channel.
-          items:
-            - const: 0
-            - enum: [ 140000, 250000 ]
+        description: Output range of the channel.
+        items:
+          - const: 0
+          - enum: [140000, 250000]
 
   channel@2:
     description: Represents an external channel which are
@@ -100,10 +100,10 @@ properties:
         const: 2
 
       adi,range-microamp:
-          description: Output range of the channel.
-          items:
-            - const: 0
-            - enum: [ 55000, 150000 ]
+        description: Output range of the channel.
+        items:
+          - const: 0
+          - enum: [55000, 150000]
 
 patternProperties:
   "^channel@([3-5])$":
@@ -116,19 +116,19 @@ patternProperties:
         maximum: 5
 
       adi,range-microamp:
-          description: Output range of the channel.
-          items:
-            - const: 0
-            - enum: [ 45000, 100000 ]
+        description: Output range of the channel.
+        items:
+          - const: 0
+          - enum: [45000, 100000]
 
 required:
-- reg
-- channel@0
-- channel@1
-- channel@2
-- channel@3
-- channel@4
-- channel@5
+  - reg
+  - channel@0
+  - channel@1
+  - channel@2
+  - channel@3
+  - channel@4
+  - channel@5
 
 examples:
   - |
index da8f2e8..58887a4 100644 (file)
@@ -36,15 +36,15 @@ required:
 additionalProperties: false
 
 examples:
-- |
-  i2c {
-      #address-cells = <1>;
-      #size-cells = <0>;
-
-      light-sensor@51 {
-              compatible = "vishay,vcnl4200";
-              reg = <0x51>;
-              proximity-near-level = <220>;
-      };
-  };
+  - |
+    i2c {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        light-sensor@51 {
+            compatible = "vishay,vcnl4200";
+            reg = <0x51>;
+            proximity-near-level = <220>;
+        };
+    };
 ...
index f4393bf..f0b336a 100644 (file)
@@ -13,15 +13,15 @@ properties:
   compatible:
     oneOf:
       - enum:
-        - asahi-kasei,ak8975
-        - asahi-kasei,ak8963
-        - asahi-kasei,ak09911
-        - asahi-kasei,ak09912
+          - asahi-kasei,ak8975
+          - asahi-kasei,ak8963
+          - asahi-kasei,ak09911
+          - asahi-kasei,ak09912
       - enum:
-        - ak8975
-        - ak8963
-        - ak09911
-        - ak09912
+          - ak8975
+          - ak8963
+          - ak09911
+          - ak09912
         deprecated: true
 
   reg:
index 4190253..51dba64 100644 (file)
@@ -39,8 +39,8 @@ properties:
     description:
       The driver current for the LED used in proximity sensing.
     enum: [0, 10000, 20000, 30000, 40000, 50000, 60000, 70000, 80000, 90000,
-          100000, 110000, 120000, 130000, 140000, 150000, 160000, 170000,
-          180000, 190000, 200000]
+           100000, 110000, 120000, 130000, 140000, 150000, 160000, 170000,
+           180000, 190000, 200000]
     default: 20000
 
 required:
index 40ccbe7..0f79d9a 100644 (file)
@@ -307,7 +307,7 @@ patternProperties:
           mode.
         $ref: /schemas/types.yaml#/definitions/uint32
         enum: [0, 250, 500, 1000, 5000, 10000, 25000, 50000, 100000, 250000,
-          500000, 1000000]
+               500000, 1000000]
 
       adi,custom-thermistor:
         description:
index 7432c6e..f21db81 100644 (file)
@@ -24,19 +24,19 @@ properties:
       - const: fsl,imx21-kpp
       - items:
           - enum:
-            - fsl,imx25-kpp
-            - fsl,imx27-kpp
-            - fsl,imx31-kpp
-            - fsl,imx35-kpp
-            - fsl,imx51-kpp
-            - fsl,imx53-kpp
-            - fsl,imx50-kpp
-            - fsl,imx6q-kpp
-            - fsl,imx6sx-kpp
-            - fsl,imx6sl-kpp
-            - fsl,imx6sll-kpp
-            - fsl,imx6ul-kpp
-            - fsl,imx7d-kpp
+              - fsl,imx25-kpp
+              - fsl,imx27-kpp
+              - fsl,imx31-kpp
+              - fsl,imx35-kpp
+              - fsl,imx51-kpp
+              - fsl,imx53-kpp
+              - fsl,imx50-kpp
+              - fsl,imx6q-kpp
+              - fsl,imx6sx-kpp
+              - fsl,imx6sl-kpp
+              - fsl,imx6sll-kpp
+              - fsl,imx6ul-kpp
+              - fsl,imx7d-kpp
           - const: fsl,imx21-kpp
 
   reg:
index 8c73e52..3225c8d 100644 (file)
@@ -51,7 +51,7 @@ required:
   - touchscreen-max-pressure
 
 examples:
-- |
+  - |
     #include <dt-bindings/interrupt-controller/irq.h>
     i2c {
       #address-cells = <1>;
index 024b262..4ce1094 100644 (file)
@@ -20,11 +20,11 @@ maintainers:
 allOf:
   - $ref: touchscreen.yaml#
   - if:
-     properties:
-       compatible:
-         contains:
-           enum:
-             - evervision,ev-ft5726
+      properties:
+        compatible:
+          contains:
+            enum:
+              - evervision,ev-ft5726
 
     then:
       properties:
diff --git a/Documentation/devicetree/bindings/input/touchscreen/eeti,exc3000.yaml b/Documentation/devicetree/bindings/input/touchscreen/eeti,exc3000.yaml
new file mode 100644 (file)
index 0000000..007adbc
--- /dev/null
@@ -0,0 +1,58 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/input/touchscreen/eeti,exc3000.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: EETI EXC3000 series touchscreen controller
+
+maintainers:
+  - Dmitry Torokhov <dmitry.torokhov@gmail.com>
+
+allOf:
+  - $ref: touchscreen.yaml#
+
+properties:
+  compatible:
+    enum:
+      - eeti,exc3000
+      - eeti,exc80h60
+      - eeti,exc80h84
+  reg:
+    const: 0x2a
+  interrupts:
+    maxItems: 1
+  reset-gpios:
+    maxItems: 1
+  touchscreen-size-x: true
+  touchscreen-size-y: true
+  touchscreen-inverted-x: true
+  touchscreen-inverted-y: true
+  touchscreen-swapped-x-y: true
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - touchscreen-size-x
+  - touchscreen-size-y
+
+additionalProperties: false
+
+examples:
+  - |
+    #include "dt-bindings/interrupt-controller/irq.h"
+    i2c {
+        #address-cells = <1>;
+        #size-cells = <0>;
+        touchscreen@2a {
+                compatible = "eeti,exc3000";
+                reg = <0x2a>;
+                interrupt-parent = <&gpio1>;
+                interrupts = <9 IRQ_TYPE_LEVEL_LOW>;
+                touchscreen-size-x = <4096>;
+                touchscreen-size-y = <4096>;
+                touchscreen-inverted-x;
+                touchscreen-swapped-x-y;
+        };
+    };
diff --git a/Documentation/devicetree/bindings/input/touchscreen/exc3000.txt b/Documentation/devicetree/bindings/input/touchscreen/exc3000.txt
deleted file mode 100644 (file)
index 68291b9..0000000
+++ /dev/null
@@ -1,26 +0,0 @@
-* EETI EXC3000 Multiple Touch Controller
-
-Required properties:
-- compatible: must be "eeti,exc3000"
-- reg: i2c slave address
-- interrupts: touch controller interrupt
-- touchscreen-size-x: See touchscreen.txt
-- touchscreen-size-y: See touchscreen.txt
-
-Optional properties:
-- touchscreen-inverted-x: See touchscreen.txt
-- touchscreen-inverted-y: See touchscreen.txt
-- touchscreen-swapped-x-y: See touchscreen.txt
-
-Example:
-
-       touchscreen@2a {
-               compatible = "eeti,exc3000";
-               reg = <0x2a>;
-               interrupt-parent = <&gpio1>;
-               interrupts = <9 IRQ_TYPE_LEVEL_LOW>;
-               touchscreen-size-x = <4096>;
-               touchscreen-size-y = <4096>;
-               touchscreen-inverted-x;
-               touchscreen-swapped-x-y;
-       };
index e81cfa5..da5b0d8 100644 (file)
@@ -35,9 +35,8 @@ properties:
     maxItems: 1
 
   irq-gpios:
-    description: GPIO pin used for IRQ.
-                 The driver uses the interrupt gpio pin as
-                 output to reset the device.
+    description: GPIO pin used for IRQ. The driver uses the interrupt gpio pin
+      as output to reset the device.
     maxItems: 1
 
   reset-gpios:
index d7dac16..36dc7b5 100644 (file)
@@ -33,8 +33,8 @@ properties:
     $ref: /schemas/types.yaml#/definitions/uint32
 
   touchscreen-min-pressure:
-    description: minimum pressure on the touchscreen to be achieved in order for the
-                 touchscreen driver to report a touch event.
+    description: minimum pressure on the touchscreen to be achieved in order
+      for the touchscreen driver to report a touch event.
     $ref: /schemas/types.yaml#/definitions/uint32
 
   touchscreen-fuzz-x:
@@ -46,13 +46,13 @@ properties:
     $ref: /schemas/types.yaml#/definitions/uint32
 
   touchscreen-fuzz-pressure:
-    description: pressure noise value of the absolute input device (arbitrary range
-                 dependent on the controller)
+    description: pressure noise value of the absolute input device (arbitrary
+      range dependent on the controller)
     $ref: /schemas/types.yaml#/definitions/uint32
 
   touchscreen-average-samples:
-    description: Number of data samples which are averaged for each read (valid values
-                 dependent on the controller)
+    description: Number of data samples which are averaged for each read (valid
+      values dependent on the controller)
     $ref: /schemas/types.yaml#/definitions/uint32
 
   touchscreen-inverted-x:
index ff09550..a887373 100644 (file)
@@ -25,17 +25,17 @@ properties:
   compatible:
     oneOf:
       - items:
-        - enum:
-          - fsl,imx8mn-nic
-          - fsl,imx8mm-nic
-          - fsl,imx8mq-nic
-        - const: fsl,imx8m-nic
+          - enum:
+              - fsl,imx8mn-nic
+              - fsl,imx8mm-nic
+              - fsl,imx8mq-nic
+          - const: fsl,imx8m-nic
       - items:
-        - enum:
-          - fsl,imx8mn-noc
-          - fsl,imx8mm-noc
-          - fsl,imx8mq-noc
-        - const: fsl,imx8m-noc
+          - enum:
+              - fsl,imx8mn-noc
+              - fsl,imx8mm-noc
+              - fsl,imx8mq-noc
+          - const: fsl,imx8m-noc
       - const: fsl,imx8m-nic
 
   reg:
index d01bac8..8659048 100644 (file)
@@ -4,7 +4,7 @@
 $id: http://devicetree.org/schemas/interconnect/qcom,sc7180.yaml#
 $schema: http://devicetree.org/meta-schemas/core.yaml#
 
-title:  Qualcomm SC7180 Network-On-Chip Interconnect
+title: Qualcomm SC7180 Network-On-Chip Interconnect
 
 maintainers:
   - Odelu Kukatla <okukatla@codeaurora.org>
index 7453674..dab17c0 100644 (file)
@@ -4,7 +4,7 @@
 $id: http://devicetree.org/schemas/interconnect/qcom,sdm845.yaml#
 $schema: http://devicetree.org/meta-schemas/core.yaml#
 
-title:  Qualcomm SDM845 Network-On-Chip Interconnect
+title: Qualcomm SDM845 Network-On-Chip Interconnect
 
 maintainers:
   - Georgi Djakov <georgi.djakov@linaro.org>
index 96f8803..0688996 100644 (file)
@@ -42,8 +42,8 @@ properties:
       - items:
           - const: arm,gic-400
           - enum:
-             - arm,cortex-a15-gic
-             - arm,cortex-a7-gic
+              - arm,cortex-a15-gic
+              - arm,cortex-a7-gic
 
       - items:
           - const: arm,arm1176jzf-devchip-gic
index 28b27e1..02a3cf4 100644 (file)
@@ -16,20 +16,20 @@ properties:
   compatible:
     oneOf:
       - enum:
-        - ingenic,jz4740-intc
-        - ingenic,jz4760-intc
-        - ingenic,jz4780-intc
+          - ingenic,jz4740-intc
+          - ingenic,jz4760-intc
+          - ingenic,jz4780-intc
       - items:
-        - enum:
-          - ingenic,jz4775-intc
-          - ingenic,jz4770-intc
-        - const: ingenic,jz4760-intc
+          - enum:
+              - ingenic,jz4775-intc
+              - ingenic,jz4770-intc
+          - const: ingenic,jz4760-intc
       - items:
-        - const: ingenic,x1000-intc
-        - const: ingenic,jz4780-intc
+          - const: ingenic,x1000-intc
+          - const: ingenic,jz4780-intc
       - items:
-        - const: ingenic,jz4725b-intc
-        - const: ingenic,jz4740-intc
+          - const: ingenic,jz4725b-intc
+          - const: ingenic,jz4740-intc
 
   "#interrupt-cells":
     const: 1
index 9f0eb3a..ce6aaff 100644 (file)
@@ -42,14 +42,13 @@ properties:
       Specifies the list of CPU interrupt vectors to which the GIC may not
       route interrupts. This property is ignored if the CPU is started in EIC
       mode.
-    allOf:
-      - $ref: /schemas/types.yaml#definitions/uint32-array
-      - minItems: 1
-        maxItems: 6
-        uniqueItems: true
-        items:
-          minimum: 2
-          maximum: 7
+    $ref: /schemas/types.yaml#definitions/uint32-array
+    minItems: 1
+    maxItems: 6
+    uniqueItems: true
+    items:
+      minimum: 2
+      maximum: 7
 
   mti,reserved-ipi-vectors:
     description: |
@@ -57,13 +56,12 @@ properties:
       It accepts two values: the 1st is the starting interrupt and the 2nd is
       the size of the reserved range. If not specified, the driver will
       allocate the last (2 * number of VPEs in the system).
-    allOf:
-      - $ref: /schemas/types.yaml#definitions/uint32-array
-      - items:
-          - minimum: 0
-            maximum: 254
-          - minimum: 2
-            maximum: 254
+    $ref: /schemas/types.yaml#definitions/uint32-array
+    items:
+      - minimum: 0
+        maximum: 254
+      - minimum: 2
+        maximum: 254
 
   timer:
     type: object
index d7ceb4c..503160a 100644 (file)
@@ -37,7 +37,18 @@ properties:
           - enum:
               - qcom,sc7180-smmu-500
               - qcom,sdm845-smmu-500
+              - qcom,sm8150-smmu-500
+              - qcom,sm8250-smmu-500
           - const: arm,mmu-500
+      - description: Marvell SoCs implementing "arm,mmu-500"
+        items:
+          - const: marvell,ap806-smmu-500
+          - const: arm,mmu-500
+      - description: NVIDIA SoCs that program two ARM MMU-500s identically
+        items:
+          - enum:
+              - nvidia,tegra194-smmu
+          - const: nvidia,smmu-500
       - items:
           - const: arm,mmu-500
           - const: arm,smmu-v2
@@ -55,7 +66,8 @@ properties:
           - cavium,smmu-v2
 
   reg:
-    maxItems: 1
+    minItems: 1
+    maxItems: 2
 
   '#global-interrupts':
     description: The number of global interrupts exposed by the device.
@@ -138,6 +150,23 @@ required:
 
 additionalProperties: false
 
+allOf:
+  - if:
+      properties:
+        compatible:
+          contains:
+            enum:
+              - nvidia,tegra194-smmu
+    then:
+      properties:
+        reg:
+          minItems: 2
+          maxItems: 2
+    else:
+      properties:
+        reg:
+          maxItems: 1
+
 examples:
   - |+
     /* SMMU with stream matching or stream indexing */
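A hedged sketch of a node that would exercise the NVIDIA two-instance rule
introduced above (addresses and the omitted interrupt properties are
placeholders, not taken from this patch):

	smmu@12000000 {
		compatible = "nvidia,tegra194-smmu", "nvidia,smmu-500";
		reg = <0x12000000 0x800000>,  /* instance 0 */
		      <0x11000000 0x800000>;  /* instance 1, programmed identically */
		#global-interrupts = <1>;
		#iommu-cells = <1>;
		/* global and context interrupts omitted for brevity */
	};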
index ce59a50..c1ccd85 100644 (file)
@@ -58,6 +58,7 @@ Required properties:
 - compatible : must be one of the following string:
        "mediatek,mt2701-m4u" for mt2701 which uses generation one m4u HW.
        "mediatek,mt2712-m4u" for mt2712 which uses generation two m4u HW.
+       "mediatek,mt6779-m4u" for mt6779 which uses generation two m4u HW.
        "mediatek,mt7623-m4u", "mediatek,mt2701-m4u" for mt7623 which uses
                                                     generation one m4u HW.
        "mediatek,mt8173-m4u" for mt8173 which uses generation two m4u HW.
@@ -78,6 +79,7 @@ Required properties:
        Specifies the mtk_m4u_id as defined in
        dt-binding/memory/mt2701-larb-port.h for mt2701, mt7623
        dt-binding/memory/mt2712-larb-port.h for mt2712,
+       dt-binding/memory/mt6779-larb-port.h for mt6779,
        dt-binding/memory/mt8173-larb-port.h for mt8173, and
        dt-binding/memory/mt8183-larb-port.h for mt8183.
 
index 5e4fe54..6bfa090 100644 (file)
@@ -36,6 +36,7 @@ properties:
               - renesas,ipmmu-r8a774c0 # RZ/G2E
               - renesas,ipmmu-r8a7795  # R-Car H3
               - renesas,ipmmu-r8a7796  # R-Car M3-W
+              - renesas,ipmmu-r8a77961 # R-Car M3-W+
               - renesas,ipmmu-r8a77965 # R-Car M3-N
               - renesas,ipmmu-r8a77970 # R-Car V3M
               - renesas,ipmmu-r8a77980 # R-Car V3H
index 32e0896..47938e3 100644 (file)
@@ -79,7 +79,8 @@ properties:
     description: |
       kHz; switching frequency.
     $ref: /schemas/types.yaml#/definitions/uint32
-    enum: [ 600, 640, 685, 738, 800, 872, 960, 1066, 1200, 1371, 1600, 1920, 2400, 3200, 4800, 9600 ]
+    enum: [ 600, 640, 685, 738, 800, 872, 960, 1066, 1200, 1371, 1600, 1920, 
+            2400, 3200, 4800, 9600 ]
 
   qcom,ovp:
     description: |
index 3b35eb5..8a3470b 100644 (file)
@@ -29,12 +29,12 @@ properties:
       - const: fsl,imx8-mu-scu
       - items:
           - enum:
-            - fsl,imx7s-mu
-            - fsl,imx8mq-mu
-            - fsl,imx8mm-mu
-            - fsl,imx8mn-mu
-            - fsl,imx8mp-mu
-            - fsl,imx8qxp-mu
+              - fsl,imx7s-mu
+              - fsl,imx8mq-mu
+              - fsl,imx8mm-mu
+              - fsl,imx8mn-mu
+              - fsl,imx8mp-mu
+              - fsl,imx8qxp-mu
           - const: fsl,imx6sx-mu
       - description: To communicate with i.MX8 SCU with fast IPC
         items:
index 4ac2123..168beeb 100644 (file)
@@ -24,7 +24,7 @@ properties:
   compatible:
     items:
       - enum:
-        - qcom,sm8250-ipcc
+          - qcom,sm8250-ipcc
       - const: qcom,ipcc
 
   reg:
index 75196d1..a258832 100644 (file)
@@ -20,8 +20,8 @@ properties:
     oneOf:
       - const: allwinner,sun8i-a83t-de2-rotate
       - items:
-        - const: allwinner,sun50i-a64-de2-rotate
-        - const: allwinner,sun8i-a83t-de2-rotate
+          - const: allwinner,sun50i-a64-de2-rotate
+          - const: allwinner,sun8i-a83t-de2-rotate
 
   reg:
     maxItems: 1
index 8707df6..6a56214 100644 (file)
@@ -20,8 +20,8 @@ properties:
     oneOf:
       - const: allwinner,sun8i-h3-deinterlace
       - items:
-        - const: allwinner,sun50i-a64-deinterlace
-        - const: allwinner,sun8i-h3-deinterlace
+          - const: allwinner,sun50i-a64-deinterlace
+          - const: allwinner,sun8i-h3-deinterlace
 
   reg:
     maxItems: 1
index e0084b2..d8c54f9 100644 (file)
@@ -17,17 +17,17 @@ properties:
   compatible:
     items:
       - enum:
-        - adi,adv7180
-        - adi,adv7180cp
-        - adi,adv7180st
-        - adi,adv7182
-        - adi,adv7280
-        - adi,adv7280-m
-        - adi,adv7281
-        - adi,adv7281-m
-        - adi,adv7281-ma
-        - adi,adv7282
-        - adi,adv7282-m
+          - adi,adv7180
+          - adi,adv7180cp
+          - adi,adv7180st
+          - adi,adv7182
+          - adi,adv7280
+          - adi,adv7280-m
+          - adi,adv7281
+          - adi,adv7281-m
+          - adi,adv7281-ma
+          - adi,adv7282
+          - adi,adv7282-m
 
   reg:
     maxItems: 1
@@ -58,17 +58,16 @@ allOf:
   - if:
       properties:
         compatible:
-          items:
-            - enum:
-              - adi,adv7180
-              - adi,adv7182
-              - adi,adv7280
-              - adi,adv7280-m
-              - adi,adv7281
-              - adi,adv7281-m
-              - adi,adv7281-ma
-              - adi,adv7282
-              - adi,adv7282-m
+          enum:
+            - adi,adv7180
+            - adi,adv7182
+            - adi,adv7280
+            - adi,adv7280-m
+            - adi,adv7281
+            - adi,adv7281-m
+            - adi,adv7281-ma
+            - adi,adv7282
+            - adi,adv7282-m
     then:
       required:
         - port
index cb96e95..21864ab 100644 (file)
@@ -38,39 +38,36 @@ properties:
   dongwoon,aac-mode:
     description:
       Indication of AAC mode select.
-    allOf:
-      - $ref: "/schemas/types.yaml#/definitions/uint32"
-      - enum:
-          - 1    #  AAC2 mode(operation time# 0.48 x Tvib)
-          - 2    #  AAC3 mode(operation time# 0.70 x Tvib)
-          - 3    #  AAC4 mode(operation time# 0.75 x Tvib)
-          - 5    #  AAC8 mode(operation time# 1.13 x Tvib)
-        default: 2
+    $ref: "/schemas/types.yaml#/definitions/uint32"
+    enum:
+      - 1    #  AAC2 mode(operation time# 0.48 x Tvib)
+      - 2    #  AAC3 mode(operation time# 0.70 x Tvib)
+      - 3    #  AAC4 mode(operation time# 0.75 x Tvib)
+      - 5    #  AAC8 mode(operation time# 1.13 x Tvib)
+    default: 2
 
   dongwoon,aac-timing:
     description:
       Number of AAC Timing count that controlled by one 6-bit period of
       vibration register AACT[5:0], the unit of which is 100 us.
-    allOf:
-      - $ref: "/schemas/types.yaml#/definitions/uint32"
-      - default: 0x20
-        minimum: 0x00
-        maximum: 0x3f
+    $ref: "/schemas/types.yaml#/definitions/uint32"
+    default: 0x20
+    minimum: 0x00
+    maximum: 0x3f
 
   dongwoon,clock-presc:
     description:
       Indication of VCM internal clock dividing rate select, as one multiple
       factor to calculate VCM ring periodic time Tvib.
-    allOf:
-      - $ref: "/schemas/types.yaml#/definitions/uint32"
-      - enum:
-          - 0    #  Dividing Rate -  2
-          - 1    #  Dividing Rate -  1
-          - 2    #  Dividing Rate -  1/2
-          - 3    #  Dividing Rate -  1/4
-          - 4    #  Dividing Rate -  8
-          - 5    #  Dividing Rate -  4
-        default: 1
+    $ref: "/schemas/types.yaml#/definitions/uint32"
+    enum:
+      - 0    #  Dividing Rate -  2
+      - 1    #  Dividing Rate -  1
+      - 2    #  Dividing Rate -  1/2
+      - 3    #  Dividing Rate -  1/4
+      - 4    #  Dividing Rate -  8
+      - 5    #  Dividing Rate -  4
+    default: 1
 
 required:
   - compatible
index 5ad4b8c..107c862 100644 (file)
@@ -5,7 +5,7 @@
 $id: http://devicetree.org/schemas/media/i2c/imi,rdacm2x-gmsl.yaml#
 $schema: http://devicetree.org/meta-schemas/core.yaml#
 
-title:  IMI D&D RDACM20 and RDACM21 Automotive Camera Platforms
+title: IMI D&D RDACM20 and RDACM21 Automotive Camera Platforms
 
 maintainers:
   - Jacopo Mondi <jacopo+renesas@jmondi.org>
index e7b5431..9ea8270 100644 (file)
@@ -49,7 +49,7 @@ properties:
   gpio-controller: true
 
   '#gpio-cells':
-      const: 2
+    const: 2
 
   ports:
     type: object
index 1956b2a..cde8555 100644 (file)
@@ -138,4 +138,5 @@ examples:
             };
         };
     };
-...
\ No newline at end of file
+...
+
index c9e0682..6d28258 100644 (file)
@@ -19,15 +19,15 @@ properties:
   compatible:
     items:
       - enum:
-        - renesas,r8a774a1-csi2 # RZ/G2M
-        - renesas,r8a774b1-csi2 # RZ/G2N
-        - renesas,r8a774c0-csi2 # RZ/G2E
-        - renesas,r8a7795-csi2  # R-Car H3
-        - renesas,r8a7796-csi2  # R-Car M3-W
-        - renesas,r8a77965-csi2 # R-Car M3-N
-        - renesas,r8a77970-csi2 # R-Car V3M
-        - renesas,r8a77980-csi2 # R-Car V3H
-        - renesas,r8a77990-csi2 # R-Car E3
+          - renesas,r8a774a1-csi2 # RZ/G2M
+          - renesas,r8a774b1-csi2 # RZ/G2N
+          - renesas,r8a774c0-csi2 # RZ/G2E
+          - renesas,r8a7795-csi2  # R-Car H3
+          - renesas,r8a7796-csi2  # R-Car M3-W
+          - renesas,r8a77965-csi2 # R-Car M3-N
+          - renesas,r8a77970-csi2 # R-Car V3M
+          - renesas,r8a77980-csi2 # R-Car V3H
+          - renesas,r8a77990-csi2 # R-Car E3
 
   reg:
     maxItems: 1
index 2b62945..c81dbc3 100644 (file)
@@ -31,8 +31,8 @@ properties:
     oneOf:
       - const: vdpu
       - items:
-        - const: vepu
-        - const: vdpu
+          - const: vepu
+          - const: vdpu
 
   clocks:
     maxItems: 2
index 7b9407c..2961a5b 100644 (file)
@@ -25,7 +25,7 @@ properties:
   compatible:
     items:
       - enum:
-        - xlnx,mipi-csi2-rx-subsystem-5.0
+          - xlnx,mipi-csi2-rx-subsystem-5.0
 
   reg:
     maxItems: 1
@@ -65,13 +65,12 @@ properties:
       0x2d - RAW14
       0x2e - RAW16
       0x2f - RAW20
-    allOf:
-      - $ref: /schemas/types.yaml#/definitions/uint32
-      - anyOf:
-        - minimum: 0x1e
-        - maximum: 0x24
-        - minimum: 0x28
-        - maximum: 0x2f
+    $ref: /schemas/types.yaml#/definitions/uint32
+    oneOf:
+      - minimum: 0x1e
+        maximum: 0x24
+      - minimum: 0x28
+        maximum: 0x2f
 
   xlnx,vfb:
     type: boolean
index dee5131..6848413 100644 (file)
@@ -15,12 +15,12 @@ properties:
       - const: fsl,imx6q-mmdc
       - items:
           - enum:
-            - fsl,imx6qp-mmdc
-            - fsl,imx6sl-mmdc
-            - fsl,imx6sll-mmdc
-            - fsl,imx6sx-mmdc
-            - fsl,imx6ul-mmdc
-            - fsl,imx7ulp-mmdc
+              - fsl,imx6qp-mmdc
+              - fsl,imx6sl-mmdc
+              - fsl,imx6sll-mmdc
+              - fsl,imx6sx-mmdc
+              - fsl,imx6ul-mmdc
+              - fsl,imx7ulp-mmdc
           - const: fsl,imx6q-mmdc
 
   reg:
index 17ba45a..fe0ce19 100644 (file)
@@ -16,11 +16,11 @@ properties:
   compatible:
     oneOf:
       - enum:
-        - ingenic,jz4740-nemc
-        - ingenic,jz4780-nemc
+          - ingenic,jz4740-nemc
+          - ingenic,jz4780-nemc
       - items:
-        - const: ingenic,jz4725b-nemc
-        - const: ingenic,jz4740-nemc
+          - const: ingenic,jz4725b-nemc
+          - const: ingenic,jz4740-nemc
 
   "#address-cells":
     const: 2
index b478ade..b645736 100644 (file)
@@ -5,7 +5,7 @@ The hardware block diagram please check bindings/iommu/mediatek,iommu.txt
 Mediatek SMI have two generations of HW architecture, here is the list
 which generation the SoCs use:
 generation 1: mt2701 and mt7623.
-generation 2: mt2712, mt8173 and mt8183.
+generation 2: mt2712, mt6779, mt8173 and mt8183.
 
 There's slight differences between the two SMI, for generation 2, the
 register which control the iommu port is at each larb's register base. But
@@ -18,6 +18,7 @@ Required properties:
 - compatible : must be one of :
        "mediatek,mt2701-smi-common"
        "mediatek,mt2712-smi-common"
+       "mediatek,mt6779-smi-common"
        "mediatek,mt7623-smi-common", "mediatek,mt2701-smi-common"
        "mediatek,mt8173-smi-common"
        "mediatek,mt8183-smi-common"
@@ -35,7 +36,7 @@ Required properties:
   and these 2 option clocks for generation 2 smi HW:
   - "gals0": the path0 clock of GALS(Global Async Local Sync).
   - "gals1": the path1 clock of GALS(Global Async Local Sync).
-  Here is the list which has this GALS: mt8183.
+  Here is the list which has this GALS: mt6779 and mt8183.
 
 Example:
        smi_common: smi@14022000 {
index 4b369b3..8f19dfe 100644 (file)
@@ -6,6 +6,7 @@ Required properties:
 - compatible : must be one of :
                "mediatek,mt2701-smi-larb"
                "mediatek,mt2712-smi-larb"
+               "mediatek,mt6779-smi-larb"
                "mediatek,mt7623-smi-larb", "mediatek,mt2701-smi-larb"
                "mediatek,mt8173-smi-larb"
                "mediatek,mt8183-smi-larb"
@@ -21,7 +22,7 @@ Required properties:
   - "gals": the clock for GALS(Global Async Local Sync).
   Here is the list which has this GALS: mt8183.
 
-Required property for mt2701, mt2712 and mt7623:
+Required property for mt2701, mt2712, mt6779 and mt7623:
 - mediatek,larb-id :the hardware id of this larb.
 
 Example:
index 6600056..7bfe120 100644 (file)
@@ -26,10 +26,10 @@ properties:
   compatible:
     items:
       - enum:
-        - renesas,r8a77970-rpc-if       # R-Car V3M
-        - renesas,r8a77980-rpc-if       # R-Car V3H
-        - renesas,r8a77995-rpc-if       # R-Car D3
-      - const: renesas,rcar-gen3-rpc-if # a generic R-Car gen3 device
+          - renesas,r8a77970-rpc-if       # R-Car V3M
+          - renesas,r8a77980-rpc-if       # R-Car V3H
+          - renesas,r8a77995-rpc-if       # R-Car D3
+      - const: renesas,rcar-gen3-rpc-if   # a generic R-Car gen3 device
 
   reg:
     items:
diff --git a/Documentation/devicetree/bindings/memory-controllers/st,stm32-fmc2-ebi.yaml b/Documentation/devicetree/bindings/memory-controllers/st,stm32-fmc2-ebi.yaml
new file mode 100644 (file)
index 0000000..70eaf73
--- /dev/null
@@ -0,0 +1,252 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/memory-controllers/st,stm32-fmc2-ebi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: STMicroelectronics Flexible Memory Controller 2 (FMC2) Bindings
+
+description: |
+  The FMC2 functional block makes the interface with: synchronous and
+  asynchronous static devices (such as PSNOR, PSRAM or other memory-mapped
+  peripherals) and NAND flash memories.
+  Its main purposes are:
+    - to translate AXI transactions into the appropriate external device
+      protocol
+    - to meet the access time requirements of the external devices
+  All external devices share the addresses, data and control signals with the
+  controller. Each external device is accessed by means of a unique Chip
+  Select. The FMC2 performs only one access at a time to an external device.
+
+maintainers:
+  - Christophe Kerello <christophe.kerello@st.com>
+
+properties:
+  compatible:
+    const: st,stm32mp1-fmc2-ebi
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+
+  resets:
+    maxItems: 1
+
+  "#address-cells":
+    const: 2
+
+  "#size-cells":
+    const: 1
+
+  ranges:
+    description: |
+      Reflects the memory layout with four integer values per bank. Format:
+      <bank-number> 0 <address of the bank> <size>
+
+patternProperties:
+  "^.*@[0-4],[a-f0-9]+$":
+    type: object
+
+    properties:
+      reg:
+        description: Bank number, base address and size of the device.
+
+      st,fmc2-ebi-cs-transaction-type:
+        description: |
+          Select one of the supported transaction types
+          0: Asynchronous mode 1 SRAM/FRAM.
+          1: Asynchronous mode 1 PSRAM.
+          2: Asynchronous mode A SRAM/FRAM.
+          3: Asynchronous mode A PSRAM.
+          4: Asynchronous mode 2 NOR.
+          5: Asynchronous mode B NOR.
+          6: Asynchronous mode C NOR.
+          7: Asynchronous mode D NOR.
+          8: Synchronous read synchronous write PSRAM.
+          9: Synchronous read asynchronous write PSRAM.
+          10: Synchronous read synchronous write NOR.
+          11: Synchronous read asynchronous write NOR.
+        $ref: /schemas/types.yaml#/definitions/uint32
+        minimum: 0
+        maximum: 11
+
+      st,fmc2-ebi-cs-cclk-enable:
+        description: Continuous clock enable (first bank must be configured
+          in synchronous mode). The FMC_CLK is generated continuously
+          during asynchronous and synchronous access. By default, the
+          FMC_CLK is only generated during synchronous access.
+        $ref: /schemas/types.yaml#/definitions/flag
+
+      st,fmc2-ebi-cs-mux-enable:
+        description: Address/Data multiplexed on databus (valid only with
+          NOR and PSRAM transaction types). By default, Address/Data
+          are not multiplexed.
+        $ref: /schemas/types.yaml#/definitions/flag
+
+      st,fmc2-ebi-cs-buswidth:
+        description: Data bus width
+        $ref: /schemas/types.yaml#/definitions/uint32
+        enum: [ 8, 16 ]
+        default: 16
+
+      st,fmc2-ebi-cs-waitpol-high:
+        description: Wait signal polarity (NWAIT signal active high).
+          By default, NWAIT is active low.
+        $ref: /schemas/types.yaml#/definitions/flag
+
+      st,fmc2-ebi-cs-waitcfg-enable:
+        description: The NWAIT signal indicates whether the data from the
+          device are valid or if a wait state must be inserted when accessing
+          the device in synchronous mode. By default, the NWAIT signal is
+          active one data cycle before wait state.
+        $ref: /schemas/types.yaml#/definitions/flag
+
+      st,fmc2-ebi-cs-wait-enable:
+        description: The NWAIT signal is enabled (its level is taken into
+          account after the programmed latency period to insert wait states
+          if asserted). By default, the NWAIT signal is disabled.
+        $ref: /schemas/types.yaml#/definitions/flag
+
+      st,fmc2-ebi-cs-asyncwait-enable:
+        description: The NWAIT signal is taken into account during asynchronous
+          transactions. By default, the NWAIT signal is not taken into account
+          during asynchronous transactions.
+        $ref: /schemas/types.yaml#/definitions/flag
+
+      st,fmc2-ebi-cs-cpsize:
+        description: CRAM page size. The controller splits the burst access
+          when the memory page is reached. By default, no burst split when
+          crossing page boundary.
+        $ref: /schemas/types.yaml#/definitions/uint32
+        enum: [ 0, 128, 256, 512, 1024 ]
+        default: 0
+
+      st,fmc2-ebi-cs-byte-lane-setup-ns:
+        description: This property configures the byte lane setup timing
+          defined in nanoseconds from NBLx low to Chip Select NEx low.
+
+      st,fmc2-ebi-cs-address-setup-ns:
+        description: This property defines the duration of the address setup
+          phase in nanoseconds used for asynchronous read/write transactions.
+
+      st,fmc2-ebi-cs-address-hold-ns:
+        description: This property defines the duration of the address hold
+          phase in nanoseconds used for asynchronous multiplexed read/write
+          transactions.
+
+      st,fmc2-ebi-cs-data-setup-ns:
+        description: This property defines the duration of the data setup phase
+          in nanoseconds used for asynchronous read/write transactions.
+
+      st,fmc2-ebi-cs-bus-turnaround-ns:
+        description: This property defines the delay in nanoseconds between the
+          end of current read/write transaction and the next transaction.
+
+      st,fmc2-ebi-cs-data-hold-ns:
+        description: This property defines the duration of the data hold phase
+          in nanoseconds used for asynchronous read/write transactions.
+
+      st,fmc2-ebi-cs-clk-period-ns:
+        description: This property defines the FMC_CLK output signal period in
+          nanoseconds.
+
+      st,fmc2-ebi-cs-data-latency-ns:
+        description: This property defines the data latency before reading or
+          writing the first data in nanoseconds.
+
+      st,fmc2_ebi-cs-write-address-setup-ns:
+        description: This property defines the duration of the address setup
+          phase in nanoseconds used for asynchronous write transactions.
+
+      st,fmc2-ebi-cs-write-address-hold-ns:
+        description: This property defines the duration of the address hold
+          phase in nanoseconds used for asynchronous multiplexed write
+          transactions.
+
+      st,fmc2-ebi-cs-write-data-setup-ns:
+        description: This property defines the duration of the data setup
+          phase in nanoseconds used for asynchronous write transactions.
+
+      st,fmc2-ebi-cs-write-bus-turnaround-ns:
+        description: This property defines the delay between the end of current
+          write transaction and the next transaction in nanoseconds.
+
+      st,fmc2-ebi-cs-write-data-hold-ns:
+        description: This property defines the duration of the data hold phase
+          in nanoseconds used for asynchronous write transactions.
+
+      st,fmc2-ebi-cs-max-low-pulse-ns:
+        description: This property defines the maximum chip select low pulse
+          duration in nanoseconds for synchronous transactions. When this timing
+          reaches 0, the controller splits the current access, toggles NE to
+          allow device refresh and restarts a new access.
+
+    required:
+      - reg
+
+required:
+  - "#address-cells"
+  - "#size-cells"
+  - compatible
+  - reg
+  - clocks
+  - ranges
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+    #include <dt-bindings/clock/stm32mp1-clks.h>
+    #include <dt-bindings/reset/stm32mp1-resets.h>
+    memory-controller@58002000 {
+      #address-cells = <2>;
+      #size-cells = <1>;
+      compatible = "st,stm32mp1-fmc2-ebi";
+      reg = <0x58002000 0x1000>;
+      clocks = <&rcc FMC_K>;
+      resets = <&rcc FMC_R>;
+
+      ranges = <0 0 0x60000000 0x04000000>, /* EBI CS 1 */
+               <1 0 0x64000000 0x04000000>, /* EBI CS 2 */
+               <2 0 0x68000000 0x04000000>, /* EBI CS 3 */
+               <3 0 0x6c000000 0x04000000>, /* EBI CS 4 */
+               <4 0 0x80000000 0x10000000>; /* NAND */
+
+      psram@0,0 {
+        compatible = "mtd-ram";
+        reg = <0 0x00000000 0x100000>;
+        bank-width = <2>;
+
+        st,fmc2-ebi-cs-transaction-type = <1>;
+        st,fmc2-ebi-cs-address-setup-ns = <60>;
+        st,fmc2-ebi-cs-data-setup-ns = <30>;
+        st,fmc2-ebi-cs-bus-turnaround-ns = <5>;
+      };
+
+      nand-controller@4,0 {
+        #address-cells = <1>;
+        #size-cells = <0>;
+        compatible = "st,stm32mp1-fmc2-nfc";
+        reg = <4 0x00000000 0x1000>,
+              <4 0x08010000 0x1000>,
+              <4 0x08020000 0x1000>,
+              <4 0x01000000 0x1000>,
+              <4 0x09010000 0x1000>,
+              <4 0x09020000 0x1000>;
+        interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
+        dmas = <&mdma1 20 0x2 0x12000a02 0x0 0x0>,
+               <&mdma1 20 0x2 0x12000a08 0x0 0x0>,
+               <&mdma1 21 0x2 0x12000a0a 0x0 0x0>;
+        dma-names = "tx", "rx", "ecc";
+
+        nand@0 {
+          reg = <0>;
+          nand-on-flash-bbt;
+          #address-cells = <1>;
+          #size-cells = <1>;
+        };
+      };
+    };
+
+...
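A worked note on the ranges translation in the example above: the bank 0 entry <0 0 0x60000000 0x04000000> maps child addresses of EBI chip select 1, so the psram child's reg = <0 0x00000000 0x100000> translates to CPU addresses 0x60000000 through 0x600fffff (0x60000000 + 0x100000 - 1); likewise the NAND controller region <4 0x08010000 0x1000> falls inside the bank 4 window and lands at 0x88010000 (0x80000000 + 0x08010000).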
index a5531f6..499c62c 100644 (file)
@@ -98,11 +98,11 @@ allOf:
           description:
             Databus power supply.
   - if:
-     properties:
-       compatible:
-         contains:
-           enum:
-             - cirrus,cs47l15
+      properties:
+        compatible:
+          contains:
+            enum:
+              - cirrus,cs47l15
     then:
       required:
         - MICVDD-supply
@@ -174,24 +174,24 @@ properties:
         "mclk3" For the clock supplied on MCLK3.
     oneOf:
       - items:
-        - const: mclk1
+          - const: mclk1
       - items:
-        - const: mclk2
+          - const: mclk2
       - items:
-        - const: mclk3
+          - const: mclk3
       - items:
-        - const: mclk1
-        - const: mclk2
+          - const: mclk1
+          - const: mclk2
       - items:
-        - const: mclk1
-        - const: mclk3
+          - const: mclk1
+          - const: mclk3
       - items:
-        - const: mclk2
-        - const: mclk3
+          - const: mclk2
+          - const: mclk3
       - items:
-        - const: mclk1
-        - const: mclk2
-        - const: mclk3
+          - const: mclk1
+          - const: mclk2
+          - const: mclk3
 
   AVDD-supply:
     description:
diff --git a/Documentation/devicetree/bindings/mfd/cros-ec.txt b/Documentation/devicetree/bindings/mfd/cros-ec.txt
deleted file mode 100644 (file)
index 4860eab..0000000
+++ /dev/null
@@ -1,76 +0,0 @@
-ChromeOS Embedded Controller
-
-Google's ChromeOS EC is a Cortex-M device which talks to the AP and
-implements various function such as keyboard and battery charging.
-
-The EC can be connect through various means (I2C, SPI, LPC, RPMSG) and the
-compatible string used depends on the interface. Each connection method has
-its own driver which connects to the top level interface-agnostic EC driver.
-Other Linux driver (such as cros-ec-keyb for the matrix keyboard) connect to
-the top-level driver.
-
-Required properties (I2C):
-- compatible: "google,cros-ec-i2c"
-- reg: I2C slave address
-
-Required properties (SPI):
-- compatible: "google,cros-ec-spi"
-- reg: SPI chip select
-
-Required properties (RPMSG):
-- compatible: "google,cros-ec-rpmsg"
-
-Optional properties (SPI):
-- google,cros-ec-spi-pre-delay: Some implementations of the EC need a little
-  time to wake up from sleep before they can receive SPI transfers at a high
-  clock rate. This property specifies the delay, in usecs, between the
-  assertion of the CS to the start of the first clock pulse.
-- google,cros-ec-spi-msg-delay: Some implementations of the EC require some
-  additional processing time in order to accept new transactions. If the delay
-  between transactions is not long enough the EC may not be able to respond
-  properly to subsequent transactions and cause them to hang. This property
-  specifies the delay, in usecs, introduced between transactions to account
-  for the time required by the EC to get back into a state in which new data
-  can be accepted.
-
-Required properties (LPC):
-- compatible: "google,cros-ec-lpc"
-- reg: List of (IO address, size) pairs defining the interface uses
-
-Optional properties (all):
-- google,has-vbc-nvram: Some implementations of the EC include a small
-  nvram space used to store verified boot context data. This boolean flag
-  is used to specify whether this nvram is present or not.
-
-Example for I2C:
-
-i2c@12ca0000 {
-       cros-ec@1e {
-               reg = <0x1e>;
-               compatible = "google,cros-ec-i2c";
-               interrupts = <14 0>;
-               interrupt-parent = <&wakeup_eint>;
-               wakeup-source;
-       };
-
-
-Example for SPI:
-
-spi@131b0000 {
-       ec@0 {
-               compatible = "google,cros-ec-spi";
-               reg = <0x0>;
-               interrupts = <14 0>;
-               interrupt-parent = <&wakeup_eint>;
-               wakeup-source;
-               spi-max-frequency = <5000000>;
-               controller-data {
-               cs-gpio = <&gpf0 3 4 3 0>;
-               samsung,spi-cs;
-               samsung,spi-feedback-delay = <2>;
-               };
-       };
-};
-
-
-Example for LPC is not supplied as it is not yet implemented.
index 487a844..9b6eb50 100644 (file)
@@ -79,18 +79,19 @@ properties:
             description: |
               conversion mode:
                 0 - temperature, in C*10
-                1 - pre-scaled voltage value
+                1 - pre-scaled 24-bit voltage value
                 2 - scaled voltage based on an optional resistor divider
                     and optional offset
+                3 - pre-scaled 16-bit voltage value
             $ref: /schemas/types.yaml#/definitions/uint32
-            enum: [0, 1, 2]
+            enum: [0, 1, 2, 3]
 
           gw,voltage-divider-ohms:
             description: Values of resistors for divider on raw ADC input
             maxItems: 2
             items:
-             minimum: 1000
-             maximum: 1000000
+              minimum: 1000
+              maximum: 1000000
 
           gw,voltage-offset-microvolt:
             description: |
diff --git a/Documentation/devicetree/bindings/mfd/google,cros-ec.yaml b/Documentation/devicetree/bindings/mfd/google,cros-ec.yaml
new file mode 100644 (file)
index 0000000..6a7279a
--- /dev/null
@@ -0,0 +1,129 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mfd/google,cros-ec.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ChromeOS Embedded Controller
+
+maintainers:
+  - Benson Leung <bleung@chromium.org>
+  - Enric Balletbo i Serra <enric.balletbo@collabora.com>
+  - Guenter Roeck <groeck@chromium.org>
+
+description:
+  Google's ChromeOS EC is a microcontroller which talks to the AP and
+  implements various functions such as keyboard and battery charging.
+  The EC can be connected through various interfaces (I2C, SPI, and others)
+  and the compatible string specifies which interface is being used.
+
+properties:
+  compatible:
+    oneOf:
+      - description:
+          For implementations of the EC connected through I2C.
+        const: google,cros-ec-i2c
+      - description:
+          For implementations of the EC connected through SPI.
+        const: google,cros-ec-spi
+      - description:
+          For implementations of the EC connected through RPMSG.
+        const: google,cros-ec-rpmsg
+
+  google,cros-ec-spi-pre-delay:
+    description:
+      This property specifies the delay in usecs between the
+      assertion of the CS and the first clock pulse.
+    allOf:
+      - $ref: /schemas/types.yaml#/definitions/uint32
+      - default: 0
+      - minimum: 0
+
+  google,cros-ec-spi-msg-delay:
+    description:
+      This property specifies the delay in usecs between messages.
+    allOf:
+      - $ref: /schemas/types.yaml#/definitions/uint32
+      - default: 0
+      - minimum: 0
+
+  google,has-vbc-nvram:
+    description:
+      Some implementations of the EC include a small nvram space used to
+      store verified boot context data. This boolean flag is used to specify
+      whether this nvram is present or not.
+    type: boolean
+
+  spi-max-frequency:
+    description: Maximum SPI frequency of the device in Hz.
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+required:
+  - compatible
+
+if:
+  properties:
+    compatible:
+      contains:
+        enum:
+          - google,cros-ec-i2c
+          - google,cros-ec-rpmsg
+then:
+  properties:
+    google,cros-ec-spi-pre-delay: false
+    google,cros-ec-spi-msg-delay: false
+    spi-max-frequency: false
+
+additionalProperties: false
+
+examples:
+  # Example for I2C
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+    #include <dt-bindings/interrupt-controller/irq.h>
+
+    i2c0 {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        cros-ec@1e {
+            compatible = "google,cros-ec-i2c";
+            reg = <0x1e>;
+            interrupts = <6 0>;
+            interrupt-parent = <&gpio0>;
+        };
+    };
+
+  # Example for SPI
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+    #include <dt-bindings/interrupt-controller/irq.h>
+
+    spi0 {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        cros-ec@0 {
+            compatible = "google,cros-ec-spi";
+            reg = <0x0>;
+            google,cros-ec-spi-msg-delay = <30>;
+            google,cros-ec-spi-pre-delay = <10>;
+            interrupts = <99 0>;
+            interrupt-parent = <&gpio7>;
+            spi-max-frequency = <5000000>;
+        };
+    };
+
+  # Example for RPMSG
+  - |
+    scp0 {
+        cros-ec {
+            compatible = "google,cros-ec-rpmsg";
+        };
+    };
+...
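To make the effect of the if/then block concrete: the three SPI-related properties are only valid with the SPI compatible, so a node like the following (illustrative address and delay value) would be rejected by the schema:

        cros-ec@1e {
            compatible = "google,cros-ec-i2c";
            reg = <0x1e>;
            /* invalid: SPI-only property combined with the I2C compatible */
            google,cros-ec-spi-pre-delay = <10>;
        };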
diff --git a/Documentation/devicetree/bindings/mfd/khadas,mcu.yaml b/Documentation/devicetree/bindings/mfd/khadas,mcu.yaml
new file mode 100644 (file)
index 0000000..a3b976f
--- /dev/null
@@ -0,0 +1,44 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mfd/khadas,mcu.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Khadas on-board Microcontroller Device Tree Bindings
+
+maintainers:
+  - Neil Armstrong <narmstrong@baylibre.com>
+
+description: |
+  Khadas embeds a microcontroller on their VIM and Edge boards that adds system
+  features such as PWM fan control (for VIM2 rev14 or VIM3), user memory
+  storage, IR/key resume control, system power LED control and more.
+
+properties:
+  compatible:
+    enum:
+      - khadas,mcu # MCU revision is discoverable
+
+  "#cooling-cells": # Only needed for boards having FAN control feature
+    const: 2
+
+  reg:
+    maxItems: 1
+
+required:
+  - compatible
+  - reg
+
+additionalProperties: false
+
+examples:
+  - |
+    i2c {
+      #address-cells = <1>;
+      #size-cells = <0>;
+      khadas_mcu: system-controller@18 {
+        compatible = "khadas,mcu";
+        reg = <0x18>;
+        #cooling-cells = <2>;
+      };
+    };
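A sketch of how the two cooling cells could be consumed by a thermal zone (the zone and trip names are illustrative; THERMAL_NO_LIMIT comes from dt-bindings/thermal/thermal.h):

      cooling-maps {
        map0 {
          trip = <&cpu_passive>;
          /* <&khadas_mcu min-state max-state>, matching #cooling-cells = <2> */
          cooling-device = <&khadas_mcu THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
        };
      };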
index e675611..8bcea8d 100644 (file)
@@ -33,6 +33,9 @@ properties:
     items:
       - const: mux
 
+  interrupts:
+    maxItems: 1
+
   "#address-cells":
     const: 1
 
@@ -106,11 +109,13 @@ additionalProperties: false
 examples:
   - |
     #include <dt-bindings/clock/stm32mp1-clks.h>
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
     timer@40002400 {
       compatible = "st,stm32-lptimer";
       reg = <0x40002400 0x400>;
       clocks = <&timer_clk>;
       clock-names = "mux";
+      interrupts-extended = <&exti 47 IRQ_TYPE_LEVEL_HIGH>;
       #address-cells = <1>;
       #size-cells = <0>;
 
diff --git a/Documentation/devicetree/bindings/mfd/st,stmfx.yaml b/Documentation/devicetree/bindings/mfd/st,stmfx.yaml
new file mode 100644 (file)
index 0000000..888ab4b
--- /dev/null
@@ -0,0 +1,122 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mfd/st,stmfx.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: STMicroelectronics Multi-Function eXpander (STMFX) bindings
+
+description: ST Multi-Function eXpander (STMFX) is a slave controller using I2C for
+               communication with the main MCU. Its main features are GPIO expansion,
+               main MCU IDD measurement (IDD is the amount of current that flows
+               through VDD) and resistive touchscreen controller.
+
+maintainers:
+  - Amelie Delaunay <amelie.delaunay@st.com>
+
+properties:
+  compatible:
+    const: st,stmfx-0300
+
+  reg:
+    enum: [ 0x42, 0x43 ]
+
+  interrupts:
+    maxItems: 1
+
+  drive-open-drain: true
+
+  vdd-supply:
+    maxItems: 1
+
+  pinctrl:
+    type: object
+
+    properties:
+      compatible:
+        const: st,stmfx-0300-pinctrl
+
+      "#gpio-cells":
+        const: 2
+
+      "#interrupt-cells":
+        const: 2
+
+      gpio-controller: true
+
+      interrupt-controller: true
+
+      gpio-ranges:
+        description: if all STMFX pins[24:0] are available (no other STMFX function in use),
+                     you should use gpio-ranges = <&stmfx_pinctrl 0 0 24>;
+                     if agpio[3:0] are not available (STMFX Touchscreen function in use),
+                     you should use gpio-ranges = <&stmfx_pinctrl 0 0 16>, <&stmfx_pinctrl 20 20 4>;
+                     if agpio[7:4] are not available (STMFX IDD function in use),
+                     you should use gpio-ranges = <&stmfx_pinctrl 0 0 20>;
+        maxItems: 1
+
+    patternProperties:
+      "^[a-zA-Z]*-pins$":
+        type: object
+
+        allOf:
+          - $ref: ../pinctrl/pinmux-node.yaml
+
+        properties:
+          pins: true
+          bias-disable: true
+          bias-pull-up: true
+          bias-pull-pin-default: true
+          bias-pull-down: true
+          drive-open-drain: true
+          drive-push-pull: true
+          output-high: true
+          output-low: true
+
+    additionalProperties: false
+
+    required:
+      - compatible
+      - "#gpio-cells"
+      - "#interrupt-cells"
+      - gpio-controller
+      - interrupt-controller
+      - gpio-ranges
+
+additionalProperties: false
+
+required:
+  - compatible
+  - reg
+  - interrupts
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+    i2c {
+      #address-cells = <1>;
+      #size-cells = <0>;
+      stmfx@42 {
+        compatible = "st,stmfx-0300";
+        reg = <0x42>;
+        interrupts = <8 IRQ_TYPE_EDGE_RISING>;
+        interrupt-parent = <&gpioi>;
+        vdd-supply = <&v3v3>;
+
+        stmfx_pinctrl: pinctrl {
+          compatible = "st,stmfx-0300-pinctrl";
+          #gpio-cells = <2>;
+          #interrupt-cells = <2>;
+          gpio-controller;
+          interrupt-controller;
+          gpio-ranges = <&stmfx_pinctrl 0 0 24>;
+
+          joystick_pins: joystick-pins {
+            pins = "gpio0", "gpio1", "gpio2", "gpio3", "gpio4";
+            drive-push-pull;
+            bias-pull-up;
+          };
+        };
+      };
+    };
+...
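As a sketch of a consumer of the expander's GPIOs (the LED node and pin number are illustrative; GPIO_ACTIVE_HIGH comes from dt-bindings/gpio/gpio.h and the two cells match #gpio-cells = <2>):

      leds {
        compatible = "gpio-leds";
        led-user {
          gpios = <&stmfx_pinctrl 17 GPIO_ACTIVE_HIGH>;
        };
      };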
index dd995d7..305123e 100644 (file)
@@ -113,8 +113,8 @@ properties:
             maxItems: 1
 
           st,mask-reset:
-            description: mask reset for this regulator,
-                         the regulator configuration is maintained during pmic reset.
+            description: mask reset for this regulator, the regulator configuration
+              is maintained during pmic reset.
             $ref: /schemas/types.yaml#/definitions/flag
 
           regulator-name: true
@@ -135,8 +135,8 @@ properties:
             maxItems: 1
 
           st,mask-reset:
-            description: mask reset for this regulator,
-                         the regulator configuration is maintained during pmic reset.
+            description: mask reset for this regulator, the regulator configuration
+              is maintained during pmic reset.
             $ref: /schemas/types.yaml#/definitions/flag
 
           regulator-name: true
@@ -154,8 +154,8 @@ properties:
             maxItems: 1
 
           st,mask-reset:
-            description: mask reset for this regulator,
-                         the regulator configuration is maintained during pmic reset.
+            description: mask reset for this regulator, the regulator configuration
+              is maintained during pmic reset.
             $ref: /schemas/types.yaml#/definitions/flag
 
           regulator-name: true
@@ -172,8 +172,8 @@ properties:
             maxItems: 1
 
           st,mask-reset:
-            description: mask reset for this regulator,
-                         the regulator configuration is maintained during pmic reset.
+            description: mask reset for this regulator, the regulator configuration
+              is maintained during pmic reset.
             $ref: /schemas/types.yaml#/definitions/flag
 
           regulator-name: true
@@ -198,8 +198,8 @@ properties:
             maxItems: 1
 
           st,mask-reset:
-            description: mask reset for this regulator,
-                         the regulator configuration is maintained during pmic reset.
+            description: mask reset for this regulator, the regulator configuration
+              is maintained during pmic reset.
             $ref: /schemas/types.yaml#/definitions/flag
 
           regulator-name: true
@@ -220,8 +220,8 @@ properties:
             maxItems: 1
 
           st,mask-reset:
-            description: mask reset for this regulator,
-                         the regulator configuration is maintained during pmic reset.
+            description: mask reset for this regulator, the regulator configuration
+              is maintained during pmic reset.
             $ref: /schemas/types.yaml#/definitions/flag
 
           regulator-name: true
diff --git a/Documentation/devicetree/bindings/mfd/stmfx.txt b/Documentation/devicetree/bindings/mfd/stmfx.txt
deleted file mode 100644 (file)
index f0c2f7f..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-STMicroelectonics Multi-Function eXpander (STMFX) Core bindings
-
-ST Multi-Function eXpander (STMFX) is a slave controller using I2C for
-communication with the main MCU. Its main features are GPIO expansion, main
-MCU IDD measurement (IDD is the amount of current that flows through VDD) and
-resistive touchscreen controller.
-
-Required properties:
-- compatible: should be "st,stmfx-0300".
-- reg: I2C slave address of the device.
-- interrupts: interrupt specifier triggered by MFX_IRQ_OUT signal.
-  Please refer to ../interrupt-controller/interrupt.txt
-
-Optional properties:
-- drive-open-drain: configure MFX_IRQ_OUT as open drain.
-- vdd-supply: phandle of the regulator supplying STMFX.
-
-Example:
-
-       stmfx: stmfx@42 {
-               compatible = "st,stmfx-0300";
-               reg = <0x42>;
-               interrupts = <8 IRQ_TYPE_EDGE_RISING>;
-               interrupt-parent = <&gpioi>;
-               vdd-supply = <&v3v3>;
-       };
-
-Please refer to ../pinctrl/pinctrl-stmfx.txt for STMFX GPIO expander function bindings.
index 03d0a23..c8fd5d3 100644 (file)
@@ -24,12 +24,11 @@ maintainers:
 
 properties:
   compatible:
-    anyOf:
-      - items:
-        - enum:
-           - ti,j721e-system-controller
-        - const: syscon
-        - const: simple-mfd
+    items:
+      - enum:
+          - ti,j721e-system-controller
+      - const: syscon
+      - const: simple-mfd
 
   "#address-cells":
     const: 1
index 56f244b..c2f9302 100644 (file)
@@ -26,7 +26,7 @@ Optional node:
 Example:
 /*
  * Integrated Power Management Chip
- * http://www.ti.com/lit/ds/symlink/twl6030.pdf
+ * https://www.ti.com/lit/ds/symlink/twl6030.pdf
  */
 twl@48 {
     compatible = "ti,twl6030";
index 4c0106c..9e762d4 100644 (file)
@@ -73,13 +73,13 @@ allOf:
       required:
         - DBVDD3-supply
   - if:
-     properties:
-       compatible:
-         contains:
-           enum:
-             - cirrus,cs47l24
-             - wlf,wm1831
-             - wlf,wm8997
+      properties:
+        compatible:
+          contains:
+            enum:
+              - cirrus,cs47l24
+              - wlf,wm1831
+              - wlf,wm8997
     then:
       properties:
         SPKVDD-supply:
@@ -183,12 +183,12 @@ properties:
       clock supplied on MCLK2, recommended to be an always on 32k clock.
     oneOf:
       - items:
-        - const: mclk1
+          - const: mclk1
       - items:
-        - const: mclk2
+          - const: mclk2
       - items:
-        - const: mclk1
-        - const: mclk2
+          - const: mclk1
+          - const: mclk2
 
   reset-gpios:
     maxItems: 1
index 7a386a5..0cd74c3 100644 (file)
@@ -21,9 +21,9 @@ properties:
   compatible:
     items:
       - enum:
-        - amlogic,meson8-sdhc
-        - amlogic,meson8b-sdhc
-        - amlogic,meson8m2-sdhc
+          - amlogic,meson8-sdhc
+          - amlogic,meson8b-sdhc
+          - amlogic,meson8m2-sdhc
       - const: amlogic,meson-mx-sdhc
 
   reg:
index 75dc116..10b4596 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Freescale Enhanced Secure Digital Host Controller (eSDHC) for i.MX
 
 maintainers:
-  - Shawn Guo <shawn.guo@linaro.org>
+  - Shawn Guo <shawnguo@kernel.org>
 
 allOf:
   - $ref: "mmc-controller.yaml"
index e60bfe9..9b63df1 100644 (file)
@@ -16,14 +16,14 @@ properties:
   compatible:
     oneOf:
       - enum:
-        - ingenic,jz4740-mmc
-        - ingenic,jz4725b-mmc
-        - ingenic,jz4760-mmc
-        - ingenic,jz4780-mmc
-        - ingenic,x1000-mmc
+          - ingenic,jz4740-mmc
+          - ingenic,jz4725b-mmc
+          - ingenic,jz4760-mmc
+          - ingenic,jz4780-mmc
+          - ingenic,x1000-mmc
       - items:
-        - const: ingenic,jz4770-mmc
-        - const: ingenic,jz4760-mmc
+          - const: ingenic,jz4770-mmc
+          - const: ingenic,jz4760-mmc
 
   reg:
     maxItems: 1
index 1cccc04..bec8f8c 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Freescale MXS MMC controller
 
 maintainers:
-  - Shawn Guo <shawn.guo@linaro.org>
+  - Shawn Guo <shawnguo@kernel.org>
 
 description: |
   The Freescale MXS Synchronous Serial Ports (SSP) can act as a MMC controller
index e5dbc20..b4c3fd4 100644 (file)
@@ -130,9 +130,9 @@ then:
   required:
     - clock-names
   description:
-     The internal card detection logic that exists in these controllers is
-     sectioned off to be run by a separate second clock source to allow
-     the main core clock to be turned off to save power.
+    The internal card detection logic that exists in these controllers is
+    sectioned off to be run by a separate second clock source to allow
+    the main core clock to be turned off to save power.
 
 unevaluatedProperties: false
 
index cb9794e..b328769 100644 (file)
@@ -14,12 +14,10 @@ maintainers:
 
 properties:
   compatible:
-    oneOf:
-      - items:
-        - enum:
+    items:
+      - enum:
           - xlnx,zynqmp-nand-controller
-        - enum:
-          - arasan,nfc-v3p10
+      - const: arasan,nfc-v3p10
 
   reg:
     maxItems: 1
index cfb18ab..edebeae 100644 (file)
@@ -4,8 +4,8 @@ This file provides information, what the device node for the davinci/keystone
 NAND interface contains.
 
 Documentation:
-Davinci DM646x - http://www.ti.com/lit/ug/sprueq7c/sprueq7c.pdf
-Kestone - http://www.ti.com/lit/ug/sprugz3a/sprugz3a.pdf
+Davinci DM646x - https://www.ti.com/lit/ug/sprueq7c/sprueq7c.pdf
+Keystone - https://www.ti.com/lit/ug/sprugz3a/sprugz3a.pdf
 
 Required properties:
 
index fce4894..25f07c1 100644 (file)
@@ -7,14 +7,16 @@ Required properties:
 - fsl,upm-cmd-offset : UPM pattern offset for the command latch.
 
 Optional properties:
-- fsl,upm-wait-flags : add chip-dependent short delays after running the
-       UPM pattern (0x1), after writing a data byte (0x2) or after
-       writing out a buffer (0x4).
 - fsl,upm-addr-line-cs-offsets : address offsets for multi-chip support.
        The corresponding address lines are used to select the chip.
 - gpios : may specify optional GPIOs connected to the Ready-Not-Busy pins
        (R/B#). For multi-chip devices, "n" GPIO definitions are required
        according to the number of chips.
+
+Deprecated properties:
+- fsl,upm-wait-flags : add chip-dependent short delays after running the
+       UPM pattern (0x1), after writing a data byte (0x2) or after
+       writing out a buffer (0x4).
 - chip-delay : chip dependent delay for transferring data from array to
        read registers (tR). Required if property "gpios" is not used
        (R/B# pins not connected).
@@ -52,8 +54,6 @@ upm@3,0 {
        fsl,upm-cmd-offset = <0x08>;
        /* Multi-chip NAND device */
        fsl,upm-addr-line-cs-offsets = <0x0 0x200>;
-       fsl,upm-wait-flags = <0x5>;
-       chip-delay = <25>; // in micro-seconds
 
        nand@0 {
                #address-cells = <1>;
index 354cb63..3201372 100644 (file)
@@ -4,7 +4,7 @@
 $id: http://devicetree.org/schemas/mtd/gpmi-nand.yaml#
 $schema: http://devicetree.org/meta-schemas/core.yaml#
 
-title:  Freescale General-Purpose Media Interface (GPMI) binding
+title: Freescale General-Purpose Media Interface (GPMI) binding
 
 maintainers:
   - Han Xu <han.xu@nxp.com>
index ee4d1d0..73b86f2 100644 (file)
@@ -4,7 +4,7 @@
 $id: http://devicetree.org/schemas/mtd/mxc-nand.yaml#
 $schema: http://devicetree.org/meta-schemas/core.yaml#
 
-title:  Freescale's mxc_nand binding
+title: Freescale's mxc_nand binding
 
 maintainers:
   - Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
index cde7c4d..40fc5b0 100644 (file)
@@ -114,6 +114,13 @@ patternProperties:
         description:
           Contains the native Ready/Busy IDs.
 
+      rb-gpios:
+        description:
+          Contains one or more GPIO descriptors (the number of descriptors
+          depends on the number of R/B pins exposed by the flash) for the
+          Ready/Busy pins. Active state refers to the NAND ready state and
+          should be set to GPIO_ACTIVE_HIGH unless the signal is inverted.
+
     required:
       - reg
 
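A minimal sketch of the rb-gpios alternative described above (the GPIO controller phandle and line number are illustrative):

        nand@0 {
          reg = <0>;
          /* R/B# wired to a GPIO line instead of a native ready/busy input */
          rb-gpios = <&gpioc 6 GPIO_ACTIVE_HIGH>;
        };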
index b059267..28a08ff 100644 (file)
@@ -9,32 +9,19 @@ title: STMicroelectronics Flexible Memory Controller 2 (FMC2) Bindings
 maintainers:
   - Christophe Kerello <christophe.kerello@st.com>
 
-allOf:
-  - $ref: "nand-controller.yaml#"
-
 properties:
   compatible:
-    const: st,stm32mp15-fmc2
+    enum:
+      - st,stm32mp15-fmc2
+      - st,stm32mp1-fmc2-nfc
 
   reg:
-    items:
-      - description: Registers
-      - description: Chip select 0 data
-      - description: Chip select 0 command
-      - description: Chip select 0 address space
-      - description: Chip select 1 data
-      - description: Chip select 1 command
-      - description: Chip select 1 address space
+    minItems: 6
+    maxItems: 7
 
   interrupts:
     maxItems: 1
 
-  clocks:
-    maxItems: 1
-
-  resets:
-    maxItems: 1
-
   dmas:
     items:
       - description: tx DMA channel
@@ -55,13 +42,57 @@ patternProperties:
         const: 512
 
       nand-ecc-strength:
-        enum: [1, 4 ,8 ]
+        enum: [1, 4, 8]
+
+allOf:
+  - $ref: "nand-controller.yaml#"
+
+  - if:
+      properties:
+        compatible:
+          contains:
+            const: st,stm32mp15-fmc2
+    then:
+      properties:
+        reg:
+          items:
+            - description: Registers
+            - description: Chip select 0 data
+            - description: Chip select 0 command
+            - description: Chip select 0 address space
+            - description: Chip select 1 data
+            - description: Chip select 1 command
+            - description: Chip select 1 address space
+
+        clocks:
+          maxItems: 1
+
+        resets:
+          maxItems: 1
+
+      required:
+        - clocks
+
+  - if:
+      properties:
+        compatible:
+          contains:
+            const: st,stm32mp1-fmc2-nfc
+    then:
+      properties:
+        reg:
+          items:
+            - description: Chip select 0 data
+            - description: Chip select 0 command
+            - description: Chip select 0 address space
+            - description: Chip select 1 data
+            - description: Chip select 1 command
+            - description: Chip select 1 address space
 
 required:
   - compatible
   - reg
   - interrupts
-  - clocks
 
 examples:
   - |
@@ -77,13 +108,13 @@ examples:
             <0x81000000 0x1000>,
             <0x89010000 0x1000>,
             <0x89020000 0x1000>;
-            interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
-            dmas = <&mdma1 20 0x10 0x12000a02 0x0 0x0>,
-                   <&mdma1 20 0x10 0x12000a08 0x0 0x0>,
-                   <&mdma1 21 0x10 0x12000a0a 0x0 0x0>;
-            dma-names = "tx", "rx", "ecc";
-            clocks = <&rcc FMC_K>;
-            resets = <&rcc FMC_R>;
+      interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
+      dmas = <&mdma1 20 0x2 0x12000a02 0x0 0x0>,
+             <&mdma1 20 0x2 0x12000a08 0x0 0x0>,
+             <&mdma1 21 0x2 0x12000a0a 0x0 0x0>;
+      dma-names = "tx", "rx", "ecc";
+      clocks = <&rcc FMC_K>;
+      resets = <&rcc FMC_R>;
       #address-cells = <1>;
       #size-cells = <0>;
 
index faea214..6a1ec50 100644 (file)
@@ -85,8 +85,8 @@ patternProperties:
 
 oneOf:
   - required:
-    - ports
+      - ports
   - required:
-    - ethernet-ports
+      - ethernet-ports
 
 ...
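The oneOf above only requires that a switch node contain exactly one of the two container names; a minimal sketch (the switch compatible and port contents are placeholders):

        switch@0 {
          compatible = "vendor,example-switch";
          reg = <0>;

          ethernet-ports {
            #address-cells = <1>;
            #size-cells = <0>;

            port@0 {
              reg = <0>;
              label = "lan1";
            };
          };
        };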
index 1c44740..fa2baca 100644 (file)
@@ -54,7 +54,8 @@ properties:
 
   phy-connection-type:
     description:
-      Operation mode of the PHY interface
+      Specifies interface type between the Ethernet device and a physical
+      layer (PHY) device.
     enum:
       # There is not a standard bus between the MAC and the PHY,
       # something proprietary is being used to embed the PHY in the
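A sketch of the property in use on an Ethernet controller node (the node and PHY labels are illustrative; "rgmii-id" is one of the standard interface-type values):

        &ethernet0 {
          phy-handle = <&phy0>;
          phy-connection-type = "rgmii-id";
        };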
index a356127..8594f11 100644 (file)
@@ -43,7 +43,7 @@ description:
 
 properties:
   compatible:
-      const: "qcom,sdm845-ipa"
+    const: "qcom,sdm845-ipa"
 
   reg:
     items:
@@ -64,7 +64,7 @@ properties:
     maxItems: 1
 
   clock-names:
-      const: core
+    const: core
 
   interrupts:
     items:
@@ -96,8 +96,8 @@ properties:
     $ref: /schemas/types.yaml#/definitions/phandle-array
     description: State bits used in by the AP to signal the modem.
     items:
-    - description: Whether the "ipa-clock-enabled" state bit is valid
-    - description: Whether the IPA clock is enabled (if valid)
+      - description: Whether the "ipa-clock-enabled" state bit is valid
+      - description: Whether the IPA clock is enabled (if valid)
 
   qcom,smem-state-names:
     $ref: /schemas/types.yaml#/definitions/string-array
@@ -140,9 +140,9 @@ required:
 
 oneOf:
   - required:
-    - modem-init
+      - modem-init
   - required:
-    - memory-region
+      - memory-region
 
 examples:
   - |
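The oneOf above makes modem-init and memory-region mutually exclusive, so an IPA node carries exactly one of the two: either the bare modem-init flag, or a reserved-memory phandle such as memory-region = <&ipa_fw_mem>; (phandle name illustrative), never both.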
index 7d84a86..cbacc04 100644 (file)
@@ -46,10 +46,10 @@ properties:
   clock-names:
     oneOf:
       - items:          # for Pro4
-        - const: gio
-        - const: ether
-        - const: ether-gb
-        - const: ether-phy
+          - const: gio
+          - const: ether
+          - const: ether-gb
+          - const: ether-phy
       - const: ether    # for others
 
   resets:
@@ -59,8 +59,8 @@ properties:
   reset-names:
     oneOf:
       - items:          # for Pro4
-        - const: gio
-        - const: ether
+          - const: gio
+          - const: ether
       - const: ether    # for others
 
   socionext,syscon-phy-mode:
index fafa34c..e5dff66 100644 (file)
@@ -48,11 +48,11 @@ properties:
     minItems: 3
     maxItems: 5
     items:
-        - description: GMAC main clock
-        - description: MAC TX clock
-        - description: MAC RX clock
-        - description: For MPU family, used for power mode
-        - description: For MPU family, used for PHY without quartz
+      - description: GMAC main clock
+      - description: MAC TX clock
+      - description: MAC RX clock
+      - description: For MPU family, used for power mode
+      - description: For MPU family, used for PHY without quartz
 
   clock-names:
     minItems: 3
@@ -89,7 +89,7 @@ required:
   - st,syscon
 
 examples:
- - |
 - |
     #include <dt-bindings/interrupt-controller/arm-gic.h>
     #include <dt-bindings/clock/stm32mp1-clks.h>
     #include <dt-bindings/reset/stm32mp1-resets.h>
index 3ea0e12..dadeb8f 100644 (file)
@@ -35,7 +35,7 @@ properties:
   reg:
     maxItems: 1
     description:
-       The physical base address and size of full the CPSW module IO range
+      The physical base address and size of the full CPSW module IO range
 
   '#address-cells':
     const: 1
@@ -85,36 +85,36 @@ properties:
 
     patternProperties:
       "^port@[0-9]+$":
-          type: object
-          description: CPSW external ports
-
-          allOf:
-            - $ref: ethernet-controller.yaml#
-
-          properties:
-            reg:
-              items:
-                - enum: [1, 2]
-              description: CPSW port number
-
-            phys:
-              maxItems: 1
-              description:  phandle on phy-gmii-sel PHY
-
-            label:
-              description: label associated with this port
-
-            ti,dual-emac-pvid:
-              $ref: /schemas/types.yaml#/definitions/uint32
-              minimum: 1
-              maximum: 1024
-              description:
-                Specifies default PORT VID to be used to segregate
-                ports. Default value - CPSW port number.
-
-          required:
-            - reg
-            - phys
+        type: object
+        description: CPSW external ports
+
+        allOf:
+          - $ref: ethernet-controller.yaml#
+
+        properties:
+          reg:
+            items:
+              - enum: [1, 2]
+            description: CPSW port number
+
+          phys:
+            maxItems: 1
+            description: phandle on phy-gmii-sel PHY
+
+          label:
+            description: label associated with this port
+
+          ti,dual-emac-pvid:
+            $ref: /schemas/types.yaml#/definitions/uint32
+            minimum: 1
+            maximum: 1024
+            description:
+              Specifies default PORT VID to be used to segregate
+              ports. Default value - CPSW port number.
+
+        required:
+          - reg
+          - phys
 
   cpts:
     type: object
index 1745793..227270c 100644 (file)
@@ -55,7 +55,7 @@ properties:
   reg:
     maxItems: 1
     description:
-       The physical base address and size of full the CPSW2G NUSS IO range
+      The physical base address and size of the full CPSW2G NUSS IO range
 
   reg-names:
     items:
@@ -100,38 +100,38 @@ properties:
 
     patternProperties:
       port@1:
-       type: object
-       description: CPSW2G NUSS external ports
-
-       $ref: ethernet-controller.yaml#
-
-       properties:
-         reg:
-           items:
-             - const: 1
-           description: CPSW port number
-
-         phys:
-           maxItems: 1
-           description:  phandle on phy-gmii-sel PHY
-
-         label:
-           description: label associated with this port
-
-         ti,mac-only:
-           $ref: /schemas/types.yaml#definitions/flag
-           description:
-             Specifies the port works in mac-only mode.
-
-         ti,syscon-efuse:
-           $ref: /schemas/types.yaml#definitions/phandle-array
-           description:
-             Phandle to the system control device node which provides access
-             to efuse IO range with MAC addresses
-
-       required:
-         - reg
-         - phys
+        type: object
+        description: CPSW2G NUSS external ports
+
+        $ref: ethernet-controller.yaml#
+
+        properties:
+          reg:
+            items:
+              - const: 1
+            description: CPSW port number
+
+          phys:
+            maxItems: 1
+            description: phandle on phy-gmii-sel PHY
+
+          label:
+            description: label associated with this port
+
+          ti,mac-only:
+            $ref: /schemas/types.yaml#/definitions/flag
+            description:
+              Specifies that the port works in mac-only mode.
+
+          ti,syscon-efuse:
+            $ref: /schemas/types.yaml#/definitions/phandle-array
+            description:
+              Phandle to the system control device node which provides access
+              to efuse IO range with MAC addresses
+
+        required:
+          - reg
+          - phys
 
     additionalProperties: false
 
index fe9c7df..1c9d7f0 100644 (file)
@@ -21,18 +21,18 @@ properties:
   compatible:
     items:
       - enum:
-        - fsl,imx6q-ocotp
-        - fsl,imx6sl-ocotp
-        - fsl,imx6sx-ocotp
-        - fsl,imx6ul-ocotp
-        - fsl,imx6ull-ocotp
-        - fsl,imx7d-ocotp
-        - fsl,imx6sll-ocotp
-        - fsl,imx7ulp-ocotp
-        - fsl,imx8mq-ocotp
-        - fsl,imx8mm-ocotp
-        - fsl,imx8mn-ocotp
-        - fsl,imx8mp-ocotp
+          - fsl,imx6q-ocotp
+          - fsl,imx6sl-ocotp
+          - fsl,imx6sx-ocotp
+          - fsl,imx6ul-ocotp
+          - fsl,imx6ull-ocotp
+          - fsl,imx7d-ocotp
+          - fsl,imx6sll-ocotp
+          - fsl,imx7ulp-ocotp
+          - fsl,imx8mq-ocotp
+          - fsl,imx8mm-ocotp
+          - fsl,imx8mn-ocotp
+          - fsl,imx8mp-ocotp
       - const: syscon
 
   reg:
index d10a0cf..59aca6d 100644 (file)
@@ -46,8 +46,8 @@ properties:
     const: 1
 
 required:
-   - compatible
-   - reg
+  - compatible
+  - reg
 
 examples:
   - |
index 64b2c64..a1e2be7 100644 (file)
@@ -9,6 +9,14 @@ title: PCIe RC controller on Intel Gateway SoCs
 maintainers:
   - Dilip Kota <eswara.kota@linux.intel.com>
 
+select:
+  properties:
+    compatible:
+      contains:
+        const: intel,lgm-pcie
+  required:
+    - compatible
+
 properties:
   compatible:
     items:
index cfe25cf..b3c3d0c 100644 (file)
@@ -31,8 +31,7 @@ properties:
   ti,syscon-pcie-ctrl:
     description: Phandle to the SYSCON entry required for configuring PCIe mode
                  and link speed.
-    allOf:
-      - $ref: /schemas/types.yaml#/definitions/phandle
+    $ref: /schemas/types.yaml#/definitions/phandle
 
   power-domains:
     maxItems: 1
index d7b6048..8200ba0 100644 (file)
@@ -31,8 +31,7 @@ properties:
   ti,syscon-pcie-ctrl:
     description: Phandle to the SYSCON entry required for configuring PCIe mode
       and link speed.
-    allOf:
-      - $ref: /schemas/types.yaml#/definitions/phandle
+    $ref: /schemas/types.yaml#/definitions/phandle
 
   power-domains:
     maxItems: 1
index 9e32cb4..0d2557b 100644 (file)
@@ -37,9 +37,9 @@ properties:
     const: 0
 
   phy-supply:
-     description:
-       Phandle to a regulator that provides power to the PHY. This
-       regulator will be managed during the PHY power on/off sequence.
+    description:
+      Phandle to a regulator that provides power to the PHY. This
+      regulator will be managed during the PHY power on/off sequence.
 
 required:
   - compatible
index cb71561..fb29ad8 100644 (file)
@@ -100,9 +100,9 @@ properties:
           - const: linestate
           - const: otg-mux
           - items:
-            - const: otg-bvalid
-            - const: otg-id
-            - const: linestate
+              - const: otg-bvalid
+              - const: otg-id
+              - const: linestate
 
       phy-supply:
         description:
index e4cd4a1..185cdea 100644 (file)
@@ -37,7 +37,7 @@ properties:
       - description: Address and length of PHY's common serdes block.
 
   "#clock-cells":
-     enum: [ 1, 2 ]
+    enum: [ 1, 2 ]
 
   "#address-cells":
     enum: [ 1, 2 ]
@@ -65,16 +65,15 @@ properties:
 
   vdda-phy-supply:
     description:
-        Phandle to a regulator supply to PHY core block.
+      Phandle to a regulator supply to PHY core block.
 
   vdda-pll-supply:
     description:
-        Phandle to 1.8V regulator supply to PHY refclk pll block.
+      Phandle to 1.8V regulator supply to PHY refclk pll block.
 
   vddp-ref-clk-supply:
     description:
-        Phandle to a regulator supply to any specific refclk
-        pll block.
+      Phandle to a regulator supply to any specific refclk pll block.
 
 #Required nodes:
 patternProperties:
@@ -184,8 +183,8 @@ allOf:
             - description: phy common block reset.
         reset-names:
           items:
-             - const: phy
-             - const: common
+            - const: phy
+            - const: common
   - if:
       properties:
         compatible:
index 6e24875..ef8ae9f 100644 (file)
@@ -26,7 +26,7 @@ properties:
       - const: dp_com
 
   "#clock-cells":
-     enum: [ 1, 2 ]
+    enum: [ 1, 2 ]
 
   "#address-cells":
     enum: [ 1, 2 ]
@@ -62,16 +62,15 @@ properties:
 
   vdda-phy-supply:
     description:
-        Phandle to a regulator supply to PHY core block.
+      Phandle to a regulator supply to PHY core block.
 
   vdda-pll-supply:
     description:
-        Phandle to 1.8V regulator supply to PHY refclk pll block.
+      Phandle to 1.8V regulator supply to PHY refclk pll block.
 
   vddp-ref-clk-supply:
     description:
-        Phandle to a regulator supply to any specific refclk
-        pll block.
+      Phandle to a regulator supply to any specific refclk pll block.
 
 #Required nodes:
 patternProperties:
index 9ba62dc..ccda928 100644 (file)
@@ -17,15 +17,15 @@ properties:
   compatible:
     oneOf:
       - items:
-        - enum:
-          - qcom,ipq8074-qusb2-phy
-          - qcom,msm8996-qusb2-phy
-          - qcom,msm8998-qusb2-phy
+          - enum:
+              - qcom,ipq8074-qusb2-phy
+              - qcom,msm8996-qusb2-phy
+              - qcom,msm8998-qusb2-phy
       - items:
-        - enum:
-          - qcom,sc7180-qusb2-phy
-          - qcom,sdm845-qusb2-phy
-        - const: qcom,qusb2-v2-phy
+          - enum:
+              - qcom,sc7180-qusb2-phy
+              - qcom,sdm845-qusb2-phy
+          - const: qcom,qusb2-v2-phy
   reg:
     maxItems: 1
 
@@ -49,12 +49,12 @@ properties:
       - const: iface
 
   vdda-pll-supply:
-     description:
-       Phandle to 1.8V regulator supply to PHY refclk pll block.
+    description:
+      Phandle to 1.8V regulator supply to PHY refclk pll block.
 
   vdda-phy-dpdm-supply:
-     description:
-       Phandle to 3.1V regulator supply to Dp/Dm port signals.
+    description:
+      Phandle to 3.1V regulator supply to Dp/Dm port signals.
 
   resets:
     maxItems: 1
@@ -64,12 +64,12 @@ properties:
   nvmem-cells:
     maxItems: 1
     description:
-        Phandle to nvmem cell that contains 'HS Tx trim'
-        tuning parameter value for qusb2 phy.
+      Phandle to nvmem cell that contains 'HS Tx trim'
+      tuning parameter value for qusb2 phy.
 
   qcom,tcsr-syscon:
     description:
-        Phandle to TCSR syscon register region.
+      Phandle to TCSR syscon register region.
     $ref: /schemas/types.yaml#/definitions/phandle
 
 if:
index 86f4909..a06831f 100644 (file)
@@ -33,8 +33,8 @@ properties:
   clock-names:
     oneOf:
       - items:            # for Pro5
-        - const: gio
-        - const: link
+          - const: gio
+          - const: link
       - const: link       # for others
 
   resets:
@@ -44,8 +44,8 @@ properties:
   reset-names:
     oneOf:
       - items:            # for Pro5
-        - const: gio
-        - const: link
+          - const: gio
+          - const: link
       - const: link       # for others
 
   socionext,syscon:
index c871d46..6fa5caa 100644 (file)
@@ -37,12 +37,12 @@ properties:
     oneOf:
       - const: link          # for PXs2
       - items:               # for PXs3 with phy-ext
-        - const: link
-        - const: phy
-        - const: phy-ext
+          - const: link
+          - const: phy
+          - const: phy-ext
       - items:               # for others
-        - const: link
-        - const: phy
+          - const: link
+          - const: phy
 
   resets:
     maxItems: 2
index edff2c9..9d46715 100644 (file)
@@ -37,15 +37,15 @@ properties:
   clock-names:
     oneOf:
       - items:             # for Pro4, Pro5
-        - const: gio
-        - const: link
+          - const: gio
+          - const: link
       - items:             # for PXs3 with phy-ext
-        - const: link
-        - const: phy
-        - const: phy-ext
+          - const: link
+          - const: phy
+          - const: phy-ext
       - items:             # for others
-        - const: link
-        - const: phy
+          - const: link
+          - const: phy
 
   resets:
     maxItems: 2
@@ -53,11 +53,11 @@ properties:
   reset-names:
     oneOf:
       - items:              # for Pro4,Pro5
-        - const: gio
-        - const: link
+          - const: gio
+          - const: link
       - items:              # for others
-        - const: link
-        - const: phy
+          - const: link
+          - const: phy
 
   vbus-supply:
     description: A phandle to the regulator for USB VBUS
index 3f913d6..5ffc95c 100644 (file)
@@ -203,7 +203,8 @@ examples:
            };
 
            refclk-dig {
-                  clocks = <&k3_clks 292 11>, <&k3_clks 292 0>, <&dummy_cmn_refclk>, <&dummy_cmn_refclk1>;
+                  clocks = <&k3_clks 292 11>, <&k3_clks 292 0>, 
+                          <&dummy_cmn_refclk>, <&dummy_cmn_refclk1>;
                   #clock-cells = <0>;
                   assigned-clocks = <&wiz0_refclk_dig>;
                   assigned-clock-parents = <&k3_clks 292 11>;
index 017d959..54631dc 100644 (file)
@@ -34,22 +34,22 @@ patternProperties:
       patternProperties:
         "^function|groups$":
           $ref: "/schemas/types.yaml#/definitions/string"
-          enum: [ACPI, ADC0, ADC1, ADC10, ADC11, ADC12, ADC13, ADC14, ADC15,
-            ADC2, ADC3, ADC4, ADC5, ADC6, ADC7, ADC8, ADC9, BMCINT, DDCCLK, DDCDAT,
-            EXTRST, FLACK, FLBUSY, FLWP, GPID, GPID0, GPID2, GPID4, GPID6, GPIE0,
-            GPIE2, GPIE4, GPIE6, I2C10, I2C11, I2C12, I2C13, I2C14, I2C3, I2C4,
-            I2C5, I2C6, I2C7, I2C8, I2C9, LPCPD, LPCPME, LPCRST, LPCSMI, MAC1LINK,
-            MAC2LINK, MDIO1, MDIO2, NCTS1, NCTS2, NCTS3, NCTS4, NDCD1, NDCD2,
-            NDCD3, NDCD4, NDSR1, NDSR2, NDSR3, NDSR4, NDTR1, NDTR2, NDTR3, NDTR4,
-            NDTS4, NRI1, NRI2, NRI3, NRI4, NRTS1, NRTS2, NRTS3, OSCCLK, PWM0,
-            PWM1, PWM2, PWM3, PWM4, PWM5, PWM6, PWM7, RGMII1, RGMII2, RMII1,
-            RMII2, ROM16, ROM8, ROMCS1, ROMCS2, ROMCS3, ROMCS4, RXD1, RXD2, RXD3,
-            RXD4, SALT1, SALT2, SALT3, SALT4, SD1, SD2, SGPMCK, SGPMI, SGPMLD,
-            SGPMO, SGPSCK, SGPSI0, SGPSI1, SGPSLD, SIOONCTRL, SIOPBI, SIOPBO,
-            SIOPWREQ, SIOPWRGD, SIOS3, SIOS5, SIOSCI, SPI1, SPI1DEBUG, SPI1PASSTHRU,
-            SPICS1, TIMER3, TIMER4, TIMER5, TIMER6, TIMER7, TIMER8, TXD1, TXD2,
-            TXD3, TXD4, UART6, USB11D1, USB11H2, USB2D1, USB2H1, USBCKI, VGABIOS_ROM,
-            VGAHS, VGAVS, VPI18, VPI24, VPI30, VPO12, VPO24, WDTRST1, WDTRST2]
+          enum: [ ACPI, ADC0, ADC1, ADC10, ADC11, ADC12, ADC13, ADC14, ADC15,
+                  ADC2, ADC3, ADC4, ADC5, ADC6, ADC7, ADC8, ADC9, BMCINT, DDCCLK, DDCDAT,
+                  EXTRST, FLACK, FLBUSY, FLWP, GPID, GPID0, GPID2, GPID4, GPID6, GPIE0,
+                  GPIE2, GPIE4, GPIE6, I2C10, I2C11, I2C12, I2C13, I2C14, I2C3, I2C4,
+                  I2C5, I2C6, I2C7, I2C8, I2C9, LPCPD, LPCPME, LPCRST, LPCSMI, MAC1LINK,
+                  MAC2LINK, MDIO1, MDIO2, NCTS1, NCTS2, NCTS3, NCTS4, NDCD1, NDCD2,
+                  NDCD3, NDCD4, NDSR1, NDSR2, NDSR3, NDSR4, NDTR1, NDTR2, NDTR3, NDTR4,
+                  NDTS4, NRI1, NRI2, NRI3, NRI4, NRTS1, NRTS2, NRTS3, OSCCLK, PWM0,
+                  PWM1, PWM2, PWM3, PWM4, PWM5, PWM6, PWM7, RGMII1, RGMII2, RMII1,
+                  RMII2, ROM16, ROM8, ROMCS1, ROMCS2, ROMCS3, ROMCS4, RXD1, RXD2, RXD3,
+                  RXD4, SALT1, SALT2, SALT3, SALT4, SD1, SD2, SGPMCK, SGPMI, SGPMLD,
+                  SGPMO, SGPSCK, SGPSI0, SGPSI1, SGPSLD, SIOONCTRL, SIOPBI, SIOPBO,
+                  SIOPWREQ, SIOPWRGD, SIOS3, SIOS5, SIOSCI, SPI1, SPI1DEBUG, SPI1PASSTHRU,
+                  SPICS1, TIMER3, TIMER4, TIMER5, TIMER6, TIMER7, TIMER8, TXD1, TXD2,
+                  TXD3, TXD4, UART6, USB11D1, USB11H2, USB2D1, USB2H1, USBCKI, VGABIOS_ROM,
+                  VGAHS, VGAVS, VPI18, VPI24, VPI30, VPO12, VPO24, WDTRST1, WDTRST2]
 
 required:
   - compatible
index c643d6d..a90c0fe 100644 (file)
@@ -43,24 +43,24 @@ patternProperties:
       patternProperties:
         "^function|groups$":
           $ref: "/schemas/types.yaml#/definitions/string"
-          enum: [ACPI, ADC0, ADC1, ADC10, ADC11, ADC12, ADC13, ADC14, ADC15,
-            ADC2, ADC3, ADC4, ADC5, ADC6, ADC7, ADC8, ADC9, BMCINT, DDCCLK, DDCDAT,
-            ESPI, FWSPICS1, FWSPICS2, GPID0, GPID2, GPID4, GPID6, GPIE0, GPIE2,
-            GPIE4, GPIE6, I2C10, I2C11, I2C12, I2C13, I2C14, I2C3, I2C4, I2C5,
-            I2C6, I2C7, I2C8, I2C9, LAD0, LAD1, LAD2, LAD3, LCLK, LFRAME, LPCHC,
-            LPCPD, LPCPLUS, LPCPME, LPCRST, LPCSMI, LSIRQ, MAC1LINK, MAC2LINK,
-            MDIO1, MDIO2, NCTS1, NCTS2, NCTS3, NCTS4, NDCD1, NDCD2, NDCD3, NDCD4,
-            NDSR1, NDSR2, NDSR3, NDSR4, NDTR1, NDTR2, NDTR3, NDTR4, NRI1, NRI2,
-            NRI3, NRI4, NRTS1, NRTS2, NRTS3, NRTS4, OSCCLK, PEWAKE, PNOR, PWM0,
-            PWM1, PWM2, PWM3, PWM4, PWM5, PWM6, PWM7, RGMII1, RGMII2, RMII1,
-            RMII2, RXD1, RXD2, RXD3, RXD4, SALT1, SALT10, SALT11, SALT12, SALT13,
-            SALT14, SALT2, SALT3, SALT4, SALT5, SALT6, SALT7, SALT8, SALT9, SCL1,
-            SCL2, SD1, SD2, SDA1, SDA2, SGPS1, SGPS2, SIOONCTRL, SIOPBI, SIOPBO,
-            SIOPWREQ, SIOPWRGD, SIOS3, SIOS5, SIOSCI, SPI1, SPI1CS1, SPI1DEBUG,
-            SPI1PASSTHRU, SPI2CK, SPI2CS0, SPI2CS1, SPI2MISO, SPI2MOSI, TIMER3,
-            TIMER4, TIMER5, TIMER6, TIMER7, TIMER8, TXD1, TXD2, TXD3, TXD4, UART6,
-            USB11BHID, USB2AD, USB2AH, USB2BD, USB2BH, USBCKI, VGABIOSROM, VGAHS,
-            VGAVS, VPI24, VPO, WDTRST1, WDTRST2]
+          enum: [ ACPI, ADC0, ADC1, ADC10, ADC11, ADC12, ADC13, ADC14, ADC15,
+                  ADC2, ADC3, ADC4, ADC5, ADC6, ADC7, ADC8, ADC9, BMCINT, DDCCLK, DDCDAT,
+                  ESPI, FWSPICS1, FWSPICS2, GPID0, GPID2, GPID4, GPID6, GPIE0, GPIE2,
+                  GPIE4, GPIE6, I2C10, I2C11, I2C12, I2C13, I2C14, I2C3, I2C4, I2C5,
+                  I2C6, I2C7, I2C8, I2C9, LAD0, LAD1, LAD2, LAD3, LCLK, LFRAME, LPCHC,
+                  LPCPD, LPCPLUS, LPCPME, LPCRST, LPCSMI, LSIRQ, MAC1LINK, MAC2LINK,
+                  MDIO1, MDIO2, NCTS1, NCTS2, NCTS3, NCTS4, NDCD1, NDCD2, NDCD3, NDCD4,
+                  NDSR1, NDSR2, NDSR3, NDSR4, NDTR1, NDTR2, NDTR3, NDTR4, NRI1, NRI2,
+                  NRI3, NRI4, NRTS1, NRTS2, NRTS3, NRTS4, OSCCLK, PEWAKE, PNOR, PWM0,
+                  PWM1, PWM2, PWM3, PWM4, PWM5, PWM6, PWM7, RGMII1, RGMII2, RMII1,
+                  RMII2, RXD1, RXD2, RXD3, RXD4, SALT1, SALT10, SALT11, SALT12, SALT13,
+                  SALT14, SALT2, SALT3, SALT4, SALT5, SALT6, SALT7, SALT8, SALT9, SCL1,
+                  SCL2, SD1, SD2, SDA1, SDA2, SGPS1, SGPS2, SIOONCTRL, SIOPBI, SIOPBO,
+                  SIOPWREQ, SIOPWRGD, SIOS3, SIOS5, SIOSCI, SPI1, SPI1CS1, SPI1DEBUG,
+                  SPI1PASSTHRU, SPI2CK, SPI2CS0, SPI2CS1, SPI2MISO, SPI2MOSI, TIMER3,
+                  TIMER4, TIMER5, TIMER6, TIMER7, TIMER8, TXD1, TXD2, TXD3, TXD4, UART6,
+                  USB11BHID, USB2AD, USB2AH, USB2BD, USB2BH, USBCKI, VGABIOSROM, VGAHS,
+                  VGAVS, VPI24, VPO, WDTRST1, WDTRST2]
 
 required:
   - compatible
index 1506726..c78ab7e 100644 (file)
@@ -31,57 +31,57 @@ patternProperties:
       properties:
         function:
           $ref: "/schemas/types.yaml#/definitions/string"
-          enum: [ADC0, ADC1, ADC10, ADC11, ADC12, ADC13, ADC14, ADC15, ADC2,
-            ADC3, ADC4, ADC5, ADC6, ADC7, ADC8, ADC9, BMCINT, EMMC, ESPI, ESPIALT,
-            FSI1, FSI2, FWSPIABR, FWSPID, FWSPIWP, GPIT0, GPIT1, GPIT2, GPIT3,
-            GPIT4, GPIT5, GPIT6, GPIT7, GPIU0, GPIU1, GPIU2, GPIU3, GPIU4, GPIU5,
-            GPIU6, GPIU7, I2C1, I2C10, I2C11, I2C12, I2C13, I2C14, I2C15, I2C16,
-            I2C2, I2C3, I2C4, I2C5, I2C6, I2C7, I2C8, I2C9, I3C3, I3C4, I3C5,
-            I3C6, JTAGM, LHPD, LHSIRQ, LPC, LPCHC, LPCPD, LPCPME, LPCSMI, LSIRQ,
-            MACLINK1, MACLINK2, MACLINK3, MACLINK4, MDIO1, MDIO2, MDIO3, MDIO4,
-            NCTS1, NCTS2, NCTS3, NCTS4, NDCD1, NDCD2, NDCD3, NDCD4, NDSR1, NDSR2,
-            NDSR3, NDSR4, NDTR1, NDTR2, NDTR3, NDTR4, NRI1, NRI2, NRI3, NRI4,
-            NRTS1, NRTS2, NRTS3, NRTS4, OSCCLK, PEWAKE, PWM0, PWM1, PWM10, PWM11,
-            PWM12, PWM13, PWM14, PWM15, PWM2, PWM3, PWM4, PWM5, PWM6, PWM7, PWM8,
-            PWM9, RGMII1, RGMII2, RGMII3, RGMII4, RMII1, RMII2, RMII3, RMII4,
-            RXD1, RXD2, RXD3, RXD4, SALT1, SALT10, SALT11, SALT12, SALT13, SALT14,
-            SALT15, SALT16, SALT2, SALT3, SALT4, SALT5, SALT6, SALT7, SALT8,
-            SALT9, SD1, SD2, SGPM1, SGPS1, SIOONCTRL, SIOPBI, SIOPBO, SIOPWREQ,
-            SIOPWRGD, SIOS3, SIOS5, SIOSCI, SPI1, SPI1ABR, SPI1CS1, SPI1WP, SPI2,
-            SPI2CS1, SPI2CS2, TACH0, TACH1, TACH10, TACH11, TACH12, TACH13, TACH14,
-            TACH15, TACH2, TACH3, TACH4, TACH5, TACH6, TACH7, TACH8, TACH9, THRU0,
-            THRU1, THRU2, THRU3, TXD1, TXD2, TXD3, TXD4, UART10, UART11, UART12,
-            UART13, UART6, UART7, UART8, UART9, USBAD, USBADP, USB2AH, USB2AHP,
-            USB2BD, USB2BH, VB, VGAHS, VGAVS, WDTRST1, WDTRST2, WDTRST3, WDTRST4]
+          enum: [ ADC0, ADC1, ADC10, ADC11, ADC12, ADC13, ADC14, ADC15, ADC2,
+                  ADC3, ADC4, ADC5, ADC6, ADC7, ADC8, ADC9, BMCINT, EMMC, ESPI, ESPIALT,
+                  FSI1, FSI2, FWSPIABR, FWSPID, FWSPIWP, GPIT0, GPIT1, GPIT2, GPIT3,
+                  GPIT4, GPIT5, GPIT6, GPIT7, GPIU0, GPIU1, GPIU2, GPIU3, GPIU4, GPIU5,
+                  GPIU6, GPIU7, I2C1, I2C10, I2C11, I2C12, I2C13, I2C14, I2C15, I2C16,
+                  I2C2, I2C3, I2C4, I2C5, I2C6, I2C7, I2C8, I2C9, I3C3, I3C4, I3C5,
+                  I3C6, JTAGM, LHPD, LHSIRQ, LPC, LPCHC, LPCPD, LPCPME, LPCSMI, LSIRQ,
+                  MACLINK1, MACLINK2, MACLINK3, MACLINK4, MDIO1, MDIO2, MDIO3, MDIO4,
+                  NCTS1, NCTS2, NCTS3, NCTS4, NDCD1, NDCD2, NDCD3, NDCD4, NDSR1, NDSR2,
+                  NDSR3, NDSR4, NDTR1, NDTR2, NDTR3, NDTR4, NRI1, NRI2, NRI3, NRI4,
+                  NRTS1, NRTS2, NRTS3, NRTS4, OSCCLK, PEWAKE, PWM0, PWM1, PWM10, PWM11,
+                  PWM12, PWM13, PWM14, PWM15, PWM2, PWM3, PWM4, PWM5, PWM6, PWM7, PWM8,
+                  PWM9, RGMII1, RGMII2, RGMII3, RGMII4, RMII1, RMII2, RMII3, RMII4,
+                  RXD1, RXD2, RXD3, RXD4, SALT1, SALT10, SALT11, SALT12, SALT13, SALT14,
+                  SALT15, SALT16, SALT2, SALT3, SALT4, SALT5, SALT6, SALT7, SALT8,
+                  SALT9, SD1, SD2, SGPM1, SGPS1, SIOONCTRL, SIOPBI, SIOPBO, SIOPWREQ,
+                  SIOPWRGD, SIOS3, SIOS5, SIOSCI, SPI1, SPI1ABR, SPI1CS1, SPI1WP, SPI2,
+                  SPI2CS1, SPI2CS2, TACH0, TACH1, TACH10, TACH11, TACH12, TACH13, TACH14,
+                  TACH15, TACH2, TACH3, TACH4, TACH5, TACH6, TACH7, TACH8, TACH9, THRU0,
+                  THRU1, THRU2, THRU3, TXD1, TXD2, TXD3, TXD4, UART10, UART11, UART12,
+                  UART13, UART6, UART7, UART8, UART9, USBAD, USBADP, USB2AH, USB2AHP,
+                  USB2BD, USB2BH, VB, VGAHS, VGAVS, WDTRST1, WDTRST2, WDTRST3, WDTRST4 ]
 
         groups:
           $ref: "/schemas/types.yaml#/definitions/string"
-          enum: [ADC0, ADC1, ADC10, ADC11, ADC12, ADC13, ADC14, ADC15, ADC2,
-            ADC3, ADC4, ADC5, ADC6, ADC7, ADC8, ADC9, BMCINT, EMMCG1, EMMCG4,
-            EMMCG8, ESPI, ESPIALT, FSI1, FSI2, FWSPIABR, FWSPID, FWQSPID, FWSPIWP,
-            GPIT0, GPIT1, GPIT2, GPIT3, GPIT4, GPIT5, GPIT6, GPIT7, GPIU0, GPIU1,
-            GPIU2, GPIU3, GPIU4, GPIU5, GPIU6, GPIU7, HVI3C3, HVI3C4, I2C1, I2C10,
-            I2C11, I2C12, I2C13, I2C14, I2C15, I2C16, I2C2, I2C3, I2C4, I2C5,
-            I2C6, I2C7, I2C8, I2C9, I3C3, I3C4, I3C5, I3C6, JTAGM, LHPD, LHSIRQ,
-            LPC, LPCHC, LPCPD, LPCPME, LPCSMI, LSIRQ, MACLINK1, MACLINK2, MACLINK3,
-            MACLINK4, MDIO1, MDIO2, MDIO3, MDIO4, NCTS1, NCTS2, NCTS3, NCTS4,
-            NDCD1, NDCD2, NDCD3, NDCD4, NDSR1, NDSR2, NDSR3, NDSR4, NDTR1, NDTR2,
-            NDTR3, NDTR4, NRI1, NRI2, NRI3, NRI4, NRTS1, NRTS2, NRTS3, NRTS4,
-            OSCCLK, PEWAKE, PWM0, PWM1, PWM10G0, PWM10G1, PWM11G0, PWM11G1, PWM12G0,
-            PWM12G1, PWM13G0, PWM13G1, PWM14G0, PWM14G1, PWM15G0, PWM15G1, PWM2,
-            PWM3, PWM4, PWM5, PWM6, PWM7, PWM8G0, PWM8G1, PWM9G0, PWM9G1, QSPI1,
-            QSPI2, RGMII1, RGMII2, RGMII3, RGMII4, RMII1, RMII2, RMII3, RMII4,
-            RXD1, RXD2, RXD3, RXD4, SALT1, SALT10G0, SALT10G1, SALT11G0, SALT11G1,
-            SALT12G0, SALT12G1, SALT13G0, SALT13G1, SALT14G0, SALT14G1, SALT15G0,
-            SALT15G1, SALT16G0, SALT16G1, SALT2, SALT3, SALT4, SALT5, SALT6,
-            SALT7, SALT8, SALT9G0, SALT9G1, SD1, SD2, SD3, SGPM1, SGPS1, SIOONCTRL,
-            SIOPBI, SIOPBO, SIOPWREQ, SIOPWRGD, SIOS3, SIOS5, SIOSCI, SPI1, SPI1ABR,
-            SPI1CS1, SPI1WP, SPI2, SPI2CS1, SPI2CS2, TACH0, TACH1, TACH10, TACH11,
-            TACH12, TACH13, TACH14, TACH15, TACH2, TACH3, TACH4, TACH5, TACH6,
-            TACH7, TACH8, TACH9, THRU0, THRU1, THRU2, THRU3, TXD1, TXD2, TXD3,
-            TXD4, UART10, UART11, UART12G0, UART12G1, UART13G0, UART13G1, UART6,
-            UART7, UART8, UART9, USBA, USBB, VB, VGAHS, VGAVS, WDTRST1, WDTRST2,
-            WDTRST3, WDTRST4]
+          enum: [ ADC0, ADC1, ADC10, ADC11, ADC12, ADC13, ADC14, ADC15, ADC2,
+                  ADC3, ADC4, ADC5, ADC6, ADC7, ADC8, ADC9, BMCINT, EMMCG1, EMMCG4,
+                  EMMCG8, ESPI, ESPIALT, FSI1, FSI2, FWSPIABR, FWSPID, FWQSPID, FWSPIWP,
+                  GPIT0, GPIT1, GPIT2, GPIT3, GPIT4, GPIT5, GPIT6, GPIT7, GPIU0, GPIU1,
+                  GPIU2, GPIU3, GPIU4, GPIU5, GPIU6, GPIU7, HVI3C3, HVI3C4, I2C1, I2C10,
+                  I2C11, I2C12, I2C13, I2C14, I2C15, I2C16, I2C2, I2C3, I2C4, I2C5,
+                  I2C6, I2C7, I2C8, I2C9, I3C3, I3C4, I3C5, I3C6, JTAGM, LHPD, LHSIRQ,
+                  LPC, LPCHC, LPCPD, LPCPME, LPCSMI, LSIRQ, MACLINK1, MACLINK2, MACLINK3,
+                  MACLINK4, MDIO1, MDIO2, MDIO3, MDIO4, NCTS1, NCTS2, NCTS3, NCTS4,
+                  NDCD1, NDCD2, NDCD3, NDCD4, NDSR1, NDSR2, NDSR3, NDSR4, NDTR1, NDTR2,
+                  NDTR3, NDTR4, NRI1, NRI2, NRI3, NRI4, NRTS1, NRTS2, NRTS3, NRTS4,
+                  OSCCLK, PEWAKE, PWM0, PWM1, PWM10G0, PWM10G1, PWM11G0, PWM11G1, PWM12G0,
+                  PWM12G1, PWM13G0, PWM13G1, PWM14G0, PWM14G1, PWM15G0, PWM15G1, PWM2,
+                  PWM3, PWM4, PWM5, PWM6, PWM7, PWM8G0, PWM8G1, PWM9G0, PWM9G1, QSPI1,
+                  QSPI2, RGMII1, RGMII2, RGMII3, RGMII4, RMII1, RMII2, RMII3, RMII4,
+                  RXD1, RXD2, RXD3, RXD4, SALT1, SALT10G0, SALT10G1, SALT11G0, SALT11G1,
+                  SALT12G0, SALT12G1, SALT13G0, SALT13G1, SALT14G0, SALT14G1, SALT15G0,
+                  SALT15G1, SALT16G0, SALT16G1, SALT2, SALT3, SALT4, SALT5, SALT6,
+                  SALT7, SALT8, SALT9G0, SALT9G1, SD1, SD2, SD3, SGPM1, SGPS1, SIOONCTRL,
+                  SIOPBI, SIOPBO, SIOPWREQ, SIOPWRGD, SIOS3, SIOS5, SIOSCI, SPI1, SPI1ABR,
+                  SPI1CS1, SPI1WP, SPI2, SPI2CS1, SPI2CS2, TACH0, TACH1, TACH10, TACH11,
+                  TACH12, TACH13, TACH14, TACH15, TACH2, TACH3, TACH4, TACH5, TACH6,
+                  TACH7, TACH8, TACH9, THRU0, THRU1, THRU2, THRU3, TXD1, TXD2, TXD3,
+                  TXD4, UART10, UART11, UART12G0, UART12G1, UART13G0, UART13G1, UART6,
+                  UART7, UART8, UART9, USBA, USBB, VB, VGAHS, VGAVS, WDTRST1, WDTRST2,
+                  WDTRST3, WDTRST4]
 
 required:
   - compatible
diff --git a/Documentation/devicetree/bindings/pinctrl/ingenic,pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/ingenic,pinctrl.txt
deleted file mode 100644 (file)
index d9b2100..0000000
+++ /dev/null
@@ -1,81 +0,0 @@
-Ingenic XBurst pin controller
-
-Please refer to pinctrl-bindings.txt in this directory for details of the
-common pinctrl bindings used by client devices, including the meaning of the
-phrase "pin configuration node".
-
-For the XBurst SoCs, pin control is tightly bound with GPIO ports. All pins may
-be used as GPIOs, multiplexed device functions are configured within the
-GPIO port configuration registers and it is typical to refer to pins using the
-naming scheme "PxN" where x is a character identifying the GPIO port with
-which the pin is associated and N is an integer from 0 to 31 identifying the
-pin within that GPIO port. For example PA0 is the first pin in GPIO port A, and
-PB31 is the last pin in GPIO port B. The jz4740, the x1000 and the x1830
-contains 4 GPIO ports, PA to PD, for a total of 128 pins. The jz4760, the
-jz4770 and the jz4780 contains 6 GPIO ports, PA to PF, for a total of 192 pins.
-
-
-Required properties:
---------------------
-
- - compatible: One of:
-    - "ingenic,jz4740-pinctrl"
-    - "ingenic,jz4725b-pinctrl"
-    - "ingenic,jz4760-pinctrl"
-    - "ingenic,jz4760b-pinctrl"
-    - "ingenic,jz4770-pinctrl"
-    - "ingenic,jz4780-pinctrl"
-    - "ingenic,x1000-pinctrl"
-    - "ingenic,x1000e-pinctrl"
-    - "ingenic,x1500-pinctrl"
-    - "ingenic,x1830-pinctrl"
- - reg: Address range of the pinctrl registers.
-
-
-Required properties for sub-nodes (GPIO chips):
------------------------------------------------
-
- - compatible: Must contain one of:
-    - "ingenic,jz4740-gpio"
-    - "ingenic,jz4760-gpio"
-    - "ingenic,jz4770-gpio"
-    - "ingenic,jz4780-gpio"
-    - "ingenic,x1000-gpio"
-    - "ingenic,x1830-gpio"
- - reg: The GPIO bank number.
- - interrupt-controller: Marks the device node as an interrupt controller.
- - interrupts: Interrupt specifier for the controllers interrupt.
- - #interrupt-cells: Should be 2. Refer to
-   ../interrupt-controller/interrupts.txt for more details.
- - gpio-controller: Marks the device node as a GPIO controller.
- - #gpio-cells: Should be 2. The first cell is the GPIO number and the second
-    cell specifies GPIO flags, as defined in <dt-bindings/gpio/gpio.h>. Only the
-    GPIO_ACTIVE_HIGH and GPIO_ACTIVE_LOW flags are supported.
- - gpio-ranges: Range of pins managed by the GPIO controller. Refer to
-   ../gpio/gpio.txt for more details.
-
-
-Example:
---------
-
-pinctrl: pin-controller@10010000 {
-       compatible = "ingenic,jz4740-pinctrl";
-       reg = <0x10010000 0x400>;
-       #address-cells = <1>;
-       #size-cells = <0>;
-
-       gpa: gpio@0 {
-               compatible = "ingenic,jz4740-gpio";
-               reg = <0>;
-
-               gpio-controller;
-               gpio-ranges = <&pinctrl 0 0 32>;
-               #gpio-cells = <2>;
-
-               interrupt-controller;
-               #interrupt-cells = <2>;
-
-               interrupt-parent = <&intc>;
-               interrupts = <28>;
-       };
-};
diff --git a/Documentation/devicetree/bindings/pinctrl/ingenic,pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/ingenic,pinctrl.yaml
new file mode 100644 (file)
index 0000000..44c04d1
--- /dev/null
@@ -0,0 +1,176 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pinctrl/ingenic,pinctrl.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Ingenic SoCs pin controller devicetree bindings
+
+description: >
+  Please refer to pinctrl-bindings.txt in this directory for details of the
+  common pinctrl bindings used by client devices, including the meaning of the
+  phrase "pin configuration node".
+
+  For the Ingenic SoCs, pin control is tightly bound with the GPIO ports. All
+  pins may be used as GPIOs; multiplexed device functions are configured within
+  the GPIO port configuration registers, and it is typical to refer to pins
+  using the naming scheme "PxN", where x is a character identifying the GPIO
+  port with which the pin is associated and N is an integer from 0 to 31
+  identifying the pin within that GPIO port. For example, PA0 is the first pin
+  in GPIO port A, and PB31 is the last pin in GPIO port B. The JZ4740, the
+  X1000 and the X1830 contain 4 GPIO ports, PA to PD, for a total of 128 pins.
+  The JZ4760, the JZ4770 and the JZ4780 contain 6 GPIO ports, PA to PF, for a
+  total of 192 pins.
+
+maintainers:
+  - Paul Cercueil <paul@crapouillou.net>
+
+properties:
+  nodename:
+    pattern: "^pinctrl@[0-9a-f]+$"
+
+  compatible:
+    oneOf:
+      - enum:
+          - ingenic,jz4740-pinctrl
+          - ingenic,jz4725b-pinctrl
+          - ingenic,jz4760-pinctrl
+          - ingenic,jz4770-pinctrl
+          - ingenic,jz4780-pinctrl
+          - ingenic,x1000-pinctrl
+          - ingenic,x1500-pinctrl
+          - ingenic,x1830-pinctrl
+      - items:
+          - const: ingenic,jz4760b-pinctrl
+          - const: ingenic,jz4760-pinctrl
+      - items:
+          - const: ingenic,x1000e-pinctrl
+          - const: ingenic,x1000-pinctrl
+
+  reg:
+    maxItems: 1
+
+  "#address-cells":
+    const: 1
+
+  "#size-cells":
+    const: 0
+
+patternProperties:
+  "^gpio@[0-9]$":
+    type: object
+    properties:
+      compatible:
+        enum:
+          - ingenic,jz4740-gpio
+          - ingenic,jz4725b-gpio
+          - ingenic,jz4760-gpio
+          - ingenic,jz4770-gpio
+          - ingenic,jz4780-gpio
+          - ingenic,x1000-gpio
+          - ingenic,x1500-gpio
+          - ingenic,x1830-gpio
+
+      reg:
+        items:
+          - description: The GPIO bank number
+
+      gpio-controller: true
+
+      "#gpio-cells":
+        const: 2
+
+      gpio-ranges:
+        maxItems: 1
+
+      interrupt-controller: true
+
+      "#interrupt-cells":
+        const: 2
+        description:
+          Refer to ../interrupt-controller/interrupts.txt for more details.
+
+      interrupts:
+        maxItems: 1
+
+    required:
+      - compatible
+      - reg
+      - gpio-controller
+      - "#gpio-cells"
+      - interrupts
+      - interrupt-controller
+      - "#interrupt-cells"
+
+    additionalProperties: false
+
+required:
+  - compatible
+  - reg
+  - "#address-cells"
+  - "#size-cells"
+
+additionalProperties:
+  anyOf:
+    - type: object
+      allOf:
+        - $ref: pincfg-node.yaml#
+        - $ref: pinmux-node.yaml#
+
+      properties:
+        phandle: true
+        function: true
+        groups: true
+        pins: true
+        bias-disable: true
+        bias-pull-up: true
+        bias-pull-down: true
+        output-low: true
+        output-high: true
+      additionalProperties: false
+
+    - type: object
+      properties:
+        phandle: true
+      additionalProperties:
+        type: object
+        allOf:
+          - $ref: pincfg-node.yaml#
+          - $ref: pinmux-node.yaml#
+
+        properties:
+          phandle: true
+          function: true
+          groups: true
+          pins: true
+          bias-disable: true
+          bias-pull-up: true
+          bias-pull-down: true
+          output-low: true
+          output-high: true
+        additionalProperties: false
+
+examples:
+  - |
+    pin-controller@10010000 {
+      compatible = "ingenic,jz4770-pinctrl";
+      reg = <0x10010000 0x600>;
+
+      #address-cells = <1>;
+      #size-cells = <0>;
+
+      gpio@0 {
+        compatible = "ingenic,jz4770-gpio";
+        reg = <0>;
+
+        gpio-controller;
+        gpio-ranges = <&pinctrl 0 0 32>;
+        #gpio-cells = <2>;
+
+        interrupt-controller;
+        #interrupt-cells = <2>;
+
+        interrupt-parent = <&intc>;
+        interrupts = <17>;
+      };
+    };
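As a reading aid for the schema above, a minimal sketch of a pin configuration child node accepted by the additionalProperties section could look like the following; the "uart0" function and "uart0-data" group names are placeholders and must be taken from the SoC's pinctrl driver, not from this binding.

        &pinctrl {
                pins_uart0: uart0-pins {
                        function = "uart0";     /* placeholder function name */
                        groups = "uart0-data";  /* placeholder group name */
                        bias-disable;
                };
        };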
diff --git a/Documentation/devicetree/bindings/pinctrl/mediatek,mt6779-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/mediatek,mt6779-pinctrl.yaml
new file mode 100644 (file)
index 0000000..152c151
--- /dev/null
@@ -0,0 +1,202 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pinctrl/mediatek,mt6779-pinctrl.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Mediatek MT6779 Pin Controller Device Tree Bindings
+
+maintainers:
+  - Andy Teng <andy.teng@mediatek.com>
+
+description: |+
+  The pin controller node should be the child of a syscon node with the
+  required property:
+  - compatible: "syscon"
+
+properties:
+  compatible:
+    const: mediatek,mt6779-pinctrl
+
+  reg:
+    minItems: 9
+    maxItems: 9
+
+  reg-names:
+    items:
+      - const: "gpio"
+      - const: "iocfg_rm"
+      - const: "iocfg_br"
+      - const: "iocfg_lm"
+      - const: "iocfg_lb"
+      - const: "iocfg_rt"
+      - const: "iocfg_lt"
+      - const: "iocfg_tl"
+      - const: "eint"
+
+  gpio-controller: true
+
+  "#gpio-cells":
+    const: 2
+    description: |
+      Number of cells in GPIO specifier. Since the generic GPIO
+      binding is used, the number of cells must be specified as 2. See the
+      standard GPIO binding documentation for a description of the individual cells.
+
+  gpio-ranges:
+    minItems: 1
+    maxItems: 5
+    description: |
+      The valid GPIO number range.
+
+  interrupt-controller: true
+
+  interrupts:
+    maxItems: 1
+    description: |
+      Specifies the summary IRQ.
+
+  "#interrupt-cells":
+    const: 2
+
+required:
+  - compatible
+  - reg
+  - reg-names
+  - gpio-controller
+  - "#gpio-cells"
+  - gpio-ranges
+  - interrupt-controller
+  - interrupts
+  - "#interrupt-cells"
+
+patternProperties:
+  '-[0-9]*$':
+    type: object
+    patternProperties:
+      '-pins*$':
+        type: object
+        description: |
+          A pinctrl node should contain at least one subnode representing the
+          pinctrl groups available on the machine. Each subnode will list the
+          pins it needs and how they should be configured, with regard to muxer
+          configuration, pull-ups, drive strength, input enable/disable and
+          input Schmitt trigger.
+        $ref: "/schemas/pinctrl/pincfg-node.yaml"
+
+        properties:
+          pinmux:
+            description:
+              An integer array representing the GPIO pin number and mux setting.
+              The supported pin numbers and mux settings vary between SoCs and are
+              defined as macros in boot/dts/<soc>-pinfunc.h directly.
+
+          bias-disable: true
+
+          bias-pull-up: true
+
+          bias-pull-down: true
+
+          input-enable: true
+
+          input-disable: true
+
+          output-low: true
+
+          output-high: true
+
+          input-schmitt-enable: true
+
+          input-schmitt-disable: true
+
+          mediatek,pull-up-adv:
+            description: |
+              Pull-up settings for the two pull resistors, R0 and R1, available on
+              certain special pins. Valid arguments are described below:
+              0: (R1, R0) = (0, 0) which means R1 disabled and R0 disabled.
+              1: (R1, R0) = (0, 1) which means R1 disabled and R0 enabled.
+              2: (R1, R0) = (1, 0) which means R1 enabled and R0 disabled.
+              3: (R1, R0) = (1, 1) which means R1 enabled and R0 enabled.
+            $ref: /schemas/types.yaml#/definitions/uint32
+            enum: [0, 1, 2, 3]
+
+          mediatek,pull-down-adv:
+            description: |
+              Pull-down settings for the two pull resistors, R0 and R1, available on
+              certain special pins. Valid arguments are described below:
+              0: (R1, R0) = (0, 0) which means R1 disabled and R0 disabled.
+              1: (R1, R0) = (0, 1) which means R1 disabled and R0 enabled.
+              2: (R1, R0) = (1, 0) which means R1 enabled and R0 disabled.
+              3: (R1, R0) = (1, 1) which means R1 enabled and R0 enabled.
+            $ref: /schemas/types.yaml#/definitions/uint32
+            enum: [0, 1, 2, 3]
+
+        required:
+          - pinmux
+
+        additionalProperties: false
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/irq.h>
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+    #include <dt-bindings/pinctrl/mt6779-pinfunc.h>
+
+    soc {
+        #address-cells = <2>;
+        #size-cells = <2>;
+
+        pio: pinctrl@10005000 {
+            compatible = "mediatek,mt6779-pinctrl";
+            reg = <0 0x10005000 0 0x1000>,
+                <0 0x11c20000 0 0x1000>,
+                <0 0x11d10000 0 0x1000>,
+                <0 0x11e20000 0 0x1000>,
+                <0 0x11e70000 0 0x1000>,
+                <0 0x11ea0000 0 0x1000>,
+                <0 0x11f20000 0 0x1000>,
+                <0 0x11f30000 0 0x1000>,
+                <0 0x1000b000 0 0x1000>;
+            reg-names = "gpio", "iocfg_rm",
+              "iocfg_br", "iocfg_lm",
+              "iocfg_lb", "iocfg_rt",
+              "iocfg_lt", "iocfg_tl",
+              "eint";
+            gpio-controller;
+            #gpio-cells = <2>;
+            gpio-ranges = <&pio 0 0 210>;
+            interrupt-controller;
+            #interrupt-cells = <2>;
+            interrupts = <GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH>;
+
+            mmc0_pins_default: mmc0-0 {
+                cmd-dat-pins {
+                    pinmux = <PINMUX_GPIO168__FUNC_MSDC0_DAT0>,
+                        <PINMUX_GPIO172__FUNC_MSDC0_DAT1>,
+                        <PINMUX_GPIO169__FUNC_MSDC0_DAT2>,
+                        <PINMUX_GPIO177__FUNC_MSDC0_DAT3>,
+                        <PINMUX_GPIO170__FUNC_MSDC0_DAT4>,
+                        <PINMUX_GPIO173__FUNC_MSDC0_DAT5>,
+                        <PINMUX_GPIO171__FUNC_MSDC0_DAT6>,
+                        <PINMUX_GPIO174__FUNC_MSDC0_DAT7>,
+                        <PINMUX_GPIO167__FUNC_MSDC0_CMD>;
+                    input-enable;
+                    mediatek,pull-up-adv = <1>;
+                };
+                clk-pins {
+                    pinmux = <PINMUX_GPIO176__FUNC_MSDC0_CLK>;
+                    mediatek,pull-down-adv = <2>;
+                };
+                rst-pins {
+                    pinmux = <PINMUX_GPIO178__FUNC_MSDC0_RSTB>;
+                    mediatek,pull-up-adv = <0>;
+                };
+            };
+        };
+
+        mmc0 {
+           pinctrl-0 = <&mmc0_pins_default>;
+           pinctrl-names = "default";
+        };
+    };
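As a quick reference for the R1/R0 encoding documented above, a hypothetical pin group enabling both pull-up resistors would use the value 3; the pinmux macro below is only a placeholder.

        example-pins {
                pinmux = <PINMUX_GPIO0__FUNC_GPIO0>;    /* placeholder pinmux macro */
                mediatek,pull-up-adv = <3>;             /* (R1, R0) = (1, 1): both resistors enabled */
        };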
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-stmfx.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-stmfx.txt
deleted file mode 100644 (file)
index c1b4c18..0000000
+++ /dev/null
@@ -1,116 +0,0 @@
-STMicroelectronics Multi-Function eXpander (STMFX) GPIO expander bindings
-
-ST Multi-Function eXpander (STMFX) offers up to 24 GPIOs expansion.
-Please refer to ../mfd/stmfx.txt for STMFX Core bindings.
-
-Required properties:
-- compatible: should be "st,stmfx-0300-pinctrl".
-- #gpio-cells: should be <2>, the first cell is the GPIO number and the second
-  cell is the gpio flags in accordance with <dt-bindings/gpio/gpio.h>.
-- gpio-controller: marks the device as a GPIO controller.
-- #interrupt-cells: should be <2>, the first cell is the GPIO number and the
-  second cell is the interrupt flags in accordance with
-  <dt-bindings/interrupt-controller/irq.h>.
-- interrupt-controller: marks the device as an interrupt controller.
-- gpio-ranges: specifies the mapping between gpio controller and pin
-  controller pins. Check "Concerning gpio-ranges property" below.
-Please refer to ../gpio/gpio.txt.
-
-Please refer to pinctrl-bindings.txt for pin configuration.
-
-Required properties for pin configuration sub-nodes:
-- pins: list of pins to which the configuration applies.
-
-Optional properties for pin configuration sub-nodes (pinconf-generic ones):
-- bias-disable: disable any bias on the pin.
-- bias-pull-up: the pin will be pulled up.
-- bias-pull-pin-default: use the pin-default pull state.
-- bias-pull-down: the pin will be pulled down.
-- drive-open-drain: the pin will be driven with open drain.
-- drive-push-pull: the pin will be driven actively high and low.
-- output-high: the pin will be configured as an output driving high level.
-- output-low: the pin will be configured as an output driving low level.
-
-Note that STMFX pins[15:0] are called "gpio[15:0]", and STMFX pins[23:16] are
-called "agpio[7:0]". Example, to refer to pin 18 of STMFX, use "agpio2".
-
-Concerning gpio-ranges property:
-- if all STMFX pins[24:0] are available (no other STMFX function in use), you
-  should use gpio-ranges = <&stmfx_pinctrl 0 0 24>;
-- if agpio[3:0] are not available (STMFX Touchscreen function in use), you
-  should use gpio-ranges = <&stmfx_pinctrl 0 0 16>, <&stmfx_pinctrl 20 20 4>;
-- if agpio[7:4] are not available (STMFX IDD function in use), you
-  should use gpio-ranges = <&stmfx_pinctrl 0 0 20>;
-
-
-Example:
-
-       stmfx: stmfx@42 {
-               ...
-
-               stmfx_pinctrl: stmfx-pin-controller {
-                       compatible = "st,stmfx-0300-pinctrl";
-                       #gpio-cells = <2>;
-                       #interrupt-cells = <2>;
-                       gpio-controller;
-                       interrupt-controller;
-                       gpio-ranges = <&stmfx_pinctrl 0 0 24>;
-
-                       joystick_pins: joystick {
-                               pins = "gpio0", "gpio1", "gpio2", "gpio3", "gpio4";
-                               drive-push-pull;
-                               bias-pull-up;
-                       };
-               };
-       };
-
-Example of STMFX GPIO consumers:
-
-       joystick {
-               compatible = "gpio-keys";
-               #address-cells = <1>;
-               #size-cells = <0>;
-               pinctrl-0 = <&joystick_pins>;
-               pinctrl-names = "default";
-               button-0 {
-                       label = "JoySel";
-                       linux,code = <KEY_ENTER>;
-                       interrupt-parent = <&stmfx_pinctrl>;
-                       interrupts = <0 IRQ_TYPE_EDGE_RISING>;
-               };
-               button-1 {
-                       label = "JoyDown";
-                       linux,code = <KEY_DOWN>;
-                       interrupt-parent = <&stmfx_pinctrl>;
-                       interrupts = <1 IRQ_TYPE_EDGE_RISING>;
-               };
-               button-2 {
-                       label = "JoyLeft";
-                       linux,code = <KEY_LEFT>;
-                       interrupt-parent = <&stmfx_pinctrl>;
-                       interrupts = <2 IRQ_TYPE_EDGE_RISING>;
-               };
-               button-3 {
-                       label = "JoyRight";
-                       linux,code = <KEY_RIGHT>;
-                       interrupt-parent = <&stmfx_pinctrl>;
-                       interrupts = <3 IRQ_TYPE_EDGE_RISING>;
-               };
-               button-4 {
-                       label = "JoyUp";
-                       linux,code = <KEY_UP>;
-                       interrupt-parent = <&stmfx_pinctrl>;
-                       interrupts = <4 IRQ_TYPE_EDGE_RISING>;
-               };
-       };
-
-       leds {
-               compatible = "gpio-leds";
-               orange {
-                       gpios = <&stmfx_pinctrl 17 1>;
-               };
-
-               blue {
-                       gpios = <&stmfx_pinctrl 19 1>;
-               };
-       }
index 84be0f2..0861afe 100644 (file)
@@ -44,7 +44,8 @@ information about e.g. the mux function.
 
 The following generic properties as defined in pinctrl-bindings.txt are valid
 to specify in a pin configuration subnode:
- pins, function, bias-disable, bias-pull-down, bias-pull-up, drive-strength.
+ pins, function, bias-disable, bias-pull-down, bias-pull-up, drive-open-drain,
+ drive-strength.
 
 Non-empty subnodes must specify the 'pins' property.
 Note that not all properties are valid for all pins.
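A purely illustrative pin configuration subnode using these generic properties might look as follows; the node, pin and function names are placeholders, since the real names depend on the pin controller this binding describes.

        example-state {
                pins = "gpio1";         /* placeholder pin name */
                function = "normal";    /* placeholder function name */
                bias-pull-up;
                drive-open-drain;
        };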
index b2de399..c64c932 100644 (file)
@@ -60,8 +60,8 @@ patternProperties:
           oneOf:
             - pattern: "^gpio([1-9]|[1-7][0-9]|80)$"
             - enum: [ sdc1_clk, sdc1_cmd, sdc1_data, sdc2_clk, sdc2_cmd,
-              sdc2_data, qdsd_cmd, qdsd_data0, qdsd_data1, qdsd_data2,
-              qdsd_data3 ]
+                      sdc2_data, qdsd_cmd, qdsd_data0, qdsd_data1, qdsd_data2,
+                      qdsd_data3 ]
         minItems: 1
         maxItems: 4
 
@@ -70,31 +70,31 @@ patternProperties:
           Specify the alternative function to be configured for the specified
           pins.
         enum: [ adsp_ext, alsp_int, atest_bbrx0, atest_bbrx1, atest_char,
-          atest_char0, atest_char1, atest_char2, atest_char3, atest_combodac,
-          atest_gpsadc0, atest_gpsadc1, atest_tsens, atest_wlan0,
-          atest_wlan1, backlight_en, bimc_dte0, bimc_dte1, blsp1_i2c,
-          blsp2_i2c, blsp3_i2c, blsp4_i2c, blsp5_i2c, blsp6_i2c,  blsp1_spi,
-          blsp1_spi_cs1, blsp1_spi_cs2, blsp1_spi_cs3, blsp2_spi,
-          blsp2_spi_cs1, blsp2_spi_cs2, blsp2_spi_cs3, blsp3_spi,
-          blsp3_spi_cs1, blsp3_spi_cs2, blsp3_spi_cs3, blsp4_spi, blsp5_spi,
-          blsp6_spi, blsp1_uart, blsp2_uart, blsp1_uim, blsp2_uim, cam1_rst,
-          cam1_standby, cam_mclk0, cam_mclk1, cci_async, cci_i2c, cci_timer0,
-          cci_timer1, cci_timer2, cdc_pdm0, codec_mad, dbg_out, display_5v,
-          dmic0_clk, dmic0_data, dsi_rst, ebi0_wrcdc, euro_us, ext_lpass,
-          flash_strobe, gcc_gp1_clk_a, gcc_gp1_clk_b, gcc_gp2_clk_a,
-          gcc_gp2_clk_b, gcc_gp3_clk_a, gcc_gp3_clk_b, gpio, gsm0_tx0,
-          gsm0_tx1, gsm1_tx0, gsm1_tx1, gyro_accl, kpsns0, kpsns1, kpsns2,
-          ldo_en, ldo_update, mag_int, mdp_vsync, modem_tsync, m_voc,
-          nav_pps, nav_tsync, pa_indicator, pbs0, pbs1, pbs2, pri_mi2s,
-          pri_mi2s_ws, prng_rosc, pwr_crypto_enabled_a, pwr_crypto_enabled_b,
-          pwr_modem_enabled_a,  pwr_modem_enabled_b, pwr_nav_enabled_a,
-          pwr_nav_enabled_b, qdss_ctitrig_in_a0, qdss_ctitrig_in_a1,
-          qdss_ctitrig_in_b0, qdss_ctitrig_in_b1, qdss_ctitrig_out_a0,
-          qdss_ctitrig_out_a1, qdss_ctitrig_out_b0, qdss_ctitrig_out_b1,
-          qdss_traceclk_a, qdss_traceclk_b, qdss_tracectl_a, qdss_tracectl_b,
-          qdss_tracedata_a, qdss_tracedata_b, reset_n, sd_card, sd_write,
-          sec_mi2s, smb_int, ssbi_wtr0, ssbi_wtr1, uim1, uim2, uim3,
-          uim_batt, wcss_bt, wcss_fm, wcss_wlan, webcam1_rst ]
+                atest_char0, atest_char1, atest_char2, atest_char3, atest_combodac,
+                atest_gpsadc0, atest_gpsadc1, atest_tsens, atest_wlan0,
+                atest_wlan1, backlight_en, bimc_dte0, bimc_dte1, blsp1_i2c,
+                blsp2_i2c, blsp3_i2c, blsp4_i2c, blsp5_i2c, blsp6_i2c, blsp1_spi,
+                blsp1_spi_cs1, blsp1_spi_cs2, blsp1_spi_cs3, blsp2_spi,
+                blsp2_spi_cs1, blsp2_spi_cs2, blsp2_spi_cs3, blsp3_spi,
+                blsp3_spi_cs1, blsp3_spi_cs2, blsp3_spi_cs3, blsp4_spi, blsp5_spi,
+                blsp6_spi, blsp1_uart, blsp2_uart, blsp1_uim, blsp2_uim, cam1_rst,
+                cam1_standby, cam_mclk0, cam_mclk1, cci_async, cci_i2c, cci_timer0,
+                cci_timer1, cci_timer2, cdc_pdm0, codec_mad, dbg_out, display_5v,
+                dmic0_clk, dmic0_data, dsi_rst, ebi0_wrcdc, euro_us, ext_lpass,
+                flash_strobe, gcc_gp1_clk_a, gcc_gp1_clk_b, gcc_gp2_clk_a,
+                gcc_gp2_clk_b, gcc_gp3_clk_a, gcc_gp3_clk_b, gpio, gsm0_tx0,
+                gsm0_tx1, gsm1_tx0, gsm1_tx1, gyro_accl, kpsns0, kpsns1, kpsns2,
+                ldo_en, ldo_update, mag_int, mdp_vsync, modem_tsync, m_voc,
+                nav_pps, nav_tsync, pa_indicator, pbs0, pbs1, pbs2, pri_mi2s,
+                pri_mi2s_ws, prng_rosc, pwr_crypto_enabled_a, pwr_crypto_enabled_b,
+                pwr_modem_enabled_a, pwr_modem_enabled_b, pwr_nav_enabled_a,
+                pwr_nav_enabled_b, qdss_ctitrig_in_a0, qdss_ctitrig_in_a1,
+                qdss_ctitrig_in_b0, qdss_ctitrig_in_b1, qdss_ctitrig_out_a0,
+                qdss_ctitrig_out_a1, qdss_ctitrig_out_b0, qdss_ctitrig_out_b1,
+                qdss_traceclk_a, qdss_traceclk_b, qdss_tracectl_a, qdss_tracectl_b,
+                qdss_tracedata_a, qdss_tracedata_b, reset_n, sd_card, sd_write,
+                sec_mi2s, smb_int, ssbi_wtr0, ssbi_wtr1, uim1, uim2, uim3,
+                uim_batt, wcss_bt, wcss_fm, wcss_wlan, webcam1_rst ]
 
       drive-strength:
         enum: [2, 4, 6, 8, 10, 12, 14, 16]
index 7be5de8..c3d1914 100644 (file)
@@ -23,6 +23,8 @@ PMIC's from Qualcomm.
                    "qcom,pmi8994-gpio"
                    "qcom,pmi8998-gpio"
                    "qcom,pms405-gpio"
+                   "qcom,pm660-gpio"
+                   "qcom,pm660l-gpio"
                    "qcom,pm8150-gpio"
                    "qcom,pm8150b-gpio"
                    "qcom,pm6150-gpio"
index 6dc3b52..8508c57 100644 (file)
@@ -76,22 +76,22 @@ patternProperties:
             pins.
 
           enum: [ aoss_cti, atest, audio_ref, cam_mclk, cci_async, cci_i2c,
-            cci_timer0, cci_timer1, cci_timer2, cci_timer3, cci_timer4, cri_trng,
-            cri_trng0, cri_trng1, dbg_out, ddr_bist, ddr_pxi0, ddr_pxi1,
-            ddr_pxi2, ddr_pxi3, dp_hot, dp_lcd, gcc_gp1, gcc_gp2, gcc_gp3, gpio,
-            ibi_i3c, jitter_bist, lpass_slimbus, mdp_vsync, mdp_vsync0,
-            mdp_vsync1, mdp_vsync2, mdp_vsync3, mi2s0_data0, mi2s0_data1,
-            mi2s0_sck, mi2s0_ws, mi2s1_data0, mi2s1_data1, mi2s1_sck, mi2s1_ws,
-            mi2s2_data0, mi2s2_data1, mi2s2_sck, mi2s2_ws, pci_e0, pci_e1,
-            pci_e2, phase_flag, pll_bist, pll_bypassnl, pll_clk, pll_reset,
-            pri_mi2s, prng_rosc, qdss_cti, qdss_gpio, qspi0, qspi1, qspi2, qspi3,
-            qspi_clk, qspi_cs, qup0, qup1, qup10, qup11, qup12, qup13, qup14,
-            qup15, qup16, qup17, qup18, qup19, qup2, qup3, qup4, qup5, qup6,
-            qup7, qup8, qup9, qup_l4, qup_l5, qup_l6, sd_write, sdc40, sdc41,
-            sdc42, sdc43, sdc4_clk, sdc4_cmd, sec_mi2s, sp_cmu, tgu_ch0, tgu_ch1,
-            tgu_ch2, tgu_ch3, tsense_pwm1, tsense_pwm2, tsif0_clk, tsif0_data,
-            tsif0_en, tsif0_error, tsif0_sync, tsif1_clk, tsif1_data, tsif1_en,
-            tsif1_error, tsif1_sync, usb2phy_ac, usb_phy, vsense_trigger ]
+                  cci_timer0, cci_timer1, cci_timer2, cci_timer3, cci_timer4, cri_trng,
+                  cri_trng0, cri_trng1, dbg_out, ddr_bist, ddr_pxi0, ddr_pxi1,
+                  ddr_pxi2, ddr_pxi3, dp_hot, dp_lcd, gcc_gp1, gcc_gp2, gcc_gp3, gpio,
+                  ibi_i3c, jitter_bist, lpass_slimbus, mdp_vsync, mdp_vsync0,
+                  mdp_vsync1, mdp_vsync2, mdp_vsync3, mi2s0_data0, mi2s0_data1,
+                  mi2s0_sck, mi2s0_ws, mi2s1_data0, mi2s1_data1, mi2s1_sck, mi2s1_ws,
+                  mi2s2_data0, mi2s2_data1, mi2s2_sck, mi2s2_ws, pci_e0, pci_e1,
+                  pci_e2, phase_flag, pll_bist, pll_bypassnl, pll_clk, pll_reset,
+                  pri_mi2s, prng_rosc, qdss_cti, qdss_gpio, qspi0, qspi1, qspi2, qspi3,
+                  qspi_clk, qspi_cs, qup0, qup1, qup10, qup11, qup12, qup13, qup14,
+                  qup15, qup16, qup17, qup18, qup19, qup2, qup3, qup4, qup5, qup6,
+                  qup7, qup8, qup9, qup_l4, qup_l5, qup_l6, sd_write, sdc40, sdc41,
+                  sdc42, sdc43, sdc4_clk, sdc4_cmd, sec_mi2s, sp_cmu, tgu_ch0, tgu_ch1,
+                  tgu_ch2, tgu_ch3, tsense_pwm1, tsense_pwm2, tsif0_clk, tsif0_data,
+                  tsif0_en, tsif0_error, tsif0_sync, tsif1_clk, tsif1_data, tsif1_en,
+                  tsif1_error, tsif1_sync, usb2phy_ac, usb_phy, vsense_trigger ]
 
         drive-strength:
           enum: [2, 4, 6, 8, 10, 12, 14, 16]
index 1b8e8b4..d75476e 100644 (file)
@@ -21,6 +21,7 @@ Required Properties:
     - "renesas,pfc-r8a774a1": for R8A774A1 (RZ/G2M) compatible pin-controller.
     - "renesas,pfc-r8a774b1": for R8A774B1 (RZ/G2N) compatible pin-controller.
     - "renesas,pfc-r8a774c0": for R8A774C0 (RZ/G2E) compatible pin-controller.
+    - "renesas,pfc-r8a774e1": for R8A774E1 (RZ/G2H) compatible pin-controller.
     - "renesas,pfc-r8a7778": for R8A7778 (R-Car M1) compatible pin-controller.
     - "renesas,pfc-r8a7779": for R8A7779 (R-Car H1) compatible pin-controller.
     - "renesas,pfc-r8a7790": for R8A7790 (R-Car H2) compatible pin-controller.
diff --git a/Documentation/devicetree/bindings/pinctrl/renesas,rza2-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/renesas,rza2-pinctrl.txt
deleted file mode 100644 (file)
index a63ccd4..0000000
+++ /dev/null
@@ -1,87 +0,0 @@
-Renesas RZ/A2 combined Pin and GPIO controller
-
-The Renesas SoCs of the RZ/A2 series feature a combined Pin and GPIO controller.
-Pin multiplexing and GPIO configuration is performed on a per-pin basis.
-Each port features up to 8 pins, each of them configurable for GPIO
-function (port mode) or in alternate function mode.
-Up to 8 different alternate function modes exist for each single pin.
-
-Pin controller node
--------------------
-
-Required properties:
-  - compatible: shall be:
-    - "renesas,r7s9210-pinctrl": for RZ/A2M
-  - reg
-    Address base and length of the memory area where the pin controller
-    hardware is mapped to.
-  - gpio-controller
-    This pin controller also controls pins as GPIO
-  - #gpio-cells
-    Must be 2
-  - gpio-ranges
-    Expresses the total number of GPIO ports/pins in this SoC
-
-Example: Pin controller node for RZ/A2M SoC (r7s9210)
-
-       pinctrl: pin-controller@fcffe000 {
-               compatible = "renesas,r7s9210-pinctrl";
-               reg = <0xfcffe000 0x1000>;
-
-               gpio-controller;
-               #gpio-cells = <2>;
-               gpio-ranges = <&pinctrl 0 0 176>;
-       };
-
-Sub-nodes
----------
-
-The child nodes of the pin controller designate pins to be used for
-specific peripheral functions or as GPIO.
-
-- Pin multiplexing sub-nodes:
-  A pin multiplexing sub-node describes how to configure a set of
-  (or a single) pin in some desired alternate function mode.
-  The values for the pinmux properties are a combination of port name, pin
-  number and the desired function index. Use the RZA2_PINMUX macro located
-  in include/dt-bindings/pinctrl/r7s9210-pinctrl.h to easily define these.
-  For assigning GPIO pins, use the macro RZA2_PIN also in r7s9210-pinctrl.h
-  to express the desired port pin.
-
-  Required properties:
-    - pinmux:
-      integer array representing pin number and pin multiplexing configuration.
-      When a pin has to be configured in alternate function mode, use this
-      property to identify the pin by its global index, and provide its
-      alternate function configuration number along with it.
-      When multiple pins are required to be configured as part of the same
-      alternate function they shall be specified as members of the same
-      argument list of a single "pinmux" property.
-      Helper macros to ease assembling the pin index from its position
-      (port where it sits on and pin number) and alternate function identifier
-      are provided by the pin controller header file at:
-      <dt-bindings/pinctrl/r7s9210-pinctrl.h>
-      Integers values in "pinmux" argument list are assembled as:
-      ((PORT * 8 + PIN) | MUX_FUNC << 16)
-
-  Example: Board specific pins configuration
-
-       &pinctrl {
-               /* Serial Console */
-               scif4_pins: serial4 {
-                       pinmux = <RZA2_PINMUX(PORT9, 0, 4)>,    /* TxD4 */
-                                <RZA2_PINMUX(PORT9, 1, 4)>;    /* RxD4 */
-               };
-       };
-
-  Example: Assigning a GPIO:
-
-       leds {
-               status = "okay";
-               compatible = "gpio-leds";
-
-               led0 {
-                       /* P6_0 */
-                       gpios = <&pinctrl RZA2_PIN(PORT6, 0) GPIO_ACTIVE_HIGH>;
-               };
-       };
diff --git a/Documentation/devicetree/bindings/pinctrl/renesas,rza2-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/renesas,rza2-pinctrl.yaml
new file mode 100644 (file)
index 0000000..b7911a9
--- /dev/null
@@ -0,0 +1,100 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pinctrl/renesas,rza2-pinctrl.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Renesas RZ/A2 combined Pin and GPIO controller
+
+maintainers:
+  - Chris Brandt <chris.brandt@renesas.com>
+  - Geert Uytterhoeven <geert+renesas@glider.be>
+
+description:
+  The Renesas SoCs of the RZ/A2 series feature a combined Pin and GPIO
+  controller.
+  Pin multiplexing and GPIO configuration are performed on a per-pin basis.
+  Each port features up to 8 pins, each of which can be configured either for
+  GPIO function (port mode) or for an alternate function mode.
+  Up to 8 different alternate function modes exist for each single pin.
+
+properties:
+  compatible:
+    const: "renesas,r7s9210-pinctrl" # RZ/A2M
+
+  reg:
+    maxItems: 1
+
+  gpio-controller: true
+
+  '#gpio-cells':
+    const: 2
+    description:
+      The first cell contains the global GPIO port index, constructed using the
+      RZA2_PIN() helper macro in r7s9210-pinctrl.h.
+      E.g. "RZA2_PIN(PORT6, 0)" for P6_0.
+
+  gpio-ranges:
+    maxItems: 1
+
+patternProperties:
+  "^.*$":
+    if:
+      type: object
+    then:
+      allOf:
+        - $ref: pincfg-node.yaml#
+        - $ref: pinmux-node.yaml#
+      description:
+        The child nodes of the pin controller designate pins to be used for
+        specific peripheral functions or as GPIO.
+
+        A pin multiplexing sub-node describes how to configure a set of
+        (or a single) pin in some desired alternate function mode.
+        The values for the pinmux properties are a combination of port name,
+        pin number and the desired function index. Use the RZA2_PINMUX macro
+        located in include/dt-bindings/pinctrl/r7s9210-pinctrl.h to easily
+        define these.
+        For assigning GPIO pins, use the macro RZA2_PIN, also in
+        r7s9210-pinctrl.h, to express the desired port pin.
+
+      properties:
+        phandle: true
+
+        pinmux:
+          description:
+            Values are constructed from GPIO port number, pin number, and
+            alternate function configuration number using the RZA2_PINMUX()
+            helper macro in r7s9210-pinctrl.h.
+
+      required:
+        - pinmux
+
+      additionalProperties: false
+
+required:
+  - compatible
+  - reg
+  - gpio-controller
+  - '#gpio-cells'
+  - gpio-ranges
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/pinctrl/r7s9210-pinctrl.h>
+    pinctrl: pin-controller@fcffe000 {
+            compatible = "renesas,r7s9210-pinctrl";
+            reg = <0xfcffe000 0x1000>;
+
+            gpio-controller;
+            #gpio-cells = <2>;
+            gpio-ranges = <&pinctrl 0 0 176>;
+
+            /* Serial Console */
+            scif4_pins: serial4 {
+                    pinmux = <RZA2_PINMUX(PORT9, 0, 4)>, /* TxD4 */
+                             <RZA2_PINMUX(PORT9, 1, 4)>; /* RxD4 */
+            };
+    };
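The removed text binding also carried a GPIO-assignment example that did not make it into the schema's examples section; a sketch of the same usage, assuming the standard GPIO flag definitions, is:

        #include <dt-bindings/gpio/gpio.h>
        #include <dt-bindings/pinctrl/r7s9210-pinctrl.h>

        leds {
                compatible = "gpio-leds";

                led0 {
                        /* P6_0 */
                        gpios = <&pinctrl RZA2_PIN(PORT6, 0) GPIO_ACTIVE_HIGH>;
                };
        };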
index 0857cbe..7287754 100644 (file)
@@ -48,8 +48,8 @@ properties:
 
   st,package:
     description:
-     Indicates the SOC package used.
-     More details in include/dt-bindings/pinctrl/stm32-pinfunc.h
+      Indicates the SOC package used.
+      More details in include/dt-bindings/pinctrl/stm32-pinfunc.h
     $ref: /schemas/types.yaml#/definitions/uint32
     enum: [1, 2, 4, 8]
 
index ff5936e..dd56434 100644 (file)
@@ -58,13 +58,13 @@ properties:
 
   power-domains:
     description:
-       A phandle and PM domain specifier as defined by bindings of the power
-       controller specified by phandle. Some power domains might be powered
-       from another power domain (or have other hardware specific
-       dependencies). For representing such dependency a standard PM domain
-       consumer binding is used. When provided, all domains created
-       by the given provider should be subdomains of the domain specified
-       by this binding.
+      A phandle and PM domain specifier as defined by bindings of the power
+      controller specified by phandle. Some power domains might be powered
+      from another power domain (or have other hardware specific
+      dependencies). For representing such dependency a standard PM domain
+      consumer binding is used. When provided, all domains created
+      by the given provider should be subdomains of the domain specified
+      by this binding.
 
 required:
   - "#power-domain-cells"
index 30eabbb..6244b8e 100644 (file)
@@ -44,9 +44,9 @@ required:
 
 anyOf:
   - required:
-    - gpios
+      - gpios
   - required:
-    - charge-status-gpios
+      - charge-status-gpios
 
 additionalProperties: false
 
index da68f4a..8740e07 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Freescale MXS PWM controller
 
 maintainers:
-  - Shawn Guo <shawn.guo@linaro.org>
+  - Shawn Guo <shawnguo@kernel.org>
   - Anson Huang <anson.huang@nxp.com>
 
 properties:
index d202220..c0d7700 100644 (file)
@@ -76,8 +76,7 @@ patternProperties:
 
   "^((s|l|lvs|5vs)[0-9]*)|(boost-bypass)|(bob)$":
     description: List of regulators and its properties
-    allOf:
-     - $ref: regulator.yaml#
+    $ref: regulator.yaml#
 
 additionalProperties: false
 
index 085cbd1..fb111e2 100644 (file)
@@ -29,7 +29,7 @@ properties:
           Short-circuit interrupt for lab.
 
     required:
-    - interrupts
+      - interrupts
 
   ibb:
     type: object
@@ -42,7 +42,7 @@ properties:
           Short-circuit interrupt for lab.
 
     required:
-    - interrupts
+      - interrupts
 
 required:
   - compatible
diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,pil-info.yaml b/Documentation/devicetree/bindings/remoteproc/qcom,pil-info.yaml
new file mode 100644 (file)
index 0000000..87c5231
--- /dev/null
@@ -0,0 +1,44 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/remoteproc/qcom,pil-info.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm peripheral image loader relocation info binding
+
+maintainers:
+  - Bjorn Andersson <bjorn.andersson@linaro.org>
+
+description:
+  The Qualcomm peripheral image loader relocation memory region, in IMEM, is
+  used for communicating remoteproc relocation information to post mortem
+  debugging tools.
+
+properties:
+  compatible:
+    const: qcom,pil-reloc-info
+
+  reg:
+    maxItems: 1
+
+required:
+  - compatible
+  - reg
+
+examples:
+  - |
+    imem@146bf000 {
+      compatible = "syscon", "simple-mfd";
+      reg = <0x146bf000 0x1000>;
+
+      #address-cells = <1>;
+      #size-cells = <1>;
+
+      ranges = <0 0x146bf000 0x1000>;
+
+      pil-reloc@94c {
+        compatible = "qcom,pil-reloc-info";
+        reg = <0x94c 0xc8>;
+      };
+    };
+...
diff --git a/Documentation/devicetree/bindings/remoteproc/ti,k3-dsp-rproc.yaml b/Documentation/devicetree/bindings/remoteproc/ti,k3-dsp-rproc.yaml
new file mode 100644 (file)
index 0000000..6070456
--- /dev/null
@@ -0,0 +1,184 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/remoteproc/ti,k3-dsp-rproc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: TI K3 DSP devices
+
+maintainers:
+  - Suman Anna <s-anna@ti.com>
+
+description: |
+  SoCs in the TI K3 family usually have one or more TI DSP Core sub-systems,
+  which are used to offload processor-intensive tasks or algorithms in order
+  to achieve various system-level goals.
+
+  These processor sub-systems usually contain additional sub-modules like
+  L1 and/or L2 caches/SRAMs, an Interrupt Controller, an external memory
+  controller, a dedicated local power/sleep controller etc. The DSP processor
+  cores in the K3 SoCs are usually either a TMS320C66x CorePac processor or a
+  TMS320C71x CorePac processor.
+
+  Each DSP Core sub-system is represented as a single DT node. Each node has a
+  number of required or optional properties that enable the OS running on the
+  host processor (Arm CorePac) to perform the device management of the remote
+  processor and to communicate with the remote processor.
+
+allOf:
+  - $ref: /schemas/arm/keystone/ti,k3-sci-common.yaml#
+
+properties:
+  compatible:
+    enum:
+      - ti,j721e-c66-dsp
+      - ti,j721e-c71-dsp
+    description:
+      Use "ti,j721e-c66-dsp" for C66x DSPs on K3 J721E SoCs
+      Use "ti,j721e-c71-dsp" for C71x DSPs on K3 J721E SoCs
+
+  resets:
+    description: |
+      Should contain the phandle to the reset controller node managing the
+      local resets for this device, and a reset specifier.
+    maxItems: 1
+
+  firmware-name:
+    description: |
+      Should contain the name of the default firmware image
+      file located on the firmware search path
+
+  mboxes:
+    description: |
+      OMAP Mailbox specifier denoting the sub-mailbox, to be used for
+      communication with the remote processor. This property should match
+      with the sub-mailbox node used in the firmware image.
+    maxItems: 1
+
+  memory-region:
+    minItems: 2
+    maxItems: 8
+    description: |
+      phandles to the reserved memory nodes to be associated with the remoteproc
+      device. There should be at least two reserved memory nodes defined. The
+      reserved memory nodes should be carveout nodes, and should be defined as
+      per the bindings in
+      Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt
+    items:
+      - description: region used for dynamic DMA allocations like vrings and
+                     vring buffers
+      - description: region reserved for firmware image sections
+    additionalItems: true
+
+# Optional properties:
+# --------------------
+
+  sram:
+    $ref: /schemas/types.yaml#/definitions/phandle-array
+    minItems: 1
+    maxItems: 4
+    description: |
+      phandles to one or more reserved on-chip SRAM regions. The regions
+      should be defined as child nodes of the respective SRAM node, following
+      the generic bindings in
+      Documentation/devicetree/bindings/sram/sram.yaml
+
+if:
+  properties:
+    compatible:
+      enum:
+        - ti,j721e-c66-dsp
+then:
+  properties:
+    reg:
+      items:
+        - description: Address and Size of the L2 SRAM internal memory region
+        - description: Address and Size of the L1 PRAM internal memory region
+        - description: Address and Size of the L1 DRAM internal memory region
+    reg-names:
+      items:
+        - const: l2sram
+        - const: l1pram
+        - const: l1dram
+else:
+  if:
+    properties:
+      compatible:
+        enum:
+          - ti,j721e-c71-dsp
+  then:
+    properties:
+      reg:
+        items:
+          - description: Address and Size of the L2 SRAM internal memory region
+          - description: Address and Size of the L1 DRAM internal memory region
+      reg-names:
+        items:
+          - const: l2sram
+          - const: l1dram
+
+required:
+  - compatible
+  - reg
+  - reg-names
+  - ti,sci
+  - ti,sci-dev-id
+  - ti,sci-proc-ids
+  - resets
+  - firmware-name
+  - mboxes
+  - memory-region
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    / {
+        model = "Texas Instruments K3 J721E SoC";
+        compatible = "ti,j721e";
+        #address-cells = <2>;
+        #size-cells = <2>;
+
+        bus@100000 {
+            compatible = "simple-bus";
+            #address-cells = <2>;
+            #size-cells = <2>;
+            ranges = <0x00 0x00100000 0x00 0x00100000 0x00 0x00020000>, /* ctrl mmr */
+                     <0x00 0x64800000 0x00 0x64800000 0x00 0x00800000>, /* C71_0 */
+                     <0x4d 0x80800000 0x4d 0x80800000 0x00 0x00800000>, /* C66_0 */
+                     <0x4d 0x81800000 0x4d 0x81800000 0x00 0x00800000>; /* C66_1 */
+
+            /* J721E C66_0 DSP node */
+            dsp@4d80800000 {
+                compatible = "ti,j721e-c66-dsp";
+                reg = <0x4d 0x80800000 0x00 0x00048000>,
+                      <0x4d 0x80e00000 0x00 0x00008000>,
+                      <0x4d 0x80f00000 0x00 0x00008000>;
+                reg-names = "l2sram", "l1pram", "l1dram";
+                ti,sci = <&dmsc>;
+                ti,sci-dev-id = <142>;
+                ti,sci-proc-ids = <0x03 0xFF>;
+                resets = <&k3_reset 142 1>;
+                firmware-name = "j7-c66_0-fw";
+                memory-region = <&c66_0_dma_memory_region>,
+                                <&c66_0_memory_region>;
+                mboxes = <&mailbox0_cluster3 &mbox_c66_0>;
+            };
+
+            /* J721E C71_0 DSP node */
+            c71_0: dsp@64800000 {
+                compatible = "ti,j721e-c71-dsp";
+                reg = <0x00 0x64800000 0x00 0x00080000>,
+                      <0x00 0x64e00000 0x00 0x0000c000>;
+                reg-names = "l2sram", "l1dram";
+                ti,sci = <&dmsc>;
+                ti,sci-dev-id = <15>;
+                ti,sci-proc-ids = <0x30 0xFF>;
+                resets = <&k3_reset 15 1>;
+                firmware-name = "j7-c71_0-fw";
+                memory-region = <&c71_0_dma_memory_region>,
+                                <&c71_0_memory_region>;
+                mboxes = <&mailbox0_cluster4 &mbox_c71_0>;
+            };
+        };
+    };
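The memory-region phandles in the example above (c66_0_dma_memory_region and friends) are expected to resolve to reserved-memory carveout nodes; a minimal sketch of such nodes, with addresses and sizes chosen purely for illustration, could be:

        reserved-memory {
                #address-cells = <2>;
                #size-cells = <2>;
                ranges;

                /* Illustrative addresses and sizes only */
                c66_0_dma_memory_region: c66-dma-memory@a6000000 {
                        compatible = "shared-dma-pool";
                        reg = <0x00 0xa6000000 0x00 0x00100000>;
                        no-map;
                };

                c66_0_memory_region: c66-memory@a6100000 {
                        compatible = "shared-dma-pool";
                        reg = <0x00 0xa6100000 0x00 0x00f00000>;
                        no-map;
                };
        };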
index b1a71c1..569cd3b 100644 (file)
@@ -24,9 +24,9 @@ properties:
   compatible:
     items:
       - enum:
-        - fsl,imx7d-src
-        - fsl,imx8mq-src
-        - fsl,imx8mp-src
+          - fsl,imx7d-src
+          - fsl,imx8mq-src
+          - fsl,imx8mp-src
       - const: syscon
 
   reg:
index 4206bf8..bc2c7e5 100644 (file)
@@ -16,16 +16,16 @@ properties:
   compatible:
     oneOf:
       - enum:
-        - ingenic,jz4740-rtc
-        - ingenic,jz4760-rtc
+          - ingenic,jz4740-rtc
+          - ingenic,jz4760-rtc
       - items:
-        - const: ingenic,jz4725b-rtc
-        - const: ingenic,jz4740-rtc
+          - const: ingenic,jz4725b-rtc
+          - const: ingenic,jz4740-rtc
       - items:
-        - enum:
-          - ingenic,jz4770-rtc
-          - ingenic,jz4780-rtc
-        - const: ingenic,jz4760-rtc
+          - enum:
+              - ingenic,jz4770-rtc
+              - ingenic,jz4780-rtc
+          - const: ingenic,jz4760-rtc
 
   reg:
     maxItems: 1
index 18cb456..c7d14de 100644 (file)
@@ -52,6 +52,8 @@ properties:
       - nxp,pcf2127
       # Real-time clock
       - nxp,pcf2129
+      # Real-time clock
+      - nxp,pca2129
       # Real-time Clock Module
       - pericom,pt7c4338
       # I2C bus SERIAL INTERFACE REAL-TIME CLOCK IC
index c023d65..dc83493 100644 (file)
@@ -16,18 +16,18 @@ properties:
   compatible:
     oneOf:
       - enum:
-        - ingenic,jz4740-uart
-        - ingenic,jz4760-uart
-        - ingenic,jz4780-uart
-        - ingenic,x1000-uart
+          - ingenic,jz4740-uart
+          - ingenic,jz4760-uart
+          - ingenic,jz4780-uart
+          - ingenic,x1000-uart
       - items:
-        - enum:
-          - ingenic,jz4770-uart
-          - ingenic,jz4775-uart
-        - const: ingenic,jz4760-uart
+          - enum:
+              - ingenic,jz4770-uart
+              - ingenic,jz4775-uart
+          - const: ingenic,jz4760-uart
       - items:
-        - const: ingenic,jz4725b-uart
-        - const: ingenic,jz4740-uart
+          - const: ingenic,jz4725b-uart
+          - const: ingenic,jz4740-uart
 
   reg:
     maxItems: 1
index 3cd0b70..55fffae 100644 (file)
@@ -97,13 +97,13 @@ allOf:
         clock-names:
           oneOf:
             - items:
-              - const: t0_clk
-              - const: slow_clk
+                - const: t0_clk
+                - const: slow_clk
             - items:
-              - const: t0_clk
-              - const: t1_clk
-              - const: t2_clk
-              - const: slow_clk
+                - const: t0_clk
+                - const: t1_clk
+                - const: t2_clk
+                - const: slow_clk
 
 required:
   - compatible
index a2b29cc..bd04fdb 100644 (file)
@@ -7,8 +7,8 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
 title: GENI Serial Engine QUP Wrapper Controller
 
 maintainers:
- - Mukesh Savaliya <msavaliy@codeaurora.org>
- - Akash Asthana <akashast@codeaurora.org>
+  - Mukesh Savaliya <msavaliy@codeaurora.org>
+  - Akash Asthana <akashast@codeaurora.org>
 
 description: |
  Generic Interface (GENI) based Qualcomm Universal Peripheral (QUP) wrapper
@@ -38,10 +38,10 @@ properties:
       - description: Slave AHB Clock
 
   "#address-cells":
-     const: 2
+    const: 2
 
   "#size-cells":
-     const: 2
+    const: 2
 
   ranges: true
 
@@ -79,15 +79,15 @@ patternProperties:
         maxItems: 1
 
       interconnects:
-         minItems: 2
-         maxItems: 3
+        minItems: 2
+        maxItems: 3
 
       interconnect-names:
-         minItems: 2
-         items:
-           - const: qup-core
-           - const: qup-config
-           - const: qup-memory
+        minItems: 2
+        items:
+          - const: qup-core
+          - const: qup-config
+          - const: qup-memory
 
     required:
       - reg
@@ -111,10 +111,10 @@ patternProperties:
         maxItems: 1
 
       "#address-cells":
-         const: 1
+        const: 1
 
       "#size-cells":
-         const: 0
+        const: 0
 
     required:
       - compatible
@@ -136,10 +136,10 @@ patternProperties:
         maxItems: 1
 
       "#address-cells":
-         const: 1
+        const: 1
 
       "#size-cells":
-         const: 0
+        const: 0
 
       clock-frequency:
         description: Desired I2C bus clock frequency in Hz.
index f9344ad..7a7f284 100644 (file)
@@ -19,12 +19,11 @@ properties:
   compatible:
     items:
       - enum:
-        - amlogic,aiu-gxbb
-        - amlogic,aiu-gxl
-        - amlogic,aiu-meson8
-        - amlogic,aiu-meson8b
-      - const:
-          amlogic,aiu
+          - amlogic,aiu-gxbb
+          - amlogic,aiu-gxl
+          - amlogic,aiu-meson8
+          - amlogic,aiu-meson8b
+      - const: amlogic,aiu
 
   clocks:
     items:
index 51a0c30..b4b3828 100644 (file)
@@ -19,13 +19,11 @@ properties:
   compatible:
     oneOf:
       - items:
-        - const:
-            amlogic,g12a-toacodec
+          - const: amlogic,g12a-toacodec
       - items:
-        - enum:
-          - amlogic,sm1-toacodec
-        - const:
-            amlogic,g12a-toacodec
+          - enum:
+              - amlogic,sm1-toacodec
+          - const: amlogic,g12a-toacodec
 
   reg:
     maxItems: 1
index 83f44f0..5bcb643 100644 (file)
@@ -11,7 +11,7 @@ maintainers:
 
 properties:
   compatible:
-      const: cirrus,cs42l51
+    const: cirrus,cs42l51
 
   reg:
     maxItems: 1
index 44f49be..cdc0fda 100644 (file)
@@ -16,13 +16,13 @@ properties:
   compatible:
     oneOf:
       - enum:
-        - ingenic,jz4740-i2s
-        - ingenic,jz4760-i2s
-        - ingenic,jz4770-i2s
-        - ingenic,jz4780-i2s
+          - ingenic,jz4740-i2s
+          - ingenic,jz4760-i2s
+          - ingenic,jz4770-i2s
+          - ingenic,jz4780-i2s
       - items:
-        - const: ingenic,jz4725b-i2s
-        - const: ingenic,jz4740-i2s
+          - const: ingenic,jz4725b-i2s
+          - const: ingenic,jz4740-i2s
 
   '#sound-dai-cells':
     const: 0
index e5ac352..fea9a1b 100644 (file)
@@ -11,23 +11,21 @@ maintainers:
 
 properties:
   compatible:
-      const: maxim,max98390
+    const: maxim,max98390
 
   reg:
     maxItems: 1
     description: I2C address of the device.
 
   maxim,temperature_calib:
-    allOf:
-      - $ref: /schemas/types.yaml#/definitions/uint32
     description: The calculated temperature data was measured while doing the calibration.
+    $ref: /schemas/types.yaml#/definitions/uint32
     minimum: 0
     maximum: 65535
 
   maxim,r0_calib:
-    allOf:
-      - $ref: /schemas/types.yaml#/definitions/uint32
     description: This is r0 calibration data which was measured in factory mode.
+    $ref: /schemas/types.yaml#/definitions/uint32
     minimum: 1
     maximum: 8388607
 
index e620c77..2f2fcff 100644 (file)
@@ -48,8 +48,7 @@ properties:
 
   sound-name-prefix:
     pattern: "^DSPK[1-9]$"
-    allOf:
-      - $ref: /schemas/types.yaml#/definitions/string
+    $ref: /schemas/types.yaml#/definitions/string
     description:
       Used as prefix for sink/source names of the component. Must be a
       unique string among multiple instances of the same component.
index 1c14e83..8689d9f 100644 (file)
@@ -49,8 +49,7 @@ properties:
 
   sound-name-prefix:
     pattern: "^DMIC[1-9]$"
-    allOf:
-      - $ref: /schemas/types.yaml#/definitions/string
+    $ref: /schemas/types.yaml#/definitions/string
     description:
       used as prefix for sink/source names of the component. Must be a
       unique string among multiple instances of the same component.
index 7957970..9bbf181 100644 (file)
@@ -67,8 +67,7 @@ properties:
 
   sound-name-prefix:
     pattern: "^I2S[1-9]$"
-    allOf:
-      - $ref: /schemas/types.yaml#/definitions/string
+    $ref: /schemas/types.yaml#/definitions/string
     description:
       Used as prefix for sink/source names of the component. Must be a
       unique string among multiple instances of the same component.
index acb2b88..245895b 100644 (file)
@@ -19,16 +19,16 @@ properties:
       - const: rockchip,rk3066-i2s
       - items:
           - enum:
-            - rockchip,px30-i2s
-            - rockchip,rk3036-i2s
-            - rockchip,rk3188-i2s
-            - rockchip,rk3228-i2s
-            - rockchip,rk3288-i2s
-            - rockchip,rk3308-i2s
-            - rockchip,rk3328-i2s
-            - rockchip,rk3366-i2s
-            - rockchip,rk3368-i2s
-            - rockchip,rk3399-i2s
+              - rockchip,px30-i2s
+              - rockchip,rk3036-i2s
+              - rockchip,rk3188-i2s
+              - rockchip,rk3228-i2s
+              - rockchip,rk3288-i2s
+              - rockchip,rk3308-i2s
+              - rockchip,rk3328-i2s
+              - rockchip,rk3366-i2s
+              - rockchip,rk3368-i2s
+              - rockchip,rk3399-i2s
           - const: rockchip,rk3066-i2s
 
   reg:
@@ -55,8 +55,8 @@ properties:
     oneOf:
       - const: rx
       - items:
-        - const: tx
-        - const: rx
+          - const: tx
+          - const: rx
 
   power-domains:
     maxItems: 1
index c467152..7bad6f1 100644 (file)
@@ -25,8 +25,8 @@ properties:
       - const: rockchip,rk3399-spdif
       - items:
           - enum:
-            - rockchip,rk3188-spdif
-            - rockchip,rk3288-spdif
+              - rockchip,rk3188-spdif
+              - rockchip,rk3288-spdif
           - const: rockchip,rk3066-spdif
 
   reg:
index 8192450..33a90f8 100644 (file)
@@ -44,8 +44,8 @@ properties:
     $ref: /schemas/types.yaml#/definitions/uint32
     description: Sets TDM RX capture edge.
     enum:
-          - 0 # Rising edge
-          - 1 # Falling edge
+      - 0 # Rising edge
+      - 1 # Falling edge
 
   '#sound-dai-cells':
     const: 1
index 6f2be65..d52cfbe 100644 (file)
@@ -37,13 +37,11 @@ properties:
 
   ti,cpb-mcasp:
     description: phandle to McASP used on CPB
-    allOf:
-      - $ref: /schemas/types.yaml#/definitions/phandle
+    $ref: /schemas/types.yaml#/definitions/phandle
 
   ti,cpb-codec:
     description: phandle to the pcm3168a codec used on the CPB
-    allOf:
-      - $ref: /schemas/types.yaml#/definitions/phandle
+    $ref: /schemas/types.yaml#/definitions/phandle
 
   clocks:
     items:
index e0b8847..bb780f6 100644 (file)
@@ -50,28 +50,23 @@ properties:
 
   ti,cpb-mcasp:
     description: phandle to McASP used on CPB
-    allOf:
-      - $ref: /schemas/types.yaml#/definitions/phandle
+    $ref: /schemas/types.yaml#/definitions/phandle
 
   ti,cpb-codec:
     description: phandle to the pcm3168a codec used on the CPB
-    allOf:
-      - $ref: /schemas/types.yaml#/definitions/phandle
+    $ref: /schemas/types.yaml#/definitions/phandle
 
   ti,ivi-mcasp:
     description: phandle to McASP used on IVI
-    allOf:
-      - $ref: /schemas/types.yaml#/definitions/phandle
+    $ref: /schemas/types.yaml#/definitions/phandle
 
   ti,ivi-codec-a:
     description: phandle to the pcm3168a-A codec on the expansion board
-    allOf:
-      - $ref: /schemas/types.yaml#/definitions/phandle
+    $ref: /schemas/types.yaml#/definitions/phandle
 
   ti,ivi-codec-b:
     description: phandle to the pcm3168a-B codec on the expansion board
-    allOf:
-      - $ref: /schemas/types.yaml#/definitions/phandle
+    $ref: /schemas/types.yaml#/definitions/phandle
 
   clocks:
     items:
index e84d4a2..f578f17 100644 (file)
@@ -32,32 +32,32 @@ properties:
   reg:
     maxItems: 1
     description: |
-       I2C addresss of the device can be one of these 0x4c, 0x4d, 0x4e or 0x4f
+      I2C address of the device can be one of these 0x4c, 0x4d, 0x4e or 0x4f
 
   reset-gpios:
     description: |
-       GPIO used for hardware reset.
+      GPIO used for hardware reset.
 
   areg-supply:
-      description: |
-       Regulator with AVDD at 3.3V.  If not defined then the internal regulator
-       is enabled.
+    description: |
+      Regulator with AVDD at 3.3V.  If not defined then the internal regulator
+      is enabled.
 
   ti,mic-bias-source:
     description: |
-       Indicates the source for MIC Bias.
-       0 - Mic bias is set to VREF
-       1 - Mic bias is set to VREF × 1.096
-       6 - Mic bias is set to AVDD
+      Indicates the source for MIC Bias.
+      0 - Mic bias is set to VREF
+      1 - Mic bias is set to VREF × 1.096
+      6 - Mic bias is set to AVDD
     $ref: /schemas/types.yaml#/definitions/uint32
     enum: [0, 1, 6]
 
   ti,vref-source:
     description: |
-       Indicates the source for MIC Bias.
-       0 - Set VREF to 2.75V
-       1 - Set VREF to 2.5V
-       2 - Set VREF to 1.375V
+      Indicates the source for MIC Bias.
+      0 - Set VREF to 2.75V
+      1 - Set VREF to 2.5V
+      2 - Set VREF to 1.375V
     $ref: /schemas/types.yaml#/definitions/uint32
     enum: [0, 1, 2]
 
@@ -109,7 +109,7 @@ properties:
     default: [0, 0, 0, 0]
 
 patternProperties:
- '^ti,gpo-config-[1-4]$':
+  '^ti,gpo-config-[1-4]$':
     $ref: /schemas/types.yaml#/definitions/uint32-array
     description: |
        Defines the configuration and output driver for the general purpose
index 243a6b1..7866a65 100644 (file)
@@ -22,10 +22,10 @@ properties:
       - const: allwinner,sun6i-a31-spi
       - const: allwinner,sun8i-h3-spi
       - items:
-        - enum:
-          - allwinner,sun8i-r40-spi
-          - allwinner,sun50i-h6-spi
-        - const: allwinner,sun8i-h3-spi
+          - enum:
+              - allwinner,sun8i-r40-spi
+              - allwinner,sun50i-h6-spi
+          - const: allwinner,sun8i-h3-spi
 
   reg:
     maxItems: 1
index 6e44c9c..50df1a4 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Freescale (Enhanced) Configurable Serial Peripheral Interface (CSPI/eCSPI) for i.MX
 
 maintainers:
-  - Shawn Guo <shawn.guo@linaro.org>
+  - Shawn Guo <shawnguo@kernel.org>
 
 allOf:
   - $ref: "/schemas/spi/spi-controller.yaml#"
@@ -23,19 +23,19 @@ properties:
       - const: fsl,imx51-ecspi
       - const: fsl,imx53-ecspi
       - items:
-        - enum:
-          - fsl,imx50-ecspi
-          - fsl,imx6q-ecspi
-          - fsl,imx6sx-ecspi
-          - fsl,imx6sl-ecspi
-          - fsl,imx6sll-ecspi
-          - fsl,imx6ul-ecspi
-          - fsl,imx7d-ecspi
-          - fsl,imx8mq-ecspi
-          - fsl,imx8mm-ecspi
-          - fsl,imx8mn-ecspi
-          - fsl,imx8mp-ecspi
-        - const: fsl,imx51-ecspi
+          - enum:
+              - fsl,imx50-ecspi
+              - fsl,imx6q-ecspi
+              - fsl,imx6sx-ecspi
+              - fsl,imx6sl-ecspi
+              - fsl,imx6sll-ecspi
+              - fsl,imx6ul-ecspi
+              - fsl,imx7d-ecspi
+              - fsl,imx8mq-ecspi
+              - fsl,imx8mm-ecspi
+              - fsl,imx8mn-ecspi
+              - fsl,imx8mp-ecspi
+          - const: fsl,imx51-ecspi
 
   reg:
     maxItems: 1
index 22882e7..312d8fe 100644 (file)
@@ -39,6 +39,7 @@ properties:
       spi common code does not support use of CS signals discontinuously.
       i.MX8DXL-EVK board only uses CS1 without using CS0. Therefore, add
       this property to re-config the chipselect value in the LPSPI driver.
+    type: boolean
 
 required:
   - compatible
index 0ae692d..3d3fed6 100644 (file)
@@ -43,47 +43,47 @@ properties:
     maxItems: 1
 
 required:
-   - compatible
-   - reg
-   - spi-max-frequency
-   - mux-controls
+  - compatible
+  - reg
+  - spi-max-frequency
+  - mux-controls
 
 examples:
-   - |
-     #include <dt-bindings/gpio/gpio.h>
-     mux: mux-controller {
-       compatible = "gpio-mux";
-       #mux-control-cells = <0>;
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+    mux: mux-controller {
+        compatible = "gpio-mux";
+        #mux-control-cells = <0>;
 
-       mux-gpios = <&gpio0 3 GPIO_ACTIVE_HIGH>;
-     };
+        mux-gpios = <&gpio0 3 GPIO_ACTIVE_HIGH>;
+    };
 
-     spi {
-       #address-cells = <1>;
-       #size-cells = <0>;
-       spi@0 {
-         compatible = "spi-mux";
-         reg = <0>;
-         #address-cells = <1>;
-         #size-cells = <0>;
-         spi-max-frequency = <100000000>;
+    spi {
+        #address-cells = <1>;
+        #size-cells = <0>;
+        spi@0 {
+            compatible = "spi-mux";
+            reg = <0>;
+            #address-cells = <1>;
+            #size-cells = <0>;
+            spi-max-frequency = <100000000>;
 
-         mux-controls = <&mux>;
+            mux-controls = <&mux>;
 
-         spi-flash@0 {
-           compatible = "jedec,spi-nor";
-           reg = <0>;
-           #address-cells = <1>;
-           #size-cells = <0>;
-           spi-max-frequency = <40000000>;
-         };
+            spi-flash@0 {
+                compatible = "jedec,spi-nor";
+                reg = <0>;
+                #address-cells = <1>;
+                #size-cells = <0>;
+                spi-max-frequency = <40000000>;
+            };
 
-         spi-device@1 {
-           compatible = "lineartechnology,ltc2488";
-           reg = <1>;
-           #address-cells = <1>;
-           #size-cells = <0>;
-           spi-max-frequency = <10000000>;
-         };
-       };
-     };
+            spi-device@1 {
+                compatible = "lineartechnology,ltc2488";
+                reg = <1>;
+                #address-cells = <1>;
+                #size-cells = <0>;
+                spi-max-frequency = <10000000>;
+            };
+        };
+    };
index 81ad4b7..74dc618 100644 (file)
@@ -26,13 +26,13 @@ properties:
       - const: rockchip,rv1108-spi
       - items:
           - enum:
-            - rockchip,px30-spi
-            - rockchip,rk3188-spi
-            - rockchip,rk3288-spi
-            - rockchip,rk3308-spi
-            - rockchip,rk3328-spi
-            - rockchip,rk3368-spi
-            - rockchip,rk3399-spi
+              - rockchip,px30-spi
+              - rockchip,rk3188-spi
+              - rockchip,rk3288-spi
+              - rockchip,rk3308-spi
+              - rockchip,rk3328-spi
+              - rockchip,rk3368-spi
+              - rockchip,rk3399-spi
           - const: rockchip,rk3066-spi
 
   reg:
index aedac16..16b57f5 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: NXP i.MX Thermal Binding
 
 maintainers:
-  - Shawn Guo <shawn.guo@linaro.org>
+  - Shawn Guo <shawnguo@kernel.org>
   - Anson Huang <Anson.Huang@nxp.com>
 
 properties:
index 5145883..ad4beaf 100644 (file)
@@ -44,9 +44,9 @@ select: true
 properties:
   "#cooling-cells":
     description:
-        Must be 2, in order to specify minimum and maximum cooling state used in
-        the cooling-maps reference. The first cell is the minimum cooling state
-        and the second cell is the maximum cooling state requested.
+      Must be 2, in order to specify minimum and maximum cooling state used in
+      the cooling-maps reference. The first cell is the minimum cooling state
+      and the second cell is the maximum cooling state requested.
     const: 2
 
 examples:
index 7a922f5..a832d42 100644 (file)
@@ -18,29 +18,28 @@ description: |
   This binding describes the thermal idle node.
 
 properties:
-   $nodename:
-     const: thermal-idle
-     description: |
-        A thermal-idle node describes the idle cooling device properties to
-        cool down efficiently the attached thermal zone.
-
-   '#cooling-cells':
-      const: 2
-      description: |
-         Must be 2, in order to specify minimum and maximum cooling state used in
-         the cooling-maps reference. The first cell is the minimum cooling state
-         and the second cell is the maximum cooling state requested.
-
-   duration-us:
-      description: |
-         The idle duration in microsecond the device should cool down.
-
-   exit-latency-us:
-      description: |
-         The exit latency constraint in microsecond for the injected
-         idle state for the device. It is the latency constraint to
-         apply when selecting an idle state from among all the present
-         ones.
+  $nodename:
+    const: thermal-idle
+    description: |
+      A thermal-idle node describes the idle cooling device properties to
+      cool down efficiently the attached thermal zone.
+
+  '#cooling-cells':
+    const: 2
+    description: |
+      Must be 2, in order to specify minimum and maximum cooling state used in
+      the cooling-maps reference. The first cell is the minimum cooling state
+      and the second cell is the maximum cooling state requested.
+
+  duration-us:
+    description: |
+      The idle duration in microsecond the device should cool down.
+
+  exit-latency-us:
+    description: |
+      The exit latency constraint in microsecond for the injected idle state 
+      for the device. It is the latency constraint to apply when selecting an 
+      idle state from among all the present ones.
 
 required:
   - '#cooling-cells'
index 883f7f4..a4f51f4 100644 (file)
@@ -20,17 +20,17 @@ properties:
       - const: fsl,imx31-gpt
       - items:
           - enum:
-            - fsl,imx25-gpt
-            - fsl,imx50-gpt
-            - fsl,imx51-gpt
-            - fsl,imx53-gpt
-            - fsl,imx6q-gpt
+              - fsl,imx25-gpt
+              - fsl,imx50-gpt
+              - fsl,imx51-gpt
+              - fsl,imx53-gpt
+              - fsl,imx6q-gpt
           - const: fsl,imx31-gpt
       - const: fsl,imx6dl-gpt
       - items:
           - enum:
-            - fsl,imx6sl-gpt
-            - fsl,imx6sx-gpt
+              - fsl,imx6sl-gpt
+              - fsl,imx6sx-gpt
           - const: fsl,imx6dl-gpt
 
   reg:
index 371fb02..024bcad 100644 (file)
@@ -49,16 +49,16 @@ properties:
   compatible:
     oneOf:
       - items:
-        - enum:
-          - ingenic,jz4740-tcu
-          - ingenic,jz4725b-tcu
-          - ingenic,jz4770-tcu
-          - ingenic,x1000-tcu
-        - const: simple-mfd
+          - enum:
+              - ingenic,jz4740-tcu
+              - ingenic,jz4725b-tcu
+              - ingenic,jz4770-tcu
+              - ingenic,x1000-tcu
+          - const: simple-mfd
       - items:
-        - const: ingenic,jz4780-tcu
-        - const: ingenic,jz4770-tcu
-        - const: simple-mfd
+          - const: ingenic,jz4780-tcu
+          - const: ingenic,jz4770-tcu
+          - const: simple-mfd
 
   reg:
     maxItems: 1
@@ -113,13 +113,13 @@ patternProperties:
       compatible:
         oneOf:
           - enum:
-            - ingenic,jz4740-watchdog
-            - ingenic,jz4780-watchdog
+              - ingenic,jz4740-watchdog
+              - ingenic,jz4780-watchdog
           - items:
-            - enum:
-              - ingenic,jz4770-watchdog
-              - ingenic,jz4725b-watchdog
-            - const: ingenic,jz4740-watchdog
+              - enum:
+                  - ingenic,jz4770-watchdog
+                  - ingenic,jz4725b-watchdog
+              - const: ingenic,jz4740-watchdog
 
       reg:
         maxItems: 1
@@ -143,13 +143,13 @@ patternProperties:
       compatible:
         oneOf:
           - enum:
-            - ingenic,jz4740-pwm
-            - ingenic,jz4725b-pwm
+              - ingenic,jz4740-pwm
+              - ingenic,jz4725b-pwm
           - items:
-            - enum:
-              - ingenic,jz4770-pwm
-              - ingenic,jz4780-pwm
-            - const: ingenic,jz4740-pwm
+              - enum:
+                  - ingenic,jz4770-pwm
+                  - ingenic,jz4780-pwm
+              - const: ingenic,jz4740-pwm
 
       reg:
         maxItems: 1
@@ -182,11 +182,11 @@ patternProperties:
       compatible:
         oneOf:
           - enum:
-            - ingenic,jz4725b-ost
-            - ingenic,jz4770-ost
+              - ingenic,jz4725b-ost
+              - ingenic,jz4770-ost
           - items:
-            - const: ingenic,jz4780-ost
-            - const: ingenic,jz4770-ost
+              - const: ingenic,jz4780-ost
+              - const: ingenic,jz4770-ost
 
       reg:
         maxItems: 1
diff --git a/Documentation/devicetree/bindings/timer/sifive,clint.yaml b/Documentation/devicetree/bindings/timer/sifive,clint.yaml
new file mode 100644 (file)
index 0000000..2a0e9cd
--- /dev/null
@@ -0,0 +1,60 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/timer/sifive,clint.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: SiFive Core Local Interruptor
+
+maintainers:
+  - Palmer Dabbelt <palmer@dabbelt.com>
+  - Anup Patel <anup.patel@wdc.com>
+
+description:
+  SiFive (and other RISC-V) SOCs include an implementation of the SiFive
+  Core Local Interruptor (CLINT) for M-mode timer and M-mode inter-processor
+  interrupts. It directly connects to the timer and inter-processor interrupt
+  lines of various HARTs (or CPUs) so RISC-V per-HART (or per-CPU) local
+  interrupt controller is the parent interrupt controller for CLINT device.
+  The clock frequency of CLINT is specified via "timebase-frequency" DT
+  property of "/cpus" DT node. The "timebase-frequency" DT property is
+  described in Documentation/devicetree/bindings/riscv/cpus.yaml
+
+properties:
+  compatible:
+    items:
+      - const: sifive,fu540-c000-clint
+      - const: sifive,clint0
+
+    description:
+      Should be "sifive,<chip>-clint" and "sifive,clint<version>".
+      Supported compatible strings are -
+      "sifive,fu540-c000-clint" for the SiFive CLINT v0 as integrated
+      onto the SiFive FU540 chip, and "sifive,clint0" for the SiFive
+      CLINT v0 IP block with no chip integration tweaks.
+      Please refer to sifive-blocks-ip-versioning.txt for details
+
+  reg:
+    maxItems: 1
+
+  interrupts-extended:
+    minItems: 1
+
+additionalProperties: false
+
+required:
+  - compatible
+  - reg
+  - interrupts-extended
+
+examples:
+  - |
+    timer@2000000 {
+      compatible = "sifive,fu540-c000-clint", "sifive,clint0";
+      interrupts-extended = <&cpu1intc 3 &cpu1intc 7
+                             &cpu2intc 3 &cpu2intc 7
+                             &cpu3intc 3 &cpu3intc 7
+                             &cpu4intc 3 &cpu4intc 7>;
+      reg = <0x2000000 0x10000>;
+    };
+...
index 5d300ef..7b39e32 100644 (file)
@@ -27,8 +27,8 @@ properties:
   clocks:
     minItems: 1
     items:
-       - description: Timer ticks reference clock source
-       - description: APB interface clock source
+      - description: Timer ticks reference clock source
+      - description: APB interface clock source
 
   clock-names:
     minItems: 1
index b7e94fe..4ace803 100644 (file)
@@ -298,7 +298,7 @@ properties:
           - national,lm80
             # Temperature sensor with integrated fan control
           - national,lm85
-            # ±0.33°C Accurate, 12-Bit + Sign Temperature Sensor and Thermal Window Comparator with Two-Wire Interface
+            # I2C ±0.33°C Accurate, 12-Bit + Sign Temperature Sensor and Thermal Window Comparator
           - national,lm92
             # i2c trusted platform module (TPM)
           - nuvoton,npct501
index 4ff632d..ffa157a 100644 (file)
@@ -19,24 +19,24 @@ properties:
           - const: snps,dwc2
       - items:
           - enum:
-            - rockchip,px30-usb
-            - rockchip,rk3036-usb
-            - rockchip,rk3188-usb
-            - rockchip,rk3228-usb
-            - rockchip,rk3288-usb
-            - rockchip,rk3328-usb
-            - rockchip,rk3368-usb
-            - rockchip,rv1108-usb
+              - rockchip,px30-usb
+              - rockchip,rk3036-usb
+              - rockchip,rk3188-usb
+              - rockchip,rk3228-usb
+              - rockchip,rk3288-usb
+              - rockchip,rk3328-usb
+              - rockchip,rk3368-usb
+              - rockchip,rv1108-usb
           - const: rockchip,rk3066-usb
           - const: snps,dwc2
       - const: lantiq,arx100-usb
       - const: lantiq,xrx200-usb
       - items:
           - enum:
-            - amlogic,meson8-usb
-            - amlogic,meson8b-usb
-            - amlogic,meson-gxbb-usb
-            - amlogic,meson-g12a-usb
+              - amlogic,meson8-usb
+              - amlogic,meson8b-usb
+              - amlogic,meson-gxbb-usb
+              - amlogic,meson-g12a-usb
           - const: snps,dwc2
       - const: amcc,dwc-otg
       - const: snps,dwc2
@@ -116,12 +116,13 @@ properties:
 
   snps,need-phy-for-wake:
     $ref: /schemas/types.yaml#/definitions/flag
-    description: If present indicates that the phy needs to be left on for remote wakeup during suspend.
+    description: If present indicates that the phy needs to be left on for 
+      remote wakeup during suspend.
 
   snps,reset-phy-on-wake:
     $ref: /schemas/types.yaml#/definitions/flag
-    description: If present indicates that we need to reset the PHY when we detect a wakeup.
-                 This is due to a hardware errata.
+    description: If present indicates that we need to reset the PHY when we 
+      detect a wakeup. This is due to a hardware errata.
 
 required:
   - compatible
index 69f3f26..247ef00 100644 (file)
@@ -80,7 +80,7 @@ properties:
   companion:
     $ref: /schemas/types.yaml#/definitions/phandle
     description:
-     Phandle of a companion.
+      Phandle of a companion.
 
   phys:
     description: PHY specifier for the USB PHY
index c334aea..678396e 100644 (file)
@@ -16,11 +16,11 @@ properties:
   compatible:
     oneOf:
       - enum:
-        - ingenic,jz4770-musb
-        - ingenic,jz4740-musb
+          - ingenic,jz4770-musb
+          - ingenic,jz4740-musb
       - items:
-        - const: ingenic,jz4725b-musb
-        - const: ingenic,jz4740-musb
+          - const: ingenic,jz4725b-musb
+          - const: ingenic,jz4740-musb
 
   reg:
     maxItems: 1
index 0073763..196589c 100644 (file)
@@ -57,11 +57,11 @@ properties:
     minItems: 4
     maxItems: 5
     items:
-     - const: dev
-     - const: ss
-     - const: ss_src
-     - const: fs_src
-     - const: hs_src
+      - const: dev
+      - const: ss
+      - const: ss_src
+      - const: fs_src
+      - const: hs_src
 
   power-domains:
     items:
index 9075025..484fc10 100644 (file)
@@ -19,9 +19,9 @@ properties:
 
   power-domains:
     description:
-       PM domain provider node and an args specifier containing
-       the USB device id value. See,
-       Documentation/devicetree/bindings/soc/ti/sci-pm-domain.txt
+      PM domain provider node and an args specifier containing
+      the USB device id value. See,
+      Documentation/devicetree/bindings/soc/ti/sci-pm-domain.txt
 
   clocks:
     description: Clock phandles to usb2_refclk and lpm_clk
index 804b9b4..c1b19fc 100644 (file)
@@ -13,8 +13,8 @@ properties:
   compatible:
     items:
       - enum:
-        - ti,keystone-dwc3
-        - ti,am654-dwc3
+          - ti,keystone-dwc3
+          - ti,am654-dwc3
 
   reg:
     maxItems: 1
index f3d8478..63996ab 100644 (file)
@@ -993,7 +993,8 @@ patternProperties:
   "^sst,.*":
     description: Silicon Storage Technology, Inc.
   "^sstar,.*":
-    description: Xiamen Xingchen(SigmaStar) Technology Co., Ltd. (formerly part of MStar Semiconductor, Inc.)
+    description: Xiamen Xingchen(SigmaStar) Technology Co., Ltd.
+      (formerly part of MStar Semiconductor, Inc.)
   "^st,.*":
     description: STMicroelectronics
   "^starry,.*":
index e60b9a1..aa10b8e 100644 (file)
@@ -11,8 +11,8 @@ Optional properties:
           See clock-bindings.txt
 
 Documentation:
-Davinci DM646x - http://www.ti.com/lit/ug/spruer5b/spruer5b.pdf
-Keystone - http://www.ti.com/lit/ug/sprugv5a/sprugv5a.pdf
+Davinci DM646x - https://www.ti.com/lit/ug/spruer5b/spruer5b.pdf
+Keystone - https://www.ti.com/lit/ug/sprugv5a/sprugv5a.pdf
 
 Examples:
 
diff --git a/Documentation/devicetree/bindings/watchdog/dw_wdt.txt b/Documentation/devicetree/bindings/watchdog/dw_wdt.txt
deleted file mode 100644 (file)
index eb09144..0000000
+++ /dev/null
@@ -1,24 +0,0 @@
-Synopsys Designware Watchdog Timer
-
-Required Properties:
-
-- compatible   : Should contain "snps,dw-wdt"
-- reg          : Base address and size of the watchdog timer registers.
-- clocks       : phandle + clock-specifier for the clock that drives the
-               watchdog timer.
-
-Optional Properties:
-
-- interrupts   : The interrupt used for the watchdog timeout warning.
-- resets       : phandle pointing to the system reset controller with
-               line index for the watchdog.
-
-Example:
-
-       watchdog0: wd@ffd02000 {
-               compatible = "snps,dw-wdt";
-               reg = <0xffd02000 0x1000>;
-               interrupts = <0 171 4>;
-               clocks = <&per_base_clk>;
-               resets = <&rst WDT0_RESET>;
-       };
diff --git a/Documentation/devicetree/bindings/watchdog/qcom-wdt.txt b/Documentation/devicetree/bindings/watchdog/qcom-wdt.txt
deleted file mode 100644 (file)
index 41aeaa2..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-Qualcomm Krait Processor Sub-system (KPSS) Watchdog
----------------------------------------------------
-
-Required properties :
-- compatible : shall contain only one of the following:
-
-                       "qcom,kpss-wdt-msm8960"
-                       "qcom,kpss-wdt-apq8064"
-                       "qcom,kpss-wdt-ipq8064"
-                       "qcom,kpss-wdt-ipq4019"
-                       "qcom,kpss-timer"
-                       "qcom,scss-timer"
-                       "qcom,kpss-wdt"
-
-- reg : shall contain base register location and length
-- clocks : shall contain the input clock
-
-Optional properties :
-- timeout-sec : shall contain the default watchdog timeout in seconds,
-                if unset, the default timeout is 30 seconds
-
-Example:
-       watchdog@208a038 {
-               compatible = "qcom,kpss-wdt-ipq8064";
-               reg = <0x0208a038 0x40>;
-               clocks = <&sleep_clk>;
-               timeout-sec = <10>;
-       };
diff --git a/Documentation/devicetree/bindings/watchdog/qcom-wdt.yaml b/Documentation/devicetree/bindings/watchdog/qcom-wdt.yaml
new file mode 100644 (file)
index 0000000..0709ddf
--- /dev/null
@@ -0,0 +1,48 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/watchdog/qcom-wdt.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Krait Processor Sub-system (KPSS) Watchdog timer
+
+maintainers:
+  - Sai Prakash Ranjan <saiprakash.ranjan@codeaurora.org>
+
+allOf:
+  - $ref: watchdog.yaml#
+
+properties:
+  compatible:
+    enum:
+      - qcom,apss-wdt-qcs404
+      - qcom,apss-wdt-sc7180
+      - qcom,apss-wdt-sdm845
+      - qcom,apss-wdt-sm8150
+      - qcom,kpss-timer
+      - qcom,kpss-wdt
+      - qcom,kpss-wdt-apq8064
+      - qcom,kpss-wdt-ipq4019
+      - qcom,kpss-wdt-ipq8064
+      - qcom,kpss-wdt-msm8960
+      - qcom,scss-timer
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+
+required:
+  - compatible
+  - reg
+  - clocks
+
+examples:
+  - |
+    watchdog@208a038 {
+      compatible = "qcom,kpss-wdt-ipq8064";
+      reg = <0x0208a038 0x40>;
+      clocks = <&sleep_clk>;
+      timeout-sec = <10>;
+    };
index 572f4c9..6933005 100644 (file)
@@ -41,6 +41,7 @@ properties:
               - renesas,r8a774a1-wdt     # RZ/G2M
               - renesas,r8a774b1-wdt     # RZ/G2N
               - renesas,r8a774c0-wdt     # RZ/G2E
+              - renesas,r8a774e1-wdt     # RZ/G2H
               - renesas,r8a7795-wdt      # R-Car H3
               - renesas,r8a7796-wdt      # R-Car M3-W
               - renesas,r8a77961-wdt     # R-Car M3-W+
diff --git a/Documentation/devicetree/bindings/watchdog/snps,dw-wdt.yaml b/Documentation/devicetree/bindings/watchdog/snps,dw-wdt.yaml
new file mode 100644 (file)
index 0000000..d9fc7bb
--- /dev/null
@@ -0,0 +1,90 @@
+# SPDX-License-Identifier: GPL-2.0-only
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/watchdog/snps,dw-wdt.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Synopsys Designware Watchdog Timer
+
+allOf:
+  - $ref: "watchdog.yaml#"
+
+maintainers:
+  - Jamie Iles <jamie@jamieiles.com>
+
+properties:
+  compatible:
+    const: snps,dw-wdt
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    description: DW Watchdog pre-timeout interrupt
+    maxItems: 1
+
+  clocks:
+    minItems: 1
+    items:
+      - description: Watchdog timer reference clock
+      - description: APB3 interface clock
+
+  clock-names:
+    minItems: 1
+    items:
+      - const: tclk
+      - const: pclk
+
+  resets:
+    description: Phandle to the DW Watchdog reset lane
+    maxItems: 1
+
+  snps,watchdog-tops:
+    $ref: /schemas/types.yaml#/definitions/uint32-array
+    description: |
+      DW APB Watchdog custom timer intervals - Timeout Period ranges (TOPs).
+      Each TOP is a number loaded into the watchdog counter at the moment of
+      the timer restart. The counter decrementing happens each tick of the
+      reference clock. Therefore the TOPs array is equivalent to an array of
+      the timer expiration intervals supported by the DW APB Watchdog. Note
+      DW APB Watchdog IP-core might be synthesized with fixed TOP values,
+      in which case this property is unnecessary with default TOPs utilized.
+    default: [0x0001000, 0x0002000, 0x0004000, 0x0008000,
+      0x0010000, 0x0020000, 0x0040000, 0x0080000,
+      0x0100000, 0x0200000, 0x0400000, 0x0800000,
+      0x1000000, 0x2000000, 0x4000000, 0x8000000]
+    minItems: 16
+    maxItems: 16
+
+unevaluatedProperties: false
+
+required:
+  - compatible
+  - reg
+  - clocks
+
+examples:
+  - |
+    watchdog@ffd02000 {
+      compatible = "snps,dw-wdt";
+      reg = <0xffd02000 0x1000>;
+      interrupts = <0 171 4>;
+      clocks = <&per_base_clk>;
+      resets = <&wdt_rst>;
+    };
+
+  - |
+    watchdog@ffd02000 {
+      compatible = "snps,dw-wdt";
+      reg = <0xffd02000 0x1000>;
+      interrupts = <0 171 4>;
+      clocks = <&per_base_clk>;
+      clock-names = "tclk";
+      snps,watchdog-tops = <0x000000FF 0x000001FF 0x000003FF
+                            0x000007FF 0x0000FFFF 0x0001FFFF
+                            0x0003FFFF 0x0007FFFF 0x000FFFFF
+                            0x001FFFFF 0x003FFFFF 0x007FFFFF
+                            0x00FFFFFF 0x01FFFFFF 0x03FFFFFF
+                            0x07FFFFFF>;
+    };
+...
index 8c74a99..16f21e1 100644 (file)
@@ -5,7 +5,7 @@ Writing DeviceTree Bindings in json-schema
 
 Devicetree bindings are written using json-schema vocabulary. Schema files are
 written in a JSON compatible subset of YAML. YAML is used instead of JSON as it
-considered more human readable and has some advantages such as allowing
+is considered more human readable and has some advantages such as allowing
 comments (Prefixed with '#').
 
 Schema Contents
@@ -19,7 +19,7 @@ $id
   A json-schema unique identifier string. The string must be a valid
   URI typically containing the binding's filename and path. For DT schema, it must
   begin with "http://devicetree.org/schemas/". The URL is used in constructing
-  references to other files specified in schema "$ref" properties. A $ref values
+  references to other files specified in schema "$ref" properties. A $ref value
   with a leading '/' will have the hostname prepended. A $ref value a relative
   path or filename only will be prepended with the hostname and path components
   of the current schema file's '$id' value. A URL is used even for local files,
index 902b93b..5ef2cfe 100644 (file)
@@ -100,7 +100,6 @@ available subsections can be seen below.
    rfkill
    serial/index
    sm501
-   smsc_ece1099
    switchtec
    sync_file
    vfio-mediated-device
diff --git a/Documentation/driver-api/nvdimm/firmware-activate.rst b/Documentation/driver-api/nvdimm/firmware-activate.rst
new file mode 100644 (file)
index 0000000..7ee7dec
--- /dev/null
@@ -0,0 +1,86 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==================================
+NVDIMM Runtime Firmware Activation
+==================================
+
+Some persistent memory devices run a firmware locally on the device /
+"DIMM" to perform tasks like media management, capacity provisioning,
+and health monitoring. The process of updating that firmware typically
+involves a reboot because it has implications for in-flight memory
+transactions. However, reboots are disruptive and at least the Intel
+persistent memory platform implementation, described by the Intel ACPI
+DSM specification [1], has added support for activating firmware at
+runtime.
+
+A native sysfs interface is implemented in libnvdimm to allow platforms
+to advertise and control their local runtime firmware activation
+capability.
+
+The libnvdimm bus object, ndbusX, implements an ndbusX/firmware/activate
+attribute that shows the state of the firmware activation as one of 'idle',
+'armed', 'overflow', and 'busy'.
+
+- idle:
+  No devices are set / armed to activate firmware
+
+- armed:
+  At least one device is armed
+
+- busy:
+  In the busy state armed devices are in the process of transitioning
+  back to idle and completing an activation cycle.
+
+- overflow:
+  If the platform has a concept of incremental work needed to perform
+  the activation it could be the case that too many DIMMs are armed for
+  activation. In that scenario the potential for firmware activation to
+  timeout is indicated by the 'overflow' state.
+
+The 'ndbusX/firmware/activate' property can be written with a value of
+either 'live', or 'quiesce'. A value of 'quiesce' triggers the kernel to
+run firmware activation from within the equivalent of the hibernation
+'freeze' state where drivers and applications are notified to stop their
+modifications of system memory. A value of 'live' attempts
+firmware activation without this hibernation cycle. The
+'ndbusX/firmware/activate' property will be elided completely if no
+firmware activation capability is detected.
+
+Another property 'ndbusX/firmware/capability' indicates a value of
+'live' or 'quiesce', where 'live' indicates that the firmware
+does not require or inflict any quiesce period on the system to update
+firmware. A capability value of 'quiesce' indicates that firmware does
+expect and injects a quiet period for the memory controller, but 'live'
+may still be written to 'ndbusX/firmware/activate' as an override to
+assume the risk of racing firmware update with in-flight device and
+application activity. The 'ndbusX/firmware/capability' property will be
+elided completely if no firmware activation capability is detected.
+
+The libnvdimm memory-device / DIMM object, nmemX, implements
+'nmemX/firmware/activate' and 'nmemX/firmware/result' attributes to
+communicate the per-device firmware activation state. Similar to the
+'ndbusX/firmware/activate' attribute, the 'nmemX/firmware/activate'
+attribute indicates 'idle', 'armed', or 'busy'. The state transitions
+from 'armed' to 'idle' when the system is prepared to activate firmware,
+firmware staged + state set to armed, and 'ndbusX/firmware/activate' is
+triggered. After that activation event the nmemX/firmware/result
+attribute reflects the state of the last activation as one of:
+
+- none:
+  No runtime activation triggered since the last time the device was reset
+
+- success:
+  The last runtime activation completed successfully.
+
+- fail:
+  The last runtime activation failed for device-specific reasons.
+
+- not_staged:
+  The last runtime activation failed due to a sequencing error of the
+  firmware image not being staged.
+
+- need_reset:
+  Runtime firmware activation failed, but the firmware can still be
+  activated via the legacy method of power-cycling the system.
+
+[1]: https://docs.pmem.io/persistent-memory/
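
A minimal userspace sketch of the sysfs flow described in
firmware-activate.rst above; it is an illustration, not code from this
series. It reads the bus-level activation state and, if at least one DIMM
is armed, requests a quiesced activation. The /sys/bus/nd/devices/ndbus0
path and the buffer size are assumptions for the example; the attribute
names and values follow the document::

    /* Hypothetical example; adjust the ndbusX path to the actual system. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    #define ACTIVATE "/sys/bus/nd/devices/ndbus0/firmware/activate"

    int main(void)
    {
        char state[32] = "";
        int fd = open(ACTIVATE, O_RDWR);

        if (fd < 0) {
            perror("open");    /* attribute is elided if unsupported */
            return 1;
        }
        if (read(fd, state, sizeof(state) - 1) < 0) {
            perror("read");
            close(fd);
            return 1;
        }
        printf("activation state: %s", state);

        if (!strncmp(state, "armed", 5)) {
            /* 'quiesce' runs the activation from a frozen system state */
            lseek(fd, 0, SEEK_SET);
            if (write(fd, "quiesce", strlen("quiesce")) < 0)
                perror("write");
        }
        close(fd);
        return 0;
    }
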
diff --git a/Documentation/driver-api/smsc_ece1099.rst b/Documentation/driver-api/smsc_ece1099.rst
deleted file mode 100644 (file)
index 0792774..0000000
+++ /dev/null
@@ -1,60 +0,0 @@
-=================================================
-Msc Keyboard Scan Expansion/GPIO Expansion device
-=================================================
-
-What is smsc-ece1099?
-----------------------
-
-The ECE1099 is a 40-Pin 3.3V Keyboard Scan Expansion
-or GPIO Expansion device. The device supports a keyboard
-scan matrix of 23x8. The device is connected to a Master
-via the SMSC BC-Link interface or via the SMBus.
-Keypad scan Input(KSI) and Keypad Scan Output(KSO) signals
-are multiplexed with GPIOs.
-
-Interrupt generation
---------------------
-
-Interrupts can be generated by an edge detection on a GPIO
-pin or an edge detection on one of the bus interface pins.
-Interrupts can also be detected on the keyboard scan interface.
-The bus interrupt pin (BC_INT# or SMBUS_INT#) is asserted if
-any bit in one of the Interrupt Status registers is 1 and
-the corresponding Interrupt Mask bit is also 1.
-
-In order for software to determine which device is the source
-of an interrupt, it should first read the Group Interrupt Status Register
-to determine which Status register group is a source for the interrupt.
-Software should read both the Status register and the associated Mask register,
-then AND the two values together. Bits that are 1 in the result of the AND
-are active interrupts. Software clears an interrupt by writing a 1 to the
-corresponding bit in the Status register.
-
-Communication Protocol
-----------------------
-
-- SMbus slave Interface
-       The host processor communicates with the ECE1099 device
-       through a series of read/write registers via the SMBus
-       interface. SMBus is a serial communication protocol between
-       a computer host and its peripheral devices. The SMBus data
-       rate is 10KHz minimum to 400 KHz maximum
-
-- Slave Bus Interface
-       The ECE1099 device SMBus implementation is a subset of the
-       SMBus interface to the host. The device is a slave-only SMBus device.
-       The implementation in the device is a subset of SMBus since it
-       only supports four protocols.
-
-       The Write Byte, Read Byte, Send Byte, and Receive Byte protocols are the
-       only valid SMBus protocols for the device.
-
-- BC-LinkTM Interface
-       The BC-Link is a proprietary bus that allows communication
-       between a Master device and a Companion device. The Master
-       device uses this serial bus to read and write registers
-       located on the Companion device. The bus comprises three signals,
-       BC_CLK, BC_DAT and BC_INT#. The Master device always provides the
-       clock, BC_CLK, and the Companion device is the source for an
-       independent asynchronous interrupt signal, BC_INT#. The ECE1099
-       supports BC-Link speeds up to 24MHz.
index b7e4f36..2db7680 100644 (file)
@@ -23,7 +23,7 @@
     |    openrisc: | TODO |
     |      parisc: | TODO |
     |     powerpc: |  ok  |
-    |       riscv: | TODO |
+    |       riscv: |  ok  |
     |        s390: |  ok  |
     |          sh: |  ok  |
     |       sparc: |  ok  |
index 3db4763..3329559 100644 (file)
@@ -23,7 +23,7 @@
     |    openrisc: | TODO |
     |      parisc: | TODO |
     |     powerpc: |  ok  |
-    |       riscv: | TODO |
+    |       riscv: |  ok  |
     |        s390: | TODO |
     |          sh: |  ok  |
     |       sparc: | TODO |
index 4f844ec..940b0bd 100644 (file)
@@ -23,7 +23,7 @@
     |    openrisc: |  ok  |
     |      parisc: | TODO |
     |     powerpc: |  ok  |
-    |       riscv: | TODO |
+    |       riscv: |  ok  |
     |        s390: |  ok  |
     |          sh: |  ok  |
     |       sparc: |  ok  |
index a71f3a9..266c81e 100644 (file)
@@ -23,7 +23,7 @@
     |    openrisc: | TODO |
     |      parisc: | TODO |
     |     powerpc: |  ok  |
-    |       riscv: | TODO |
+    |       riscv: |  ok  |
     |        s390: | TODO |
     |          sh: | TODO |
     |       sparc: |  ok  |
index 0aadba0..cc76b57 100644 (file)
@@ -39,6 +39,6 @@ entry.
 Other References
 ----------------
 
-Also see http://www.nongnu.org/ext2-doc/ for quite a collection of
+Also see https://www.nongnu.org/ext2-doc/ for quite a collection of
 information about ext2/3. Here's another old reference:
 http://wiki.osdev.org/Ext2
index a11d329..ec8d997 100644 (file)
@@ -258,6 +258,8 @@ compress_extension=%s        Support adding specified extension, so that f2fs can enab
                         on compression extension list and enable compression on
                         these file by default rather than to enable it via ioctl.
                         For other files, we can still enable compression via ioctl.
+                        Note that, there is one reserved special extension '*', it
+                        can be set to enable compression for all files.
 inlinecrypt             When possible, encrypt/decrypt the contents of encrypted
                         files using the blk-crypto framework rather than
                         filesystem-layer encryption. This allows the use of
@@ -743,8 +745,8 @@ Compression implementation
 
 - In order to eliminate write amplification during overwrite, F2FS only
   support compression on write-once file, data can be compressed only when
-  all logical blocks in file are valid and cluster compress ratio is lower
-  than specified threshold.
+  all logical blocks in cluster contain valid data and compress ratio of
+  cluster data is lower than specified threshold.
 
 - To enable compression on regular inode, there are three ways:
 
index 58ce6b3..7e2be2f 100644 (file)
@@ -10,27 +10,27 @@ Details
 The journalling layer is easy to use. You need to first of all create a
 journal_t data structure. There are two calls to do this dependent on
 how you decide to allocate the physical media on which the journal
-resides. The :c:func:`jbd2_journal_init_inode` call is for journals stored in
-filesystem inodes, or the :c:func:`jbd2_journal_init_dev` call can be used
+resides. The jbd2_journal_init_inode() call is for journals stored in
+filesystem inodes, or the jbd2_journal_init_dev() call can be used
 for journal stored on a raw device (in a continuous range of blocks). A
 journal_t is a typedef for a struct pointer, so when you are finally
-finished make sure you call :c:func:`jbd2_journal_destroy` on it to free up
+finished make sure you call jbd2_journal_destroy() on it to free up
 any used kernel memory.
 
 Once you have got your journal_t object you need to 'mount' or load the
 journal file. The journalling layer expects the space for the journal
 was already allocated and initialized properly by the userspace tools.
-When loading the journal you must call :c:func:`jbd2_journal_load` to process
+When loading the journal you must call jbd2_journal_load() to process
 journal contents. If the client file system detects the journal contents
 does not need to be processed (or even need not have valid contents), it
-may call :c:func:`jbd2_journal_wipe` to clear the journal contents before
-calling :c:func:`jbd2_journal_load`.
+may call jbd2_journal_wipe() to clear the journal contents before
+calling jbd2_journal_load().
 
 Note that jbd2_journal_wipe(..,0) calls
-:c:func:`jbd2_journal_skip_recovery` for you if it detects any outstanding
-transactions in the journal and similarly :c:func:`jbd2_journal_load` will
-call :c:func:`jbd2_journal_recover` if necessary. I would advise reading
-:c:func:`ext4_load_journal` in fs/ext4/super.c for examples on this stage.
+jbd2_journal_skip_recovery() for you if it detects any outstanding
+transactions in the journal and similarly jbd2_journal_load() will
+call jbd2_journal_recover() if necessary. I would advise reading
+ext4_load_journal() in fs/ext4/super.c for examples on this stage.
 
 Now you can go ahead and start modifying the underlying filesystem.
 Almost.
@@ -39,57 +39,57 @@ You still need to actually journal your filesystem changes, this is done
 by wrapping them into transactions. Additionally you also need to wrap
 the modification of each of the buffers with calls to the journal layer,
 so it knows what the modifications you are actually making are. To do
-this use :c:func:`jbd2_journal_start` which returns a transaction handle.
+this use jbd2_journal_start() which returns a transaction handle.
 
-:c:func:`jbd2_journal_start` and its counterpart :c:func:`jbd2_journal_stop`,
+jbd2_journal_start() and its counterpart jbd2_journal_stop(),
 which indicates the end of a transaction are nestable calls, so you can
 reenter a transaction if necessary, but remember you must call
-:c:func:`jbd2_journal_stop` the same number of times as
-:c:func:`jbd2_journal_start` before the transaction is completed (or more
+jbd2_journal_stop() the same number of times as
+jbd2_journal_start() before the transaction is completed (or more
 accurately leaves the update phase). Ext4/VFS makes use of this feature to
 simplify handling of inode dirtying, quota support, etc.
 
 Inside each transaction you need to wrap the modifications to the
 individual buffers (blocks). Before you start to modify a buffer you
-need to call :c:func:`jbd2_journal_get_create_access()` /
-:c:func:`jbd2_journal_get_write_access()` /
-:c:func:`jbd2_journal_get_undo_access()` as appropriate, this allows the
+need to call jbd2_journal_get_create_access() /
+jbd2_journal_get_write_access() /
+jbd2_journal_get_undo_access() as appropriate, this allows the
 journalling layer to copy the unmodified
 data if it needs to. After all the buffer may be part of a previously
 uncommitted transaction. At this point you are at last ready to modify a
 buffer, and once you are have done so you need to call
-:c:func:`jbd2_journal_dirty_metadata`. Or if you've asked for access to a
+jbd2_journal_dirty_metadata(). Or if you've asked for access to a
 buffer you now know is no longer required to be pushed back on the
-device you can call :c:func:`jbd2_journal_forget` in much the same way as you
-might have used :c:func:`bforget` in the past.
+device you can call jbd2_journal_forget() in much the same way as you
+might have used bforget() in the past.
 
-A :c:func:`jbd2_journal_flush` may be called at any time to commit and
+A jbd2_journal_flush() may be called at any time to commit and
 checkpoint all your transactions.
 
-Then at umount time , in your :c:func:`put_super` you can then call
-:c:func:`jbd2_journal_destroy` to clean up your in-core journal object.
+Then at umount time, in your put_super() you can then call
+jbd2_journal_destroy() to clean up your in-core journal object.
 
 Unfortunately there are a couple of ways the journal layer can cause a
 deadlock. The first thing to note is that each task can only have a
 single outstanding transaction at any one time, remember nothing commits
-until the outermost :c:func:`jbd2_journal_stop`. This means you must complete
+until the outermost jbd2_journal_stop(). This means you must complete
 the transaction at the end of each file/inode/address etc. operation you
 perform, so that the journalling system isn't re-entered on another
 journal. Since transactions can't be nested/batched across differing
 journals, and another filesystem other than yours (say ext4) may be
 modified in a later syscall.
 
-The second case to bear in mind is that :c:func:`jbd2_journal_start` can block
+The second case to bear in mind is that jbd2_journal_start() can block
 if there isn't enough space in the journal for your transaction (based
 on the passed nblocks param) - when it blocks it merely(!) needs to wait
 for transactions to complete and be committed from other tasks, so
-essentially we are waiting for :c:func:`jbd2_journal_stop`. So to avoid
-deadlocks you must treat :c:func:`jbd2_journal_start` /
-:c:func:`jbd2_journal_stop` as if they were semaphores and include them in
+essentially we are waiting for jbd2_journal_stop(). So to avoid
+deadlocks you must treat jbd2_journal_start() /
+jbd2_journal_stop() as if they were semaphores and include them in
 your semaphore ordering rules to prevent
-deadlocks. Note that :c:func:`jbd2_journal_extend` has similar blocking
-behaviour to :c:func:`jbd2_journal_start` so you can deadlock here just as
-easily as on :c:func:`jbd2_journal_start`.
+deadlocks. Note that jbd2_journal_extend() has similar blocking
+behaviour to jbd2_journal_start() so you can deadlock here just as
+easily as on jbd2_journal_start().
 
 Try to reserve the right number of blocks the first time. ;-). This will
 be the maximum number of blocks you are going to touch in this
@@ -116,8 +116,8 @@ called after each transaction commit. You can also use
 that need processing when the transaction commits.
 
 JBD2 also provides a way to block all transaction updates via
-:c:func:`jbd2_journal_lock_updates()` /
-:c:func:`jbd2_journal_unlock_updates()`. Ext4 uses this when it wants a
+jbd2_journal_lock_updates() /
+jbd2_journal_unlock_updates(). Ext4 uses this when it wants a
 window with a clean and stable fs for a moment. E.g.
 
 ::
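
A minimal kernel-side sketch of the transaction pattern walked through in
the hunks above, assuming the calling filesystem supplies the journal_t and
the buffer_head; it is an illustration, not code from this series. It
reserves credits for one block, takes write access, dirties the metadata
and balances the handle with a final stop::

    /* Hypothetical helper; error handling trimmed to the essentials. */
    #include <linux/jbd2.h>
    #include <linux/buffer_head.h>
    #include <linux/err.h>
    #include <linux/string.h>

    static int example_update_block(journal_t *journal, struct buffer_head *bh)
    {
        handle_t *handle;
        int err;

        /* Reserve credits for one metadata block in this transaction. */
        handle = jbd2_journal_start(journal, 1);
        if (IS_ERR(handle))
            return PTR_ERR(handle);

        /* Let jbd2 copy the unmodified data if the buffer still belongs
         * to an uncommitted transaction, then modify it. */
        err = jbd2_journal_get_write_access(handle, bh);
        if (!err) {
            memset(bh->b_data, 0, bh->b_size);  /* the actual change */
            err = jbd2_journal_dirty_metadata(handle, bh);
        }

        /* Must balance jbd2_journal_start(); nothing commits before the
         * outermost stop. */
        jbd2_journal_stop(handle);
        return err;
    }
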
index e024a9e..533c79e 100644 (file)
@@ -1633,9 +1633,6 @@ may allocate from based on an estimation of its current memory and swap use.
 For example, if a task is using all allowed memory, its badness score will be
 1000.  If it is using half of its allowed memory, its score will be 500.
 
-There is an additional factor included in the badness score: the current memory
-and swap usage is discounted by 3% for root processes.
-
 The amount of "allowed" memory depends on the context in which the oom killer
 was called.  If it is due to the memory assigned to the allocating task's cpuset
 being exhausted, the allowed memory represents the set of mems assigned to that
@@ -1671,11 +1668,6 @@ The value of /proc/<pid>/oom_score_adj may be reduced no lower than the last
 value set by a CAP_SYS_RESOURCE process. To reduce the value any lower
 requires CAP_SYS_RESOURCE.
 
-Caveat: when a parent task is selected, the oom killer will sacrifice any first
-generation children with separate address spaces instead, if possible.  This
-avoids servers and important system daemons from being killed and loses the
-minimal amount of work.
-
 
 3.2 /proc/<pid>/oom_score - Display current oom-killer score
 -------------------------------------------------------------
@@ -1684,6 +1676,9 @@ This file can be used to check the current score used by the oom-killer for
 any given <pid>. Use it together with /proc/<pid>/oom_score_adj to tune which
 process should be killed in an out-of-memory situation.
 
+Please note that the exported value includes oom_score_adj so it is
+effectively in range [0,2000].
+
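A small user-space sketch of reading both files for the current task; the
paths are those documented above, the rest is plain C::

  #include <stdio.h>

  /* Print a task's oom_score together with its oom_score_adj bias. */
  static long read_proc_long(const char *path)
  {
          FILE *f = fopen(path, "r");
          long val = -1;

          if (!f)
                  return -1;
          if (fscanf(f, "%ld", &val) != 1)
                  val = -1;
          fclose(f);
          return val;
  }

  int main(void)
  {
          /* "self" is used here; substitute any pid of interest */
          long score = read_proc_long("/proc/self/oom_score");
          long adj   = read_proc_long("/proc/self/oom_score_adj");

          /* oom_score already includes oom_score_adj, so it lies in [0, 2000] */
          printf("oom_score=%ld oom_score_adj=%ld\n", score, adj);
          return 0;
  }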
 
 3.3  /proc/<pid>/io - Display the IO accounting fields
 -------------------------------------------------------
index 71d845c..6c18bc8 100644 (file)
@@ -110,14 +110,14 @@ contain files named "0", "1", "2", ... The file numbers also represent
 increasing zone start sector on the device.
 
 All read and write operations to zone files are not allowed beyond the file
-maximum size, that is, beyond the zone size. Any access exceeding the zone
-size is failed with the -EFBIG error.
+maximum size, that is, beyond the zone capacity. Any access exceeding the zone
+capacity is failed with the -EFBIG error.
 
 Creating, deleting, renaming or modifying any attribute of files and
 sub-directories is not allowed.
 
 The number of blocks of a file as reported by stat() and fstat() indicates the
-size of the file zone, or in other words, the maximum file size.
+capacity of the zone file, or in other words, the maximum file size.
 
 Conventional zone files
 -----------------------
@@ -156,8 +156,8 @@ all accepted.
 
 Truncating sequential zone files is allowed only down to 0, in which case, the
 zone is reset to rewind the file zone write pointer position to the start of
-the zone, or up to the zone size, in which case the file's zone is transitioned
-to the FULL state (finish zone operation).
+the zone, or up to the zone capacity, in which case the file's zone is
+transitioned to the FULL state (finish zone operation).
 
 Format options
 --------------
@@ -324,7 +324,7 @@ file size set to 0. This is necessary as the write pointer of read-only zones
 is defined as invalid by the ZBC and ZAC standards, making it impossible to
 discover the amount of data that has been written to the zone. In the case of a
 read-only zone discovered at run-time, as indicated in the previous section.
-the size of the zone file is left unchanged from its last updated value.
+The size of the zone file is left unchanged from its last updated value.
 
 Zonefs User Space Tools
 =======================
@@ -401,8 +401,9 @@ append-writes to the file::
     # ls -l /mnt/seq/0
     -rw-r----- 1 root root 0 Nov 25 13:49 /mnt/seq/0
 
-Since files are statically mapped to zones on the disk, the number of blocks of
-a file as reported by stat() and fstat() indicates the size of the file zone::
+Since files are statically mapped to zones on the disk, the number of blocks
+of a file as reported by stat() and fstat() indicates the capacity of the file
+zone::
 
     # stat /mnt/seq/0
     File: /mnt/seq/0
@@ -416,5 +417,6 @@ a file as reported by stat() and fstat() indicates the size of the file zone::
 
 The number of blocks of the file ("Blocks") in units of 512B blocks gives the
 maximum file size of 524288 * 512 B = 256 MB, corresponding to the device zone
-size in this example. Of note is that the "IO block" field always indicates the
-minimum I/O size for writes and corresponds to the device physical sector size.
+capacity in this example. Of note is that the "IO block" field always
+indicates the minimum I/O size for writes and corresponds to the device
+physical sector size.
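
A user-space sketch of the capacity calculation described above, using only
stat(2); "/mnt/seq/0" is the example path from this document::

  #include <stdio.h>
  #include <sys/stat.h>

  int main(void)
  {
          struct stat st;

          if (stat("/mnt/seq/0", &st) != 0) {
                  perror("stat");
                  return 1;
          }

          /* st_blocks is always in 512-byte units, independent of st_blksize */
          printf("written: %lld bytes, zone capacity (max size): %lld bytes\n",
                 (long long)st.st_size, (long long)st.st_blocks * 512LL);
          return 0;
  }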
diff --git a/Documentation/gpu/backlight.rst b/Documentation/gpu/backlight.rst
new file mode 100644 (file)
index 0000000..9ebfc9d
--- /dev/null
@@ -0,0 +1,12 @@
+=================
+Backlight support
+=================
+
+.. kernel-doc:: drivers/video/backlight/backlight.c
+   :doc: overview
+
+.. kernel-doc:: include/linux/backlight.h
+   :internal:
+
+.. kernel-doc:: drivers/video/backlight/backlight.c
+   :export:
index 1fcf8e8..c9a51e3 100644 (file)
@@ -12,6 +12,7 @@ Linux GPU Driver Developer's Guide
    drm-uapi
    drm-client
    drivers
+   backlight
    vga-switcheroo
    vgaarbiter
    todo
index b83da0e..faf3233 100644 (file)
@@ -43,6 +43,7 @@ Supported adapters:
   * Intel Elkhart Lake (PCH)
   * Intel Tiger Lake (PCH)
   * Intel Jasper Lake (SOC)
+  * Intel Emmitsburg (PCH)
 
    Datasheets: Publicly available at the Intel website
 
index bdb247f..73ad348 100644 (file)
@@ -159,6 +159,8 @@ for details) through the following functions::
   __s32 i2c_smbus_read_word_data(int file, __u8 command);
   __s32 i2c_smbus_write_word_data(int file, __u8 command, __u16 value);
   __s32 i2c_smbus_process_call(int file, __u8 command, __u16 value);
+  __s32 i2c_smbus_block_process_call(int file, __u8 command, __u8 length,
+                                     __u8 *values);
   __s32 i2c_smbus_read_block_data(int file, __u8 command, __u8 *values);
   __s32 i2c_smbus_write_block_data(int file, __u8 command, __u8 length,
                                    __u8 *values);
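
A hedged user-space sketch of calling one of these helpers through the
i2c-dev interface; it assumes the declarations come from i2c-tools' libi2c
(<i2c/smbus.h>, link with -li2c), and the bus path, slave address and
register are placeholders::

  #include <fcntl.h>
  #include <unistd.h>
  #include <sys/ioctl.h>
  #include <linux/types.h>
  #include <linux/i2c-dev.h>
  #include <i2c/smbus.h>                  /* helpers from i2c-tools' libi2c */

  static __s32 read_sensor_word(void)
  {
          int file = open("/dev/i2c-1", O_RDWR);   /* placeholder bus */
          __s32 value;

          if (file < 0)
                  return -1;

          /* 0x48 is a placeholder 7-bit slave address */
          if (ioctl(file, I2C_SLAVE, 0x48) < 0) {
                  close(file);
                  return -1;
          }

          /* SMBus "read word" from register/command 0x00 */
          value = i2c_smbus_read_word_data(file, 0x00);
          close(file);
          return value;
  }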
index fee4744..8a2ad38 100644 (file)
@@ -62,7 +62,6 @@ Legacy documentation
 .. toctree::
    :maxdepth: 1
 
-   upgrading-clients
    old-module-parameters
 
 .. only::  subproject and html
diff --git a/Documentation/i2c/upgrading-clients.rst b/Documentation/i2c/upgrading-clients.rst
deleted file mode 100644 (file)
index 1708090..0000000
+++ /dev/null
@@ -1,285 +0,0 @@
-=================================================
-Upgrading I2C Drivers to the new 2.6 Driver Model
-=================================================
-
-Ben Dooks <ben-linux@fluff.org>
-
-Introduction
-------------
-
-This guide outlines how to alter existing Linux 2.6 client drivers from
-the old to the new binding methods.
-
-
-Example old-style driver
-------------------------
-
-::
-
-  struct example_state {
-       struct i2c_client       client;
-       ....
-  };
-
-  static struct i2c_driver example_driver;
-
-  static unsigned short ignore[] = { I2C_CLIENT_END };
-  static unsigned short normal_addr[] = { OUR_ADDR, I2C_CLIENT_END };
-
-  I2C_CLIENT_INSMOD;
-
-  static int example_attach(struct i2c_adapter *adap, int addr, int kind)
-  {
-       struct example_state *state;
-       struct device *dev = &adap->dev;  /* to use for dev_ reports */
-       int ret;
-
-       state = kzalloc(sizeof(struct example_state), GFP_KERNEL);
-       if (state == NULL) {
-               dev_err(dev, "failed to create our state\n");
-               return -ENOMEM;
-       }
-
-       example->client.addr    = addr;
-       example->client.flags   = 0;
-       example->client.adapter = adap;
-
-       i2c_set_clientdata(&state->i2c_client, state);
-       strscpy(client->i2c_client.name, "example", sizeof(client->i2c_client.name));
-
-       ret = i2c_attach_client(&state->i2c_client);
-       if (ret < 0) {
-               dev_err(dev, "failed to attach client\n");
-               kfree(state);
-               return ret;
-       }
-
-       dev = &state->i2c_client.dev;
-
-       /* rest of the initialisation goes here. */
-
-       dev_info(dev, "example client created\n");
-
-       return 0;
-  }
-
-  static int example_detach(struct i2c_client *client)
-  {
-       struct example_state *state = i2c_get_clientdata(client);
-
-       i2c_detach_client(client);
-       kfree(state);
-       return 0;
-  }
-
-  static int example_attach_adapter(struct i2c_adapter *adap)
-  {
-       return i2c_probe(adap, &addr_data, example_attach);
-  }
-
-  static struct i2c_driver example_driver = {
-       .driver         = {
-               .owner          = THIS_MODULE,
-               .name           = "example",
-               .pm             = &example_pm_ops,
-       },
-       .attach_adapter = example_attach_adapter,
-       .detach_client  = example_detach,
-  };
-
-
-Updating the client
--------------------
-
-The new style binding model will check against a list of supported
-devices and their associated address supplied by the code registering
-the busses. This means that the driver .attach_adapter and
-.detach_client methods can be removed, along with the addr_data,
-as follows::
-
-  - static struct i2c_driver example_driver;
-
-  - static unsigned short ignore[] = { I2C_CLIENT_END };
-  - static unsigned short normal_addr[] = { OUR_ADDR, I2C_CLIENT_END };
-
-  - I2C_CLIENT_INSMOD;
-
-  - static int example_attach_adapter(struct i2c_adapter *adap)
-  - {
-  -    return i2c_probe(adap, &addr_data, example_attach);
-  - }
-
-    static struct i2c_driver example_driver = {
-  -    .attach_adapter = example_attach_adapter,
-  -    .detach_client  = example_detach,
-    }
-
-Add the probe and remove methods to the i2c_driver, as so::
-
-   static struct i2c_driver example_driver = {
-  +    .probe          = example_probe,
-  +    .remove         = example_remove,
-   }
-
-Change the example_attach method to accept the new parameters
-which include the i2c_client that it will be working with::
-
-  - static int example_attach(struct i2c_adapter *adap, int addr, int kind)
-  + static int example_probe(struct i2c_client *client,
-  +                       const struct i2c_device_id *id)
-
-Change the name of example_attach to example_probe to align it with the
-i2c_driver entry names. The rest of the probe routine will now need to be
-changed as the i2c_client has already been setup for use.
-
-The necessary client fields have already been setup before
-the probe function is called, so the following client setup
-can be removed::
-
-  -    example->client.addr    = addr;
-  -    example->client.flags   = 0;
-  -    example->client.adapter = adap;
-  -
-  -    strscpy(client->i2c_client.name, "example", sizeof(client->i2c_client.name));
-
-The i2c_set_clientdata is now::
-
-  -    i2c_set_clientdata(&state->client, state);
-  +    i2c_set_clientdata(client, state);
-
-The call to i2c_attach_client is no longer needed, if the probe
-routine exits successfully, then the driver will be automatically
-attached by the core. Change the probe routine as so::
-
-  -    ret = i2c_attach_client(&state->i2c_client);
-  -    if (ret < 0) {
-  -            dev_err(dev, "failed to attach client\n");
-  -            kfree(state);
-  -            return ret;
-  -    }
-
-
-Remove the storage of 'struct i2c_client' from the 'struct example_state'
-as we are provided with the i2c_client in our example_probe. Instead we
-store a pointer to it for when it is needed.
-
-::
-
-  struct example_state {
-  -    struct i2c_client       client;
-  +    struct i2c_client       *client;
-
-the new i2c client as so::
-
-  -    struct device *dev = &adap->dev;  /* to use for dev_ reports */
-  +    struct device *dev = &i2c_client->dev;  /* to use for dev_ reports */
-
-And remove the change after our client is attached, as the driver no
-longer needs to register a new client structure with the core::
-
-  -    dev = &state->i2c_client.dev;
-
-In the probe routine, ensure that the new state has the client stored
-in it::
-
-  static int example_probe(struct i2c_client *i2c_client,
-                        const struct i2c_device_id *id)
-  {
-       struct example_state *state;
-       struct device *dev = &i2c_client->dev;
-       int ret;
-
-       state = kzalloc(sizeof(struct example_state), GFP_KERNEL);
-       if (state == NULL) {
-               dev_err(dev, "failed to create our state\n");
-               return -ENOMEM;
-       }
-
-  +    state->client = i2c_client;
-
-Update the detach method, by changing the name to _remove and
-to delete the i2c_detach_client call. It is possible that you
-can also remove the ret variable as it is not needed for any
-of the core functions.
-
-::
-
-  - static int example_detach(struct i2c_client *client)
-  + static int example_remove(struct i2c_client *client)
-  {
-       struct example_state *state = i2c_get_clientdata(client);
-
-  -    i2c_detach_client(client);
-
-And finally ensure that we have the correct ID table for the i2c-core
-and other utilities::
-
-  + struct i2c_device_id example_idtable[] = {
-  +       { "example", 0 },
-  +       { }
-  +};
-  +
-  +MODULE_DEVICE_TABLE(i2c, example_idtable);
-
-  static struct i2c_driver example_driver = {
-       .driver         = {
-               .owner          = THIS_MODULE,
-               .name           = "example",
-       },
-  +    .id_table       = example_ids,
-
-
-Our driver should now look like this::
-
-  struct example_state {
-       struct i2c_client       *client;
-       ....
-  };
-
-  static int example_probe(struct i2c_client *client,
-                        const struct i2c_device_id *id)
-  {
-       struct example_state *state;
-       struct device *dev = &client->dev;
-
-       state = kzalloc(sizeof(struct example_state), GFP_KERNEL);
-       if (state == NULL) {
-               dev_err(dev, "failed to create our state\n");
-               return -ENOMEM;
-       }
-
-       state->client = client;
-       i2c_set_clientdata(client, state);
-
-       /* rest of the initialisation goes here. */
-
-       dev_info(dev, "example client created\n");
-
-       return 0;
-  }
-
-  static int example_remove(struct i2c_client *client)
-  {
-       struct example_state *state = i2c_get_clientdata(client);
-
-       kfree(state);
-       return 0;
-  }
-
-  static struct i2c_device_id example_idtable[] = {
-       { "example", 0 },
-       { }
-  };
-
-  MODULE_DEVICE_TABLE(i2c, example_idtable);
-
-  static struct i2c_driver example_driver = {
-       .driver         = {
-               .owner          = THIS_MODULE,
-               .name           = "example",
-               .pm             = &example_pm_ops,
-       },
-       .id_table       = example_idtable,
-       .probe          = example_probe,
-       .remove         = example_remove,
-  };
index b8e90b6..10c62e6 100644 (file)
@@ -99,7 +99,7 @@ the sake of simplicity.
 
       /*
        * Give userspace some time to read the events before we destroy the
-       * device with UI_DEV_DESTOY.
+       * device with UI_DEV_DESTROY.
        */
       sleep(1);
 
@@ -164,7 +164,7 @@ mouse.
 
       /*
        * Give userspace some time to read the events before we destroy the
-       * device with UI_DEV_DESTOY.
+       * device with UI_DEV_DESTROY.
        */
       sleep(1);
 
@@ -233,7 +233,7 @@ but interact with uinput via ioctl calls, or use libevdev.
 
       /*
        * Give userspace some time to read the events before we destroy the
-       * device with UI_DEV_DESTOY.
+       * device with UI_DEV_DESTROY.
        */
       sleep(1);
 
index b9fd56c..df245fd 100644 (file)
@@ -42,7 +42,7 @@
 # "select FW_LOADER" [0], in the end the simple alternative solution to this
 # problem consisted of matching semantics with newly introduced features.
 #
-# [0] http://lkml.kernel.org/r/1432241149-8762-1-git-send-email-mcgrof@do-not-panic.com
+# [0] https://lkml.kernel.org/r/1432241149-8762-1-git-send-email-mcgrof@do-not-panic.com
 
 mainmenu "Simple example to demo cumulative kconfig recursive dependency implication"
 
index 39881b7..1cf1aeb 100644 (file)
@@ -540,8 +540,8 @@ followed by a test macro::
 If you need to expose a compiler capability to makefiles and/or C source files,
 `CC_HAS_` is the recommended prefix for the config option::
 
-  config CC_HAS_STACKPROTECTOR_NONE
-       def_bool $(cc-option,-fno-stack-protector)
+  config CC_HAS_ASM_GOTO
+       def_bool $(success,$(srctree)/scripts/gcc-goto.sh $(CC))
 
 Build as module only
 ~~~~~~~~~~~~~~~~~~~~
@@ -688,10 +688,10 @@ and real world requirements were not well understood. As it stands though
 only reverse engineering techniques have been used to deduce semantics from
 variability modeling languages such as Kconfig [3]_.
 
-.. [0] http://www.eng.uwaterloo.ca/~shshe/kconfig_semantics.pdf
-.. [1] http://gsd.uwaterloo.ca/sites/default/files/vm-2013-berger.pdf
-.. [2] http://gsd.uwaterloo.ca/sites/default/files/ase241-berger_0.pdf
-.. [3] http://gsd.uwaterloo.ca/sites/default/files/icse2011.pdf
+.. [0] https://www.eng.uwaterloo.ca/~shshe/kconfig_semantics.pdf
+.. [1] https://gsd.uwaterloo.ca/sites/default/files/vm-2013-berger.pdf
+.. [2] https://gsd.uwaterloo.ca/sites/default/files/ase241-berger_0.pdf
+.. [3] https://gsd.uwaterloo.ca/sites/default/files/icse2011.pdf
 
 Full SAT solver for Kconfig
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -710,10 +710,10 @@ such efforts somehow on Kconfig. There is enough interest from mentors of
 existing projects to not only help advise how to integrate this work upstream
 but also help maintain it long term. Interested developers should visit:
 
-http://kernelnewbies.org/KernelProjects/kconfig-sat
+https://kernelnewbies.org/KernelProjects/kconfig-sat
 
-.. [4] http://www.cs.cornell.edu/~sabhar/chapters/SATSolvers-KR-Handbook.pdf
-.. [5] http://gsd.uwaterloo.ca/sites/default/files/vm-2013-berger.pdf
+.. [4] https://www.cs.cornell.edu/~sabhar/chapters/SATSolvers-KR-Handbook.pdf
+.. [5] https://gsd.uwaterloo.ca/sites/default/files/vm-2013-berger.pdf
 .. [6] https://cados.cs.fau.de
 .. [7] https://vamos.cs.fau.de
 .. [8] https://undertaker.cs.fau.de
index c776b6e..2aac50b 100644 (file)
@@ -76,7 +76,7 @@ Getting Help
 Getting LLVM
 -------------
 
-- http://releases.llvm.org/download.html
+- https://releases.llvm.org/download.html
 - https://github.com/llvm/llvm-project
 - https://llvm.org/docs/GettingStarted.html
 - https://llvm.org/docs/CMake.html
index 6515ebc..b81b891 100644 (file)
@@ -368,6 +368,14 @@ more details, with real examples.
 
                subdir-ccflags-y := -Werror
 
+    ccflags-remove-y, asflags-remove-y
+       These flags are used to remove particular flags for the compiler,
+       assembler invocations.
+
+       Example::
+
+               ccflags-remove-$(CONFIG_MCOUNT) += -pg
+
     CFLAGS_$@, AFLAGS_$@
        CFLAGS_$@ and AFLAGS_$@ only apply to commands in current
        kbuild makefile.
@@ -375,6 +383,9 @@ more details, with real examples.
        $(CFLAGS_$@) specifies per-file options for $(CC).  The $@
        part has a literal value which specifies the file that it is for.
 
+	CFLAGS_$@ has higher priority than ccflags-remove-y; CFLAGS_$@
+       can re-add compiler flags that were removed by ccflags-remove-y.
+
        Example::
 
                # drivers/scsi/Makefile
@@ -387,6 +398,9 @@ more details, with real examples.
        $(AFLAGS_$@) is a similar feature for source files in assembly
        languages.
 
+	AFLAGS_$@ has higher priority than asflags-remove-y; AFLAGS_$@
+       can re-add assembler flags that were removed by asflags-remove-y.
+
        Example::
 
                # arch/arm/kernel/Makefile
@@ -735,6 +749,10 @@ Both possibilities are described in the following.
                hostprogs     := lxdialog
                always-y      := $(hostprogs)
 
+       Kbuild provides the following shorthand for this:
+
+               hostprogs-always-y := lxdialog
+
        This will tell kbuild to build lxdialog even if not referenced in
        any rule.
 
@@ -817,7 +835,32 @@ The syntax is quite similar. The difference is to use "userprogs" instead of
 5.4 When userspace programs are actually built
 ----------------------------------------------
 
-       Same as "When host programs are actually built".
+       Kbuild builds userspace programs only when told to do so.
+       There are two ways to do this.
+
+       (1) Add it as the prerequisite of another file
+
+       Example::
+
+               #net/bpfilter/Makefile
+               userprogs := bpfilter_umh
+               $(obj)/bpfilter_umh_blob.o: $(obj)/bpfilter_umh
+
+       $(obj)/bpfilter_umh is built before $(obj)/bpfilter_umh_blob.o
+
+       (2) Use always-y
+
+       Example::
+
+               userprogs := binderfs_example
+               always-y := $(userprogs)
+
+       Kbuild provides the following shorthand for this:
+
+               userprogs-always-y := binderfs_example
+
+       This will tell Kbuild to build binderfs_example when it visits this
+       Makefile.
 
 6 Kbuild clean infrastructure
 =============================
index 1b577a8..4cefed8 100644 (file)
@@ -10,7 +10,7 @@ Introduction
 ============
 
 The kernel provides a variety of locking primitives which can be divided
-into two categories:
+into three categories:
 
  - Sleeping locks
  - CPU local locks
index 366dd36..62c5ad9 100644 (file)
@@ -87,6 +87,58 @@ Read path::
        } while (read_seqcount_retry(&foo_seqcount, seq));
 
 
+.. _seqcount_locktype_t:
+
+Sequence counters with associated locks (``seqcount_LOCKTYPE_t``)
+-----------------------------------------------------------------
+
+As discussed at :ref:`seqcount_t`, sequence count write side critical
+sections must be serialized and non-preemptible. This variant of
+sequence counters associates the lock used for writer serialization at
+initialization time, which enables lockdep to validate that the write
+side critical sections are properly serialized.
+
+This lock association is a NOOP if lockdep is disabled and has neither
+storage nor runtime overhead. If lockdep is enabled, the lock pointer is
+stored in struct seqcount and lockdep's "lock is held" assertions are
+injected at the beginning of the write side critical section to validate
+that it is properly protected.
+
+For lock types which do not implicitly disable preemption, preemption
+protection is enforced in the write side function.
+
+The following sequence counters with associated locks are defined:
+
+  - ``seqcount_spinlock_t``
+  - ``seqcount_raw_spinlock_t``
+  - ``seqcount_rwlock_t``
+  - ``seqcount_mutex_t``
+  - ``seqcount_ww_mutex_t``
+
+The plain seqcount read and write APIs branch out to the specific
+seqcount_LOCKTYPE_t implementation at compile-time. This avoids kernel
+API explosion for each new seqcount LOCKTYPE.
+
+Initialization (replace "LOCKTYPE" with one of the supported locks)::
+
+       /* dynamic */
+       seqcount_LOCKTYPE_t foo_seqcount;
+       seqcount_LOCKTYPE_init(&foo_seqcount, &lock);
+
+       /* static */
+       static seqcount_LOCKTYPE_t foo_seqcount =
+               SEQCNT_LOCKTYPE_ZERO(foo_seqcount, &lock);
+
+       /* C99 struct init */
+       struct {
+               .seq   = SEQCNT_LOCKTYPE_ZERO(foo.seq, &lock),
+       } foo;
+
+Write path: same as in :ref:`seqcount_t`, while running from a context
+with the associated LOCKTYPE lock acquired.
+
+Read path: same as in :ref:`seqcount_t`.
+
 .. _seqlock_t:
 
 Sequential locks (``seqlock_t``)
index 24168b0..adc3146 100644 (file)
@@ -2860,17 +2860,6 @@ version of the linux kernel, found on http://kernel.org
 The latest version of this document can be found in the latest kernel
 source (named Documentation/networking/bonding.rst).
 
-Discussions regarding the usage of the bonding driver take place on the
-bonding-devel mailing list, hosted at sourceforge.net. If you have questions or
-problems, post them to the list.  The list address is:
-
-bonding-devel@lists.sourceforge.net
-
-The administrative interface (to subscribe or unsubscribe) can
-be found at:
-
-https://lists.sourceforge.net/lists/listinfo/bonding-devel
-
 Discussions regarding the development of the bonding driver take place
 on the main Linux network mailing list, hosted at vger.kernel.org. The list
 address is:
@@ -2881,10 +2870,3 @@ The administrative interface (to subscribe or unsubscribe) can
 be found at:
 
 http://vger.kernel.org/vger-lists.html#netdev
-
-Donald Becker's Ethernet Drivers and diag programs may be found at :
-
- - http://web.archive.org/web/%2E/http://www.scyld.com/network/
-
-You will also find a lot of information regarding Ethernet, NWay, MII,
-etc. at www.scyld.com.
index 3588f48..4ae1e0f 100644 (file)
@@ -113,7 +113,7 @@ than one development cycle past their initial release. So, for example, the
 5.2 kernel's history looked like this (all dates in 2019):
 
        ==============  ===============================
-       September 15    5.2 stable release
+       July 7          5.2 stable release
        July 14         5.2.1
        July 21         5.2.2
        July 26         5.2.3
index 24b7a9e..1ce353c 100644 (file)
@@ -192,9 +192,9 @@ Returns 0 on success and an appropriate error value on failure.
 
 ::
 
-  struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_channel *rpdev,
-               void (*cb)(struct rpmsg_channel *, void *, int, void *, u32),
-               void *priv, u32 addr);
+  struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_device *rpdev,
+                                         rpmsg_rx_cb_t cb, void *priv,
+                                         struct rpmsg_channel_info chinfo);
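
A hedged sketch of calling the updated prototype from an rpmsg driver's
probe; the channel name is a placeholder and error handling is minimal::

  #include <linux/rpmsg.h>
  #include <linux/string.h>

  static int my_ept_cb(struct rpmsg_device *rpdev, void *data, int len,
                       void *priv, u32 src)
  {
          dev_info(&rpdev->dev, "received %d bytes from 0x%x\n", len, src);
          return 0;
  }

  static int my_rpmsg_probe(struct rpmsg_device *rpdev)
  {
          struct rpmsg_channel_info chinfo = {
                  .src = RPMSG_ADDR_ANY,
                  .dst = RPMSG_ADDR_ANY,
          };
          struct rpmsg_endpoint *ept;

          strscpy(chinfo.name, "my-extra-channel", sizeof(chinfo.name));

          ept = rpmsg_create_ept(rpdev, my_ept_cb, NULL, chinfo);
          if (!ept)
                  return -ENOMEM;

          /* store ept and rpmsg_destroy_ept() it in the remove callback */
          return 0;
  }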
 
 every rpmsg address in the system is bound to an rx callback (so when
 inbound messages arrive, they are dispatched by the rpmsg bus using the
index 70b7126..b31818d 100644 (file)
@@ -58,7 +58,7 @@ Bus and Subdevices
 
 For each Intel TH device in the system a bus of its own is
 created and assigned an id number that reflects the order in which TH
-devices were emumerated. All TH subdevices (devices on intel_th bus)
+devices were enumerated. All TH subdevices (devices on intel_th bus)
 begin with this id: 0-gth, 0-msc0, 0-msc1, 0-pti, 0-sth, which is
 followed by device's name and an optional index.
 
index 0116d04..c972731 100644 (file)
@@ -1,6 +1,6 @@
-=======
+========
 CPU 负载
-=======
+========
 
 Linux通过``/proc/stat``和``/proc/uptime``导出各种信息,用户空间工具
 如top(1)使用这些信息计算系统花费在某个特定状态的平均时间。
index 7d502fa..ed5ab7e 100644 (file)
@@ -1,6 +1,6 @@
 .. include:: ../disclaimer-zh_CN.rst
 
-:Original: :ref:`Documentation/admin-guide/index.rst`
+:Original: :doc:`../../../admin-guide/index`
 :Translator: Alex Shi <alex.shi@linux.alibaba.com>
 
 
index 59472cd..2a19883 100644 (file)
@@ -339,6 +339,7 @@ Code  Seq#    Include File                                           Comments
 0xB4  00-0F  linux/gpio.h                                            <mailto:linux-gpio@vger.kernel.org>
 0xB5  00-0F  uapi/linux/rpmsg.h                                      <mailto:linux-remoteproc@vger.kernel.org>
 0xB6  all    linux/fpga-dfl.h
+0xB7  all    uapi/linux/remoteproc_cdev.h                            <mailto:linux-remoteproc@vger.kernel.org>
 0xC0  00-0F  linux/usb/iowarrior.h
 0xCA  00-0F  uapi/misc/cxl.h
 0xCA  10-2F  uapi/misc/ocxl.h
index 1d6cd7d..68883ac 100644 (file)
@@ -253,5 +253,32 @@ which are function pointers of struct address_space_operations.
      PG_isolated is alias with PG_reclaim flag so driver shouldn't use the flag
      for own purpose.
 
+Monitoring Migration
+=====================
+
+The following events (counters) can be used to monitor page migration.
+
+1. PGMIGRATE_SUCCESS: Normal page migration success. Each count means that a
+   page was migrated. If the page was a non-THP page, then this counter is
+   increased by one. If the page was a THP, then this counter is increased by
+   the number of THP subpages. For example, migration of a single 2MB THP that
+   has 4KB-size base pages (subpages) will cause this counter to increase by
+   512.
+
+2. PGMIGRATE_FAIL: Normal page migration failure. Same counting rules as for
+   _SUCCESS, above: this will be increased by the number of subpages, if it was
+   a THP.
+
+3. THP_MIGRATION_SUCCESS: A THP was migrated without being split.
+
+4. THP_MIGRATION_FAIL: A THP could not be migrated nor could it be split.
+
+5. THP_MIGRATION_SPLIT: A THP was migrated, but not as such: first, the THP had
+   to be split. After splitting, a migration retry was used for its sub-pages.
+
+THP_MIGRATION_* events also update the appropriate PGMIGRATE_SUCCESS or
+PGMIGRATE_FAIL events. For example, a THP migration failure will cause both
+THP_MIGRATION_FAIL and PGMIGRATE_FAIL to increase.
+
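A small user-space sketch that dumps these counters, assuming they appear
in /proc/vmstat under their lowercase names::

  #include <stdio.h>
  #include <string.h>

  int main(void)
  {
          char line[128];
          FILE *f = fopen("/proc/vmstat", "r");

          if (!f) {
                  perror("/proc/vmstat");
                  return 1;
          }

          while (fgets(line, sizeof(line), f)) {
                  if (!strncmp(line, "pgmigrate_", 10) ||
                      !strncmp(line, "thp_migration_", 14))
                          fputs(line, stdout);
          }

          fclose(f);
          return 0;
  }
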
 Christoph Lameter, May 8, 2006.
 Minchan Kim, Mar 28, 2016.
index bf5bafa..35e690d 100644 (file)
@@ -24,10 +24,19 @@ Type 2:
   Maximum timeout is 255 sec.
   Get time-left is supported.
 
+Type 3:
+  Same as Type 2 with extended maximum timeout period.
+  Maximum timeout is 65535 sec.
+
 The Type 1 HW watchdog implementation exists in old systems;
 all new systems have the Type 2 HW watchdog.
 The two types of HW implementation also have different register maps.
 
+Type 3 HW watchdog implementation can exist on all Mellanox systems
+with a new programmable logic device.
+It is differentiated by the WD capability bit.
+Old systems still have only one main watchdog.
+
 Mellanox system can have 2 watchdogs: main and auxiliary.
 Main and auxiliary watchdog devices can be enabled together
 on the same system.
@@ -54,3 +63,4 @@ The driver checks during initialization if the previous system reset
 was done by the watchdog. If yes, it makes a notification about this event.
 
 Access to HW registers is performed through a generic regmap interface.
+Programmable logic device registers have little-endian order.
index c6c1e9f..800dcd7 100644 (file)
@@ -168,7 +168,7 @@ the fields returned in the ident struct are:
 
 the options field can have the following bits set, and describes what
 kind of information that the GET_STATUS and GET_BOOT_STATUS ioctls can
-return.   [FIXME -- Is this correct?]
+return.
 
        ================        =========================
        WDIOF_OVERHEAT          Reset due to CPU overheat
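
A user-space sketch that queries the options bitmask with WDIOC_GETSUPPORT;
"/dev/watchdog0" is a placeholder node, and the final magic-close write
assumes the driver supports WDIOF_MAGICCLOSE::

  #include <fcntl.h>
  #include <stdio.h>
  #include <unistd.h>
  #include <sys/ioctl.h>
  #include <linux/watchdog.h>

  int main(void)
  {
          struct watchdog_info ident;
          int fd = open("/dev/watchdog0", O_WRONLY);

          if (fd < 0) {
                  perror("open");
                  return 1;
          }

          if (ioctl(fd, WDIOC_GETSUPPORT, &ident) == 0) {
                  printf("identity: %s, options: 0x%x\n",
                         (const char *)ident.identity, ident.options);
                  if (ident.options & WDIOF_OVERHEAT)
                          printf("driver can report resets due to CPU overheat\n");
          }

          /* magic close so the watchdog is disarmed on exit, if supported */
          if (write(fd, "V", 1) < 0)
                  perror("write");
          close(fd);
          return 0;
  }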
index 068a55e..baf44e9 100644 (file)
@@ -336,3 +336,15 @@ an action is taken by a preconfigured pretimeout governor preassigned to
 the watchdog device. If watchdog pretimeout governor framework is not
 enabled, watchdog_notify_pretimeout() prints a notification message to
 the kernel log buffer.
+
+To set the last known HW keepalive time for a watchdog, the following function
+should be used::
+
+  int watchdog_set_last_hw_keepalive(struct watchdog_device *wdd,
+                                     unsigned int last_ping_ms)
+
+This function must be called immediately after watchdog registration. It
+sets the last known hardware heartbeat to have happened last_ping_ms before
+current time. Calling this is only needed if the watchdog is already running
+when probe is called, and the watchdog can only be pinged after the
+min_hw_heartbeat_ms time has passed from the last ping.
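
A hedged driver-side sketch; only devm_watchdog_register_device() and
watchdog_set_last_hw_keepalive() are from the framework, while struct
foo_wdt and the foo_wdt_* helpers are hypothetical::

  #include <linux/platform_device.h>
  #include <linux/watchdog.h>

  struct foo_wdt {                        /* hypothetical driver state */
          struct watchdog_device wdd;
  };

  static int foo_wdt_probe(struct platform_device *pdev)
  {
          struct foo_wdt *priv;
          unsigned int last_ping_ms = 0;
          int ret;

          priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
          if (!priv)
                  return -ENOMEM;

          /* ... set priv->wdd.info, priv->wdd.ops, timeouts, etc. here ... */

          if (foo_wdt_hw_is_running(priv)) {       /* hypothetical HW query */
                  set_bit(WDOG_HW_RUNNING, &priv->wdd.status);
                  last_ping_ms = foo_wdt_ms_since_ping(priv);  /* hypothetical */
          }

          ret = devm_watchdog_register_device(&pdev->dev, &priv->wdd);
          if (ret)
                  return ret;

          /* must immediately follow registration, per the text above */
          if (last_ping_ms)
                  ret = watchdog_set_last_hw_keepalive(&priv->wdd, last_ping_ms);

          return ret;
  }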
index f77df02..f0068bc 100644 (file)
@@ -1496,7 +1496,7 @@ ARM SMC WATCHDOG DRIVER
 M:     Julius Werner <jwerner@chromium.org>
 R:     Evan Benn <evanbenn@chromium.org>
 S:     Maintained
-F:     devicetree/bindings/watchdog/arm-smc-wdt.yaml
+F:     Documentation/devicetree/bindings/watchdog/arm-smc-wdt.yaml
 F:     drivers/watchdog/arm_smc_wdt.c
 
 ARM SMMU DRIVERS
@@ -1505,7 +1505,7 @@ R:        Robin Murphy <robin.murphy@arm.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 F:     Documentation/devicetree/bindings/iommu/arm,smmu*
-F:     drivers/iommu/arm-smmu*
+F:     drivers/iommu/arm/
 F:     drivers/iommu/io-pgtable-arm-v7s.c
 F:     drivers/iommu/io-pgtable-arm.c
 
@@ -1540,6 +1540,7 @@ F:        drivers/mmc/host/owl-mmc.c
 F:     drivers/pinctrl/actions/*
 F:     drivers/soc/actions/
 F:     include/dt-bindings/power/owl-*
+F:     include/dt-bindings/reset/actions,*
 F:     include/linux/soc/actions/
 N:     owl
 
@@ -5049,7 +5050,7 @@ F:        include/linux/dm-*.h
 F:     include/uapi/linux/dm-*.h
 
 DEVLINK
-M:     Jiri Pirko <jiri@mellanox.com>
+M:     Jiri Pirko <jiri@nvidia.com>
 L:     netdev@vger.kernel.org
 S:     Supported
 F:     Documentation/networking/devlink
@@ -6080,7 +6081,7 @@ F:        include/linux/dynamic_debug.h
 F:     lib/dynamic_debug.c
 
 DYNAMIC INTERRUPT MODERATION
-M:     Tal Gilboa <talgi@mellanox.com>
+M:     Tal Gilboa <talgi@nvidia.com>
 S:     Maintained
 F:     Documentation/networking/net_dim.rst
 F:     include/linux/dim.h
@@ -6160,7 +6161,7 @@ F:        Documentation/devicetree/bindings/edac/aspeed-sdram-edac.txt
 F:     drivers/edac/aspeed_edac.c
 
 EDAC-BLUEFIELD
-M:     Shravan Kumar Ramani <sramani@mellanox.com>
+M:     Shravan Kumar Ramani <sramani@nvidia.com>
 S:     Supported
 F:     drivers/edac/bluefield_edac.c
 
@@ -6482,8 +6483,8 @@ S:        Odd Fixes
 F:     drivers/net/ethernet/agere/
 
 ETHERNET BRIDGE
-M:     Roopa Prabhu <roopa@cumulusnetworks.com>
-M:     Nikolay Aleksandrov <nikolay@cumulusnetworks.com>
+M:     Roopa Prabhu <roopa@nvidia.com>
+M:     Nikolay Aleksandrov <nikolay@nvidia.com>
 L:     bridge@lists.linux-foundation.org (moderated for non-subscribers)
 L:     netdev@vger.kernel.org
 S:     Maintained
@@ -6598,7 +6599,7 @@ F:        drivers/iommu/exynos-iommu.c
 
 EZchip NPS platform support
 M:     Vineet Gupta <vgupta@synopsys.com>
-M:     Ofer Levi <oferle@mellanox.com>
+M:     Ofer Levi <oferle@nvidia.com>
 S:     Supported
 F:     arch/arc/boot/dts/eznps.dts
 F:     arch/arc/plat-eznps
@@ -8409,8 +8410,9 @@ W:        https://github.com/o2genum/ideapad-slidebar
 F:     drivers/input/misc/ideapad_slidebar.c
 
 IDT VersaClock 5 CLOCK DRIVER
-M:     Marek Vasut <marek.vasut@gmail.com>
+M:     Luca Ceresoli <luca@lucaceresoli.net>
 S:     Maintained
+F:     Documentation/devicetree/bindings/clock/idt,versaclock5.yaml
 F:     drivers/clk/clk-versaclock5.c
 
 IEEE 802.15.4 SUBSYSTEM
@@ -8561,7 +8563,7 @@ F:        drivers/iio/pressure/dps310.c
 
 INFINIBAND SUBSYSTEM
 M:     Doug Ledford <dledford@redhat.com>
-M:     Jason Gunthorpe <jgg@mellanox.com>
+M:     Jason Gunthorpe <jgg@nvidia.com>
 L:     linux-rdma@vger.kernel.org
 S:     Supported
 W:     https://github.com/linux-rdma/rdma-core
@@ -9102,6 +9104,7 @@ F:        drivers/iommu/
 F:     include/linux/iommu.h
 F:     include/linux/iova.h
 F:     include/linux/of_iommu.h
+F:     include/uapi/linux/iommu.h
 
 IO_URING
 M:     Jens Axboe <axboe@kernel.dk>
@@ -9223,7 +9226,7 @@ F:        drivers/firmware/iscsi_ibft*
 
 ISCSI EXTENSIONS FOR RDMA (ISER) INITIATOR
 M:     Sagi Grimberg <sagi@grimberg.me>
-M:     Max Gurtovoy <maxg@mellanox.com>
+M:     Max Gurtovoy <maxg@nvidia.com>
 L:     linux-rdma@vger.kernel.org
 S:     Supported
 W:     http://www.openfabrics.org
@@ -9675,6 +9678,15 @@ F:       include/linux/kdb.h
 F:     include/linux/kgdb.h
 F:     kernel/debug/
 
+KHADAS MCU MFD DRIVER
+M:     Neil Armstrong <narmstrong@baylibre.com>
+L:     linux-amlogic@lists.infradead.org
+S:     Maintained
+F:     Documentation/devicetree/bindings/mfd/khadas,mcu.yaml
+F:     drivers/mfd/khadas-mcu.c
+F:     include/linux/mfd/khadas-mcu.h
+F:     drivers/thermal/khadas_mcu_fan.c
+
 KMEMLEAK
 M:     Catalin Marinas <catalin.marinas@arm.com>
 S:     Maintained
@@ -11060,7 +11072,7 @@ F:      Documentation/devicetree/bindings/input/touchscreen/melfas_mip4.txt
 F:     drivers/input/touchscreen/melfas_mip4.c
 
 MELLANOX ETHERNET DRIVER (mlx4_en)
-M:     Tariq Toukan <tariqt@mellanox.com>
+M:     Tariq Toukan <tariqt@nvidia.com>
 L:     netdev@vger.kernel.org
 S:     Supported
 W:     http://www.mellanox.com
@@ -11068,7 +11080,7 @@ Q:      http://patchwork.ozlabs.org/project/netdev/list/
 F:     drivers/net/ethernet/mellanox/mlx4/en_*
 
 MELLANOX ETHERNET DRIVER (mlx5e)
-M:     Saeed Mahameed <saeedm@mellanox.com>
+M:     Saeed Mahameed <saeedm@nvidia.com>
 L:     netdev@vger.kernel.org
 S:     Supported
 W:     http://www.mellanox.com
@@ -11076,7 +11088,7 @@ Q:      http://patchwork.ozlabs.org/project/netdev/list/
 F:     drivers/net/ethernet/mellanox/mlx5/core/en_*
 
 MELLANOX ETHERNET INNOVA DRIVERS
-R:     Boris Pismenny <borisp@mellanox.com>
+R:     Boris Pismenny <borisp@nvidia.com>
 L:     netdev@vger.kernel.org
 S:     Supported
 W:     http://www.mellanox.com
@@ -11087,8 +11099,8 @@ F:      drivers/net/ethernet/mellanox/mlx5/core/fpga/*
 F:     include/linux/mlx5/mlx5_ifc_fpga.h
 
 MELLANOX ETHERNET SWITCH DRIVERS
-M:     Jiri Pirko <jiri@mellanox.com>
-M:     Ido Schimmel <idosch@mellanox.com>
+M:     Jiri Pirko <jiri@nvidia.com>
+M:     Ido Schimmel <idosch@nvidia.com>
 L:     netdev@vger.kernel.org
 S:     Supported
 W:     http://www.mellanox.com
@@ -11097,7 +11109,7 @@ F:      drivers/net/ethernet/mellanox/mlxsw/
 F:     tools/testing/selftests/drivers/net/mlxsw/
 
 MELLANOX FIRMWARE FLASH LIBRARY (mlxfw)
-M:     mlxsw@mellanox.com
+M:     mlxsw@nvidia.com
 L:     netdev@vger.kernel.org
 S:     Supported
 W:     http://www.mellanox.com
@@ -11107,7 +11119,7 @@ F:      drivers/net/ethernet/mellanox/mlxfw/
 MELLANOX HARDWARE PLATFORM SUPPORT
 M:     Andy Shevchenko <andy@infradead.org>
 M:     Darren Hart <dvhart@infradead.org>
-M:     Vadim Pasternak <vadimp@mellanox.com>
+M:     Vadim Pasternak <vadimp@nvidia.com>
 L:     platform-driver-x86@vger.kernel.org
 S:     Supported
 F:     Documentation/ABI/testing/sysfs-platform-mellanox-bootctl
@@ -11115,7 +11127,7 @@ F:      drivers/platform/mellanox/
 F:     include/linux/platform_data/mlxreg.h
 
 MELLANOX MLX4 core VPI driver
-M:     Tariq Toukan <tariqt@mellanox.com>
+M:     Tariq Toukan <tariqt@nvidia.com>
 L:     netdev@vger.kernel.org
 L:     linux-rdma@vger.kernel.org
 S:     Supported
@@ -11125,7 +11137,7 @@ F:      drivers/net/ethernet/mellanox/mlx4/
 F:     include/linux/mlx4/
 
 MELLANOX MLX4 IB driver
-M:     Yishai Hadas <yishaih@mellanox.com>
+M:     Yishai Hadas <yishaih@nvidia.com>
 L:     linux-rdma@vger.kernel.org
 S:     Supported
 W:     http://www.mellanox.com
@@ -11135,8 +11147,8 @@ F:      include/linux/mlx4/
 F:     include/uapi/rdma/mlx4-abi.h
 
 MELLANOX MLX5 core VPI driver
-M:     Saeed Mahameed <saeedm@mellanox.com>
-M:     Leon Romanovsky <leonro@mellanox.com>
+M:     Saeed Mahameed <saeedm@nvidia.com>
+M:     Leon Romanovsky <leonro@nvidia.com>
 L:     netdev@vger.kernel.org
 L:     linux-rdma@vger.kernel.org
 S:     Supported
@@ -11147,7 +11159,7 @@ F:      drivers/net/ethernet/mellanox/mlx5/core/
 F:     include/linux/mlx5/
 
 MELLANOX MLX5 IB driver
-M:     Leon Romanovsky <leonro@mellanox.com>
+M:     Leon Romanovsky <leonro@nvidia.com>
 L:     linux-rdma@vger.kernel.org
 S:     Supported
 W:     http://www.mellanox.com
@@ -11157,8 +11169,8 @@ F:      include/linux/mlx5/
 F:     include/uapi/rdma/mlx5-abi.h
 
 MELLANOX MLXCPLD I2C AND MUX DRIVER
-M:     Vadim Pasternak <vadimp@mellanox.com>
-M:     Michael Shych <michaelsh@mellanox.com>
+M:     Vadim Pasternak <vadimp@nvidia.com>
+M:     Michael Shych <michaelsh@nvidia.com>
 L:     linux-i2c@vger.kernel.org
 S:     Supported
 F:     Documentation/i2c/busses/i2c-mlxcpld.rst
@@ -11166,7 +11178,7 @@ F:      drivers/i2c/busses/i2c-mlxcpld.c
 F:     drivers/i2c/muxes/i2c-mux-mlxcpld.c
 
 MELLANOX MLXCPLD LED DRIVER
-M:     Vadim Pasternak <vadimp@mellanox.com>
+M:     Vadim Pasternak <vadimp@nvidia.com>
 L:     linux-leds@vger.kernel.org
 S:     Supported
 F:     Documentation/leds/leds-mlxcpld.rst
@@ -11174,7 +11186,7 @@ F:      drivers/leds/leds-mlxcpld.c
 F:     drivers/leds/leds-mlxreg.c
 
 MELLANOX PLATFORM DRIVER
-M:     Vadim Pasternak <vadimp@mellanox.com>
+M:     Vadim Pasternak <vadimp@nvidia.com>
 L:     platform-driver-x86@vger.kernel.org
 S:     Supported
 F:     drivers/platform/x86/mlx-platform.c
@@ -11983,7 +11995,8 @@ F:      include/uapi/linux/netrom.h
 F:     net/netrom/
 
 NETRONOME ETHERNET DRIVERS
-M:     Jakub Kicinski <kuba@kernel.org>
+M:     Simon Horman <simon.horman@netronome.com>
+R:     Jakub Kicinski <kuba@kernel.org>
 L:     oss-drivers@netronome.com
 S:     Maintained
 F:     drivers/net/ethernet/netronome/
@@ -12154,8 +12167,8 @@ F:      net/ipv6/syncookies.c
 F:     net/ipv6/tcp*.c
 
 NETWORKING [TLS]
-M:     Boris Pismenny <borisp@mellanox.com>
-M:     Aviad Yehezkel <aviadye@mellanox.com>
+M:     Boris Pismenny <borisp@nvidia.com>
+M:     Aviad Yehezkel <aviadye@nvidia.com>
 M:     John Fastabend <john.fastabend@gmail.com>
 M:     Daniel Borkmann <daniel@iogearbox.net>
 M:     Jakub Kicinski <kuba@kernel.org>
@@ -12455,7 +12468,7 @@ S:      Supported
 F:     drivers/nfc/nxp-nci
 
 OBJAGG
-M:     Jiri Pirko <jiri@mellanox.com>
+M:     Jiri Pirko <jiri@nvidia.com>
 L:     netdev@vger.kernel.org
 S:     Supported
 F:     include/linux/objagg.h
@@ -13097,7 +13110,7 @@ F:      drivers/video/logo/logo_parisc*
 F:     include/linux/hp_sdc.h
 
 PARMAN
-M:     Jiri Pirko <jiri@mellanox.com>
+M:     Jiri Pirko <jiri@nvidia.com>
 L:     netdev@vger.kernel.org
 S:     Supported
 F:     include/linux/parman.h
@@ -13553,6 +13566,7 @@ F:      arch/*/kernel/perf_event*.c
 F:     include/linux/perf_event.h
 F:     include/uapi/linux/perf_event.h
 F:     kernel/events/*
+F:     tools/lib/perf/
 F:     tools/perf/
 
 PERFORMANCE EVENTS SUBSYSTEM ARM64 PMU EVENTS
@@ -14883,6 +14897,13 @@ L:     linux-serial@vger.kernel.org
 S:     Odd Fixes
 F:     drivers/tty/serial/rp2.*
 
+ROHM BD99954 CHARGER IC
+R:     Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>
+L:     linux-power@fi.rohmeurope.com
+S:     Supported
+F:     drivers/power/supply/bd99954-charger.c
+F:     drivers/power/supply/bd99954-charger.h
+
 ROHM BH1750 AMBIENT LIGHT SENSOR DRIVER
 M:     Tomasz Duszynski <tduszyns@gmail.com>
 S:     Maintained
@@ -14900,6 +14921,31 @@ F:     drivers/mfd/bd9571mwv.c
 F:     drivers/regulator/bd9571mwv-regulator.c
 F:     include/linux/mfd/bd9571mwv.h
 
+ROHM POWER MANAGEMENT IC DEVICE DRIVERS
+R:     Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>
+L:     linux-power@fi.rohmeurope.com
+S:     Supported
+F:     Documentation/devicetree/bindings/mfd/rohm,bd70528-pmic.txt
+F:     Documentation/devicetree/bindings/regulator/rohm,bd70528-regulator.txt
+F:     drivers/clk/clk-bd718x7.c
+F:     drivers/gpio/gpio-bd70528.c
+F:     drivers/gpio/gpio-bd71828.c
+F:     drivers/mfd/rohm-bd70528.c
+F:     drivers/mfd/rohm-bd71828.c
+F:     drivers/mfd/rohm-bd718x7.c
+F:     drivers/power/supply/bd70528-charger.c
+F:     drivers/regulator/bd70528-regulator.c
+F:     drivers/regulator/bd71828-regulator.c
+F:     drivers/regulator/bd718x7-regulator.c
+F:     drivers/regulator/rohm-regulator.c
+F:     drivers/rtc/rtc-bd70528.c
+F:     drivers/watchdog/bd70528_wdt.c
+F:     include/linux/mfd/rohm-bd70528.h
+F:     include/linux/mfd/rohm-bd71828.h
+F:     include/linux/mfd/rohm-bd718x7.h
+F:     include/linux/mfd/rohm-generic.h
+F:     include/linux/mfd/rohm-shared.h
+
 ROSE NETWORK LAYER
 M:     Ralf Baechle <ralf@linux-mips.org>
 L:     linux-hams@vger.kernel.org
@@ -15988,7 +16034,7 @@ F:      drivers/infiniband/sw/siw/
 F:     include/uapi/rdma/siw-abi.h
 
 SOFT-ROCE DRIVER (rxe)
-M:     Zhu Yanjun <yanjunz@mellanox.com>
+M:     Zhu Yanjun <yanjunz@nvidia.com>
 L:     linux-rdma@vger.kernel.org
 S:     Supported
 F:     drivers/infiniband/sw/rxe/
@@ -16973,8 +17019,10 @@ F:     drivers/i2c/busses/i2c-tegra.c
 
 TEGRA IOMMU DRIVERS
 M:     Thierry Reding <thierry.reding@gmail.com>
+R:     Krishna Reddy <vdumpa@nvidia.com>
 L:     linux-tegra@vger.kernel.org
 S:     Supported
+F:     drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c
 F:     drivers/iommu/tegra*
 
 TEGRA KBC DRIVER
@@ -17065,6 +17113,7 @@ M:      Tero Kristo <t-kristo@ti.com>
 M:     Santosh Shilimkar <ssantosh@kernel.org>
 L:     linux-arm-kernel@lists.infradead.org
 S:     Maintained
+F:     Documentation/devicetree/bindings/arm/keystone/ti,k3-sci-common.yaml
 F:     Documentation/devicetree/bindings/arm/keystone/ti,sci.txt
 F:     Documentation/devicetree/bindings/clock/ti,sci-clk.txt
 F:     Documentation/devicetree/bindings/interrupt-controller/ti,sci-inta.txt
index 5cfc348..9cac6fd 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
-PATCHLEVEL = 8
+PATCHLEVEL = 9
 SUBLEVEL = 0
-EXTRAVERSION =
+EXTRAVERSION = -rc1
 NAME = Kleptomaniac Octopus
 
 # *DOCUMENTATION*
@@ -508,7 +508,6 @@ KBUILD_CFLAGS_MODULE  := -DMODULE
 KBUILD_LDFLAGS_MODULE :=
 export KBUILD_LDS_MODULE := $(srctree)/scripts/module-common.lds
 KBUILD_LDFLAGS :=
-GCC_PLUGINS_CFLAGS :=
 CLANG_FLAGS :=
 
 export ARCH SRCARCH CONFIG_SHELL BASH HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE LD CC
@@ -747,9 +746,6 @@ endif
 KBUILD_CFLAGS  += $(call cc-option,--param=allow-store-data-races=0)
 KBUILD_CFLAGS  += $(call cc-option,-fno-allow-store-data-races)
 
-include scripts/Makefile.kcov
-include scripts/Makefile.gcc-plugins
-
 ifdef CONFIG_READABLE_ASM
 # Disable optimizations that make assembler listings hard to read.
 # reorder blocks reorders the control in the function
@@ -764,7 +760,7 @@ ifneq ($(CONFIG_FRAME_WARN),0)
 KBUILD_CFLAGS += -Wframe-larger-than=$(CONFIG_FRAME_WARN)
 endif
 
-stackp-flags-$(CONFIG_CC_HAS_STACKPROTECTOR_NONE) := -fno-stack-protector
+stackp-flags-y                                    := -fno-stack-protector
 stackp-flags-$(CONFIG_STACKPROTECTOR)             := -fstack-protector
 stackp-flags-$(CONFIG_STACKPROTECTOR_STRONG)      := -fstack-protector-strong
 
@@ -897,6 +893,10 @@ KBUILD_CFLAGS      += $(CC_FLAGS_SCS)
 export CC_FLAGS_SCS
 endif
 
+ifdef CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_32B
+KBUILD_CFLAGS += -falign-functions=32
+endif
+
 # arch Makefile may override CC so keep this after arch Makefile is included
 NOSTDINC_FLAGS += -nostdinc -isystem $(shell $(CC) -print-file-name=include)
 
@@ -959,10 +959,19 @@ ifdef CONFIG_RETPOLINE
 KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none)
 endif
 
-include scripts/Makefile.kasan
-include scripts/Makefile.extrawarn
-include scripts/Makefile.ubsan
-include scripts/Makefile.kcsan
+# include additional Makefiles when needed
+include-y                      := scripts/Makefile.extrawarn
+include-$(CONFIG_KASAN)                += scripts/Makefile.kasan
+include-$(CONFIG_KCSAN)                += scripts/Makefile.kcsan
+include-$(CONFIG_UBSAN)                += scripts/Makefile.ubsan
+include-$(CONFIG_KCOV)         += scripts/Makefile.kcov
+include-$(CONFIG_GCC_PLUGINS)  += scripts/Makefile.gcc-plugins
+
+include $(addprefix $(srctree)/, $(include-y))
+
+# scripts/Makefile.gcc-plugins is intentionally included last.
+# Do not add $(call cc-option,...) below this line. When you build the kernel
+# from the clean source tree, the GCC plugins do not exist at this point.
 
 # Add user supplied CPPFLAGS, AFLAGS and CFLAGS as the last assignments
 KBUILD_CPPFLAGS += $(KCPPFLAGS)
@@ -1120,7 +1129,6 @@ KBUILD_VMLINUX_OBJS += $(patsubst %/,%/built-in.a, $(drivers-y))
 
 export KBUILD_VMLINUX_OBJS KBUILD_VMLINUX_LIBS
 export KBUILD_LDS          := arch/$(SRCARCH)/kernel/vmlinux.lds
-export LDFLAGS_vmlinux
 # used by scripts/Makefile.package
 export KBUILD_ALLDIRS := $(sort $(filter-out arch/%,$(vmlinux-alldirs)) LICENSES arch include scripts tools)
 
@@ -1152,7 +1160,7 @@ ARCH_POSTLINK := $(wildcard $(srctree)/arch/$(SRCARCH)/Makefile.postlink)
 
 # Final link of vmlinux with optional arch pass after final link
 cmd_link-vmlinux =                                                 \
-       $(CONFIG_SHELL) $< $(LD) $(KBUILD_LDFLAGS) $(LDFLAGS_vmlinux) ;    \
+       $(CONFIG_SHELL) $< "$(LD)" "$(KBUILD_LDFLAGS)" "$(LDFLAGS_vmlinux)";    \
        $(if $(ARCH_POSTLINK), $(MAKE) -f $(ARCH_POSTLINK) $@, true)
 
 vmlinux: scripts/link-vmlinux.sh autoksyms_recursive $(vmlinux-deps) FORCE
index a112448..af14a56 100644 (file)
@@ -481,9 +481,6 @@ config HAVE_STACKPROTECTOR
          An arch should select this symbol if:
          - it has implemented a stack canary (e.g. __stack_chk_guard)
 
-config CC_HAS_STACKPROTECTOR_NONE
-       def_bool $(cc-option,-fno-stack-protector)
-
 config STACKPROTECTOR
        bool "Stack Protector buffer overflow detection"
        depends on HAVE_STACKPROTECTOR
@@ -975,6 +972,9 @@ config HAVE_SPARSE_SYSCALL_NR
          entries at 4000, 5000 and 6000 locations. This option turns on syscall
          related optimizations for a given architecture.
 
+config ARCH_HAS_VDSO_DATA
+       bool
+
 source "kernel/gcov/Kconfig"
 
 source "scripts/gcc-plugins/Kconfig"
index 0a07055..2d9726f 100644 (file)
@@ -384,7 +384,7 @@ struct el_apecs_procdata
                }                                               \
        } while (0)
 
-__EXTERN_INLINE unsigned int apecs_ioread8(void __iomem *xaddr)
+__EXTERN_INLINE unsigned int apecs_ioread8(const void __iomem *xaddr)
 {
        unsigned long addr = (unsigned long) xaddr;
        unsigned long result, base_and_type;
@@ -420,7 +420,7 @@ __EXTERN_INLINE void apecs_iowrite8(u8 b, void __iomem *xaddr)
        *(vuip) ((addr << 5) + base_and_type) = w;
 }
 
-__EXTERN_INLINE unsigned int apecs_ioread16(void __iomem *xaddr)
+__EXTERN_INLINE unsigned int apecs_ioread16(const void __iomem *xaddr)
 {
        unsigned long addr = (unsigned long) xaddr;
        unsigned long result, base_and_type;
@@ -456,7 +456,7 @@ __EXTERN_INLINE void apecs_iowrite16(u16 b, void __iomem *xaddr)
        *(vuip) ((addr << 5) + base_and_type) = w;
 }
 
-__EXTERN_INLINE unsigned int apecs_ioread32(void __iomem *xaddr)
+__EXTERN_INLINE unsigned int apecs_ioread32(const void __iomem *xaddr)
 {
        unsigned long addr = (unsigned long) xaddr;
        if (addr < APECS_DENSE_MEM)
index c706a7f..cb22991 100644 (file)
@@ -342,7 +342,7 @@ struct el_CIA_sysdata_mcheck {
 #define vuip   volatile unsigned int __force *
 #define vulp   volatile unsigned long __force *
 
-__EXTERN_INLINE unsigned int cia_ioread8(void __iomem *xaddr)
+__EXTERN_INLINE unsigned int cia_ioread8(const void __iomem *xaddr)
 {
        unsigned long addr = (unsigned long) xaddr;
        unsigned long result, base_and_type;
@@ -374,7 +374,7 @@ __EXTERN_INLINE void cia_iowrite8(u8 b, void __iomem *xaddr)
        *(vuip) ((addr << 5) + base_and_type) = w;
 }
 
-__EXTERN_INLINE unsigned int cia_ioread16(void __iomem *xaddr)
+__EXTERN_INLINE unsigned int cia_ioread16(const void __iomem *xaddr)
 {
        unsigned long addr = (unsigned long) xaddr;
        unsigned long result, base_and_type;
@@ -404,7 +404,7 @@ __EXTERN_INLINE void cia_iowrite16(u16 b, void __iomem *xaddr)
        *(vuip) ((addr << 5) + base_and_type) = w;
 }
 
-__EXTERN_INLINE unsigned int cia_ioread32(void __iomem *xaddr)
+__EXTERN_INLINE unsigned int cia_ioread32(const void __iomem *xaddr)
 {
        unsigned long addr = (unsigned long) xaddr;
        if (addr < CIA_DENSE_MEM)
index 84d5e5b..ec86314 100644 (file)
@@ -230,7 +230,7 @@ union el_lca {
        } while (0)
 
 
-__EXTERN_INLINE unsigned int lca_ioread8(void __iomem *xaddr)
+__EXTERN_INLINE unsigned int lca_ioread8(const void __iomem *xaddr)
 {
        unsigned long addr = (unsigned long) xaddr;
        unsigned long result, base_and_type;
@@ -266,7 +266,7 @@ __EXTERN_INLINE void lca_iowrite8(u8 b, void __iomem *xaddr)
        *(vuip) ((addr << 5) + base_and_type) = w;
 }
 
-__EXTERN_INLINE unsigned int lca_ioread16(void __iomem *xaddr)
+__EXTERN_INLINE unsigned int lca_ioread16(const void __iomem *xaddr)
 {
        unsigned long addr = (unsigned long) xaddr;
        unsigned long result, base_and_type;
@@ -302,7 +302,7 @@ __EXTERN_INLINE void lca_iowrite16(u16 b, void __iomem *xaddr)
        *(vuip) ((addr << 5) + base_and_type) = w;
 }
 
-__EXTERN_INLINE unsigned int lca_ioread32(void __iomem *xaddr)
+__EXTERN_INLINE unsigned int lca_ioread32(const void __iomem *xaddr)
 {
        unsigned long addr = (unsigned long) xaddr;
        if (addr < LCA_DENSE_MEM)
index cc6fd92..b266e02 100644 (file)
@@ -332,10 +332,10 @@ struct io7 {
 #define vucp   volatile unsigned char __force *
 #define vusp   volatile unsigned short __force *
 
-extern unsigned int marvel_ioread8(void __iomem *);
+extern unsigned int marvel_ioread8(const void __iomem *);
 extern void marvel_iowrite8(u8 b, void __iomem *);
 
-__EXTERN_INLINE unsigned int marvel_ioread16(void __iomem *addr)
+__EXTERN_INLINE unsigned int marvel_ioread16(const void __iomem *addr)
 {
        return __kernel_ldwu(*(vusp)addr);
 }
index b30dc12..cb24d1b 100644 (file)
@@ -267,7 +267,7 @@ extern inline int __mcpcia_is_mmio(unsigned long addr)
        return (addr & 0x80000000UL) == 0;
 }
 
-__EXTERN_INLINE unsigned int mcpcia_ioread8(void __iomem *xaddr)
+__EXTERN_INLINE unsigned int mcpcia_ioread8(const void __iomem *xaddr)
 {
        unsigned long addr = (unsigned long)xaddr & MCPCIA_MEM_MASK;
        unsigned long hose = (unsigned long)xaddr & ~MCPCIA_MEM_MASK;
@@ -291,7 +291,7 @@ __EXTERN_INLINE void mcpcia_iowrite8(u8 b, void __iomem *xaddr)
        *(vuip) ((addr << 5) + hose + 0x00) = w;
 }
 
-__EXTERN_INLINE unsigned int mcpcia_ioread16(void __iomem *xaddr)
+__EXTERN_INLINE unsigned int mcpcia_ioread16(const void __iomem *xaddr)
 {
        unsigned long addr = (unsigned long)xaddr & MCPCIA_MEM_MASK;
        unsigned long hose = (unsigned long)xaddr & ~MCPCIA_MEM_MASK;
@@ -315,7 +315,7 @@ __EXTERN_INLINE void mcpcia_iowrite16(u16 b, void __iomem *xaddr)
        *(vuip) ((addr << 5) + hose + 0x08) = w;
 }
 
-__EXTERN_INLINE unsigned int mcpcia_ioread32(void __iomem *xaddr)
+__EXTERN_INLINE unsigned int mcpcia_ioread32(const void __iomem *xaddr)
 {
        unsigned long addr = (unsigned long)xaddr;
 
index e0b33d0..12bb7ad 100644 (file)
@@ -572,7 +572,7 @@ __EXTERN_INLINE int t2_is_mmio(const volatile void __iomem *addr)
    it doesn't make sense to merge the pio and mmio routines.  */
 
 #define IOPORT(OS, NS)                                                 \
-__EXTERN_INLINE unsigned int t2_ioread##NS(void __iomem *xaddr)                \
+__EXTERN_INLINE unsigned int t2_ioread##NS(const void __iomem *xaddr)          \
 {                                                                      \
        if (t2_is_mmio(xaddr))                                          \
                return t2_read##OS(xaddr);                              \
index a4d0c19..1f6a909 100644 (file)
@@ -150,9 +150,9 @@ static inline void generic_##NAME(TYPE b, QUAL void __iomem *addr)  \
        alpha_mv.mv_##NAME(b, addr);                                    \
 }
 
-REMAP1(unsigned int, ioread8, /**/)
-REMAP1(unsigned int, ioread16, /**/)
-REMAP1(unsigned int, ioread32, /**/)
+REMAP1(unsigned int, ioread8, const)
+REMAP1(unsigned int, ioread16, const)
+REMAP1(unsigned int, ioread32, const)
 REMAP1(u8, readb, const volatile)
 REMAP1(u16, readw, const volatile)
 REMAP1(u32, readl, const volatile)
@@ -307,7 +307,7 @@ static inline int __is_mmio(const volatile void __iomem *addr)
  */
 
 #if IO_CONCAT(__IO_PREFIX,trivial_io_bw)
-extern inline unsigned int ioread8(void __iomem *addr)
+extern inline unsigned int ioread8(const void __iomem *addr)
 {
        unsigned int ret;
        mb();
@@ -316,7 +316,7 @@ extern inline unsigned int ioread8(void __iomem *addr)
        return ret;
 }
 
-extern inline unsigned int ioread16(void __iomem *addr)
+extern inline unsigned int ioread16(const void __iomem *addr)
 {
        unsigned int ret;
        mb();
@@ -359,7 +359,7 @@ extern inline void outw(u16 b, unsigned long port)
 #endif
 
 #if IO_CONCAT(__IO_PREFIX,trivial_io_lq)
-extern inline unsigned int ioread32(void __iomem *addr)
+extern inline unsigned int ioread32(const void __iomem *addr)
 {
        unsigned int ret;
        mb();
@@ -489,10 +489,10 @@ extern inline void writeq(u64 b, volatile void __iomem *addr)
 }
 #endif
 
-#define ioread16be(p) be16_to_cpu(ioread16(p))
-#define ioread32be(p) be32_to_cpu(ioread32(p))
-#define iowrite16be(v,p) iowrite16(cpu_to_be16(v), (p))
-#define iowrite32be(v,p) iowrite32(cpu_to_be32(v), (p))
+#define ioread16be(p) swab16(ioread16(p))
+#define ioread32be(p) swab32(ioread32(p))
+#define iowrite16be(v,p) iowrite16(swab16(v), (p))
+#define iowrite32be(v,p) iowrite32(swab32(v), (p))
 
 #define inb_p          inb
 #define inw_p          inw
index ba3d8f0..a1a29cb 100644 (file)
@@ -7,15 +7,15 @@
 
 #if IO_CONCAT(__IO_PREFIX,trivial_io_bw)
 __EXTERN_INLINE unsigned int
-IO_CONCAT(__IO_PREFIX,ioread8)(void __iomem *a)
+IO_CONCAT(__IO_PREFIX,ioread8)(const void __iomem *a)
 {
-       return __kernel_ldbu(*(volatile u8 __force *)a);
+       return __kernel_ldbu(*(const volatile u8 __force *)a);
 }
 
 __EXTERN_INLINE unsigned int
-IO_CONCAT(__IO_PREFIX,ioread16)(void __iomem *a)
+IO_CONCAT(__IO_PREFIX,ioread16)(const void __iomem *a)
 {
-       return __kernel_ldwu(*(volatile u16 __force *)a);
+       return __kernel_ldwu(*(const volatile u16 __force *)a);
 }
 
 __EXTERN_INLINE void
@@ -33,9 +33,9 @@ IO_CONCAT(__IO_PREFIX,iowrite16)(u16 b, void __iomem *a)
 
 #if IO_CONCAT(__IO_PREFIX,trivial_io_lq)
 __EXTERN_INLINE unsigned int
-IO_CONCAT(__IO_PREFIX,ioread32)(void __iomem *a)
+IO_CONCAT(__IO_PREFIX,ioread32)(const void __iomem *a)
 {
-       return *(volatile u32 __force *)a;
+       return *(const volatile u32 __force *)a;
 }
 
 __EXTERN_INLINE void
@@ -73,14 +73,14 @@ IO_CONCAT(__IO_PREFIX,writew)(u16 b, volatile void __iomem *a)
 __EXTERN_INLINE u8
 IO_CONCAT(__IO_PREFIX,readb)(const volatile void __iomem *a)
 {
-       void __iomem *addr = (void __iomem *)a;
+       const void __iomem *addr = (const void __iomem *)a;
        return IO_CONCAT(__IO_PREFIX,ioread8)(addr);
 }
 
 __EXTERN_INLINE u16
 IO_CONCAT(__IO_PREFIX,readw)(const volatile void __iomem *a)
 {
-       void __iomem *addr = (void __iomem *)a;
+       const void __iomem *addr = (const void __iomem *)a;
        return IO_CONCAT(__IO_PREFIX,ioread16)(addr);
 }
 
index 436dc90..9168951 100644 (file)
@@ -305,7 +305,7 @@ __EXTERN_INLINE int jensen_is_mmio(const volatile void __iomem *addr)
    that it doesn't make sense to merge them.  */
 
 #define IOPORT(OS, NS)                                                 \
-__EXTERN_INLINE unsigned int jensen_ioread##NS(void __iomem *xaddr)    \
+__EXTERN_INLINE unsigned int jensen_ioread##NS(const void __iomem *xaddr)      \
 {                                                                      \
        if (jensen_is_mmio(xaddr))                                      \
                return jensen_read##OS(xaddr - 0x100000000ul);          \
index a6b73c6..a4e96e2 100644 (file)
@@ -46,9 +46,9 @@ struct alpha_machine_vector
        void (*mv_pci_tbi)(struct pci_controller *hose,
                           dma_addr_t start, dma_addr_t end);
 
-       unsigned int (*mv_ioread8)(void __iomem *);
-       unsigned int (*mv_ioread16)(void __iomem *);
-       unsigned int (*mv_ioread32)(void __iomem *);
+       unsigned int (*mv_ioread8)(const void __iomem *);
+       unsigned int (*mv_ioread16)(const void __iomem *);
+       unsigned int (*mv_ioread32)(const void __iomem *);
 
        void (*mv_iowrite8)(u8, void __iomem *);
        void (*mv_iowrite16)(u16, void __iomem *);
index 1fe2b56..1b6f25e 100644 (file)
@@ -20,7 +20,7 @@
 #define get_fs()  (current_thread_info()->addr_limit)
 #define set_fs(x) (current_thread_info()->addr_limit = (x))
 
-#define segment_eq(a, b)       ((a).seg == (b).seg)
+#define uaccess_kernel()       (get_fs().seg == KERNEL_DS.seg)
 
 /*
  * Is an address valid? This does a straightforward calculation rather
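
The segment_eq() conversions in this series all answer the same question, whether the current addr_limit is KERNEL_DS, so each architecture now spells that out as uaccess_kernel(). A sketch of the pattern for architectures whose mm_segment_t is a plain scalar (an assumed generic fallback, not quoted from any one header):

/* Assumed common fallback: architectures with a structured mm_segment_t,
 * like alpha above, compare the .seg member instead of the raw value. */
#ifndef uaccess_kernel
#define uaccess_kernel()        (get_fs() == KERNEL_DS)
#endif

Callers such as the nds32 show_regs() change further down then read as a direct question about the current segment instead of an open-coded comparison.
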
index 4c80d99..4485b77 100644 (file)
@@ -806,7 +806,7 @@ void __iomem *marvel_ioportmap (unsigned long addr)
 }
 
 unsigned int
-marvel_ioread8(void __iomem *xaddr)
+marvel_ioread8(const void __iomem *xaddr)
 {
        unsigned long addr = (unsigned long) xaddr;
        if (__marvel_is_port_kbd(addr))
index 938de13..838586a 100644 (file)
@@ -14,7 +14,7 @@
    "generic", which bumps through the machine vector.  */
 
 unsigned int
-ioread8(void __iomem *addr)
+ioread8(const void __iomem *addr)
 {
        unsigned int ret;
        mb();
@@ -23,7 +23,7 @@ ioread8(void __iomem *addr)
        return ret;
 }
 
-unsigned int ioread16(void __iomem *addr)
+unsigned int ioread16(const void __iomem *addr)
 {
        unsigned int ret;
        mb();
@@ -32,7 +32,7 @@ unsigned int ioread16(void __iomem *addr)
        return ret;
 }
 
-unsigned int ioread32(void __iomem *addr)
+unsigned int ioread32(const void __iomem *addr)
 {
        unsigned int ret;
        mb();
@@ -257,7 +257,7 @@ EXPORT_SYMBOL(readq_relaxed);
 /*
  * Read COUNT 8-bit bytes from port PORT into memory starting at SRC.
  */
-void ioread8_rep(void __iomem *port, void *dst, unsigned long count)
+void ioread8_rep(const void __iomem *port, void *dst, unsigned long count)
 {
        while ((unsigned long)dst & 0x3) {
                if (!count)
@@ -300,7 +300,7 @@ EXPORT_SYMBOL(insb);
  * the interfaces seem to be slow: just using the inlined version
  * of the inw() breaks things.
  */
-void ioread16_rep(void __iomem *port, void *dst, unsigned long count)
+void ioread16_rep(const void __iomem *port, void *dst, unsigned long count)
 {
        if (unlikely((unsigned long)dst & 0x3)) {
                if (!count)
@@ -340,7 +340,7 @@ EXPORT_SYMBOL(insw);
  * but the interfaces seem to be slow: just using the inlined version
  * of the inl() breaks things.
  */
-void ioread32_rep(void __iomem *port, void *dst, unsigned long count)
+void ioread32_rep(const void __iomem *port, void *dst, unsigned long count)
 {
        if (unlikely((unsigned long)dst & 0x3)) {
                while (count--) {
index a28fb21..ec8bed9 100644 (file)
 316    common  mlockall                        sys_mlockall
 317    common  munlockall                      sys_munlockall
 318    common  sysinfo                         sys_sysinfo
-319    common  _sysctl                         sys_sysctl
+319    common  _sysctl                         sys_ni_syscall
 # 320 was sys_idle
 321    common  oldumount                       sys_oldumount
 322    common  swapon                          sys_swapon
index c2303a8..09172f0 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/interrupt.h>
 #include <linux/extable.h>
 #include <linux/uaccess.h>
+#include <linux/perf_event.h>
 
 extern void die_if_kernel(char *,struct pt_regs *,long, unsigned long *);
 
@@ -116,6 +117,7 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
 #endif
        if (user_mode(regs))
                flags |= FAULT_FLAG_USER;
+       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 retry:
        mmap_read_lock(mm);
        vma = find_vma(mm, address);
@@ -148,7 +150,7 @@ retry:
        /* If for any reason at all we couldn't handle the fault,
           make sure we exit gracefully rather than endlessly redo
           the fault.  */
-       fault = handle_mm_fault(vma, address, flags);
+       fault = handle_mm_fault(vma, address, flags, regs);
 
        if (fault_signal_pending(fault, regs))
                return;
@@ -164,10 +166,6 @@ retry:
        }
 
        if (flags & FAULT_FLAG_ALLOW_RETRY) {
-               if (fault & VM_FAULT_MAJOR)
-                       current->maj_flt++;
-               else
-                       current->min_flt++;
                if (fault & VM_FAULT_RETRY) {
                        flags |= FAULT_FLAG_TRIED;
 
index 6a2a5be..871f8ab 100644 (file)
@@ -14,8 +14,7 @@ typedef unsigned long mm_segment_t;
 
 #define KERNEL_DS              MAKE_MM_SEG(0)
 #define USER_DS                        MAKE_MM_SEG(TASK_SIZE)
-
-#define segment_eq(a, b)       ((a) == (b))
+#define uaccess_kernel()       (get_fs() == KERNEL_DS)
 
 #endif /* __ASSEMBLY__ */
 #endif /* __ASMARC_SEGMENT_H */
index 105420c..efeba1f 100644 (file)
@@ -91,7 +91,7 @@ fault:
                 goto fail;
 
        mmap_read_lock(current->mm);
-       ret = fixup_user_fault(current, current->mm, (unsigned long) uaddr,
+       ret = fixup_user_fault(current->mm, (unsigned long) uaddr,
                               FAULT_FLAG_WRITE, NULL);
        mmap_read_unlock(current->mm);
 
@@ -296,11 +296,6 @@ void flush_thread(void)
 {
 }
 
-int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
-{
-       return 0;
-}
-
 int elf_check_arch(const struct elf32_hdr *x)
 {
        unsigned int eflags;
index 7287c79..f5657cb 100644 (file)
@@ -105,6 +105,7 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
        if (write)
                flags |= FAULT_FLAG_WRITE;
 
+       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 retry:
        mmap_read_lock(mm);
 
@@ -130,7 +131,7 @@ retry:
                goto bad_area;
        }
 
-       fault = handle_mm_fault(vma, address, flags);
+       fault = handle_mm_fault(vma, address, flags, regs);
 
        /* Quick path to respond to signals */
        if (fault_signal_pending(fault, regs)) {
@@ -155,22 +156,9 @@ bad_area:
         * Major/minor page fault accounting
         * (in case of retry we only land here once)
         */
-       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
-
-       if (likely(!(fault & VM_FAULT_ERROR))) {
-               if (fault & VM_FAULT_MAJOR) {
-                       tsk->maj_flt++;
-                       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
-                                     regs, address);
-               } else {
-                       tsk->min_flt++;
-                       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
-                                     regs, address);
-               }
-
+       if (likely(!(fault & VM_FAULT_ERROR)))
                /* Normal return path: fault Handled Gracefully */
                return;
-       }
 
        if (!user_mode(regs))
                goto no_context;
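
The fault-handler hunks above and below follow one pattern: handle_mm_fault() now receives the pt_regs of the faulting context, the flat PERF_COUNT_SW_PAGE_FAULTS event is raised once before the retry loop, and the per-task maj_flt/min_flt counters plus the MAJ/MIN perf events are expected to be accounted in common mm code when regs is non-NULL. A sketch of that accounting, mirroring the deleted arch-side code rather than the exact common implementation (account_page_fault is an illustrative name):

/* Roughly what each architecture used to do inline after a successful fault,
 * now assumed to happen once in common code when regs is passed in. */
static void account_page_fault(struct pt_regs *regs, unsigned long address,
                               bool major)
{
        if (major) {
                current->maj_flt++;
                perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
        } else {
                current->min_flt++;
                perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
        }
}
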
index 00602a6..b1147b7 100644 (file)
@@ -84,9 +84,8 @@ endif
 
 # -fstack-protector-strong triggers protection checks in this code,
 # but it is being used too early to link to meaningful stack_chk logic.
-nossp-flags-$(CONFIG_CC_HAS_STACKPROTECTOR_NONE) := -fno-stack-protector
 $(foreach o, $(libfdt_objs) atags_to_fdt.o, \
-       $(eval CFLAGS_$(o) := -I $(srctree)/scripts/dtc/libfdt $(nossp-flags-y)))
+       $(eval CFLAGS_$(o) := -I $(srctree)/scripts/dtc/libfdt -fno-stack-protector))
 
 # These were previously generated C files. When you are building the kernel
 # with O=, make sure to remove the stale files in the output tree. Otherwise,
@@ -103,13 +102,9 @@ clean-files += piggy_data lib1funcs.S ashldi3.S bswapsdi2.S hyp-stub.S
 
 KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
 
-ifeq ($(CONFIG_FUNCTION_TRACER),y)
-ORIG_CFLAGS := $(KBUILD_CFLAGS)
-KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
-endif
-
 ccflags-y := -fpic $(call cc-option,-mno-single-pic-base,) -fno-builtin \
             -I$(obj) $(DISABLE_ARM_SSP_PER_TASK_PLUGIN)
+ccflags-remove-$(CONFIG_FUNCTION_TRACER) += -pg
 asflags-y := -DZIMAGE
 
 # Supply kernel BSS size to the decompressor via a linker symbol.
index 45de2ff..b88d0ca 100644 (file)
                                am33xx_pinmux: pinmux@800 {
                                        compatible = "pinctrl-single";
                                        reg = <0x800 0x238>;
-                                       #pinctrl-cells = <1>;
+                                       #pinctrl-cells = <2>;
                                        pinctrl-single,register-width = <32>;
                                        pinctrl-single,function-mask = <0x7f>;
                                };
index f56ac39..4e49d6c 100644 (file)
@@ -3,7 +3,6 @@ CONFIG_LOCALVERSION="gum"
 CONFIG_SYSVIPC=y
 CONFIG_SYSFS_DEPRECATED_V2=y
 CONFIG_EXPERT=y
-# CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_EPOLL is not set
 # CONFIG_SHMEM is not set
 # CONFIG_VM_EVENT_COUNTERS is not set
index c675bc0..be666f5 100644 (file)
@@ -9,9 +9,6 @@ struct dev_archdata {
 #ifdef CONFIG_DMABOUNCE
        struct dmabounce_device_info *dmabounce;
 #endif
-#ifdef CONFIG_IOMMU_API
-       void *iommu; /* private IOMMU data */
-#endif
 #ifdef CONFIG_ARM_DMA_USE_IOMMU
        struct dma_iommu_mapping        *mapping;
 #endif
index b5fdd30..a13d902 100644 (file)
@@ -76,7 +76,7 @@ static inline void set_fs(mm_segment_t fs)
        modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
 }
 
-#define segment_eq(a, b)       ((a) == (b))
+#define uaccess_kernel()       (get_fs() == KERNEL_DS)
 
 /*
  * We use 33-bit arithmetic here.  Success returns zero, failure returns
@@ -267,7 +267,7 @@ extern int __put_user_8(void *, unsigned long long);
  */
 #define USER_DS                        KERNEL_DS
 
-#define segment_eq(a, b)               (1)
+#define uaccess_kernel()       (true)
 #define __addr_ok(addr)                ((void)(addr), 1)
 #define __range_ok(addr, size) ((void)(addr), 0)
 #define get_fs()               (KERNEL_DS)
index 1b207cf..2134cbd 100644 (file)
@@ -113,7 +113,8 @@ static inline bool arm_vdso_hres_capable(void)
 }
 #define __arch_vdso_hres_capable arm_vdso_hres_capable
 
-static __always_inline u64 __arch_get_hw_counter(int clock_mode)
+static __always_inline u64 __arch_get_hw_counter(int clock_mode,
+                                                const struct vdso_data *vd)
 {
 #ifdef CONFIG_ARM_ARCH_TIMER
        u64 cycle_now;
index 3395be1..8e6ace0 100644 (file)
@@ -281,21 +281,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs)
        return 1;
 }
 
-/*
- * fill in the fpe structure for a core dump...
- */
-int dump_fpu (struct pt_regs *regs, struct user_fp *fp)
-{
-       struct thread_info *thread = current_thread_info();
-       int used_math = thread->used_cp[1] | thread->used_cp[2];
-
-       if (used_math)
-               memcpy(fp, &thread->fpstate.soft, sizeof (*fp));
-
-       return used_math != 0;
-}
-EXPORT_SYMBOL(dump_fpu);
-
 unsigned long get_wchan(struct task_struct *p)
 {
        struct stackframe frame;
index ab25689..c9dc912 100644 (file)
@@ -713,7 +713,9 @@ struct page *get_signal_page(void)
 /* Defer to generic check */
 asmlinkage void addr_limit_check_failed(void)
 {
+#ifdef CONFIG_MMU
        addr_limit_user_check();
+#endif
 }
 
 #ifdef CONFIG_DEBUG_RSEQ
index c6550ed..efa4020 100644 (file)
@@ -202,7 +202,8 @@ static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
 
 static vm_fault_t __kprobes
 __do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
-               unsigned int flags, struct task_struct *tsk)
+               unsigned int flags, struct task_struct *tsk,
+               struct pt_regs *regs)
 {
        struct vm_area_struct *vma;
        vm_fault_t fault;
@@ -224,7 +225,7 @@ good_area:
                goto out;
        }
 
-       return handle_mm_fault(vma, addr & PAGE_MASK, flags);
+       return handle_mm_fault(vma, addr & PAGE_MASK, flags, regs);
 
 check_stack:
        /* Don't allow expansion below FIRST_USER_ADDRESS */
@@ -266,6 +267,8 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
        if ((fsr & FSR_WRITE) && !(fsr & FSR_CM))
                flags |= FAULT_FLAG_WRITE;
 
+       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
+
        /*
         * As per x86, we may deadlock here.  However, since the kernel only
         * validly references user space from well defined areas of the code,
@@ -290,7 +293,7 @@ retry:
 #endif
        }
 
-       fault = __do_page_fault(mm, addr, fsr, flags, tsk);
+       fault = __do_page_fault(mm, addr, fsr, flags, tsk, regs);
 
        /* If we need to retry but a fatal signal is pending, handle the
         * signal first. We do not need to release the mmap_lock because
@@ -302,23 +305,7 @@ retry:
                return 0;
        }
 
-       /*
-        * Major/minor page fault accounting is only done on the
-        * initial attempt. If we go through a retry, it is extremely
-        * likely that the page will be found in page cache at that point.
-        */
-
-       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
        if (!(fault & VM_FAULT_ERROR) && flags & FAULT_FLAG_ALLOW_RETRY) {
-               if (fault & VM_FAULT_MAJOR) {
-                       tsk->maj_flt++;
-                       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
-                                       regs, addr);
-               } else {
-                       tsk->min_flt++;
-                       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
-                                       regs, addr);
-               }
                if (fault & VM_FAULT_RETRY) {
                        flags |= FAULT_FLAG_TRIED;
                        goto retry;
index 7e8ee4a..171077c 100644 (file)
 146    common  writev                  sys_writev
 147    common  getsid                  sys_getsid
 148    common  fdatasync               sys_fdatasync
-149    common  _sysctl                 sys_sysctl
+149    common  _sysctl                 sys_ni_syscall
 150    common  mlock                   sys_mlock
 151    common  munlock                 sys_munlock
 152    common  mlockall                sys_mlockall
index 12b778d..9964987 100644 (file)
@@ -6,9 +6,6 @@
 #define __ASM_DEVICE_H
 
 struct dev_archdata {
-#ifdef CONFIG_IOMMU_API
-       void *iommu;                    /* private IOMMU data */
-#endif
 };
 
 struct pdev_archdata {
index 8d7c466..991dd5f 100644 (file)
@@ -50,7 +50,7 @@ static inline void set_fs(mm_segment_t fs)
                                CONFIG_ARM64_UAO));
 }
 
-#define segment_eq(a, b)       ((a) == (b))
+#define uaccess_kernel()       (get_fs() == KERNEL_DS)
 
 /*
  * Test whether a block of memory is a valid user space address.
index 17e81bd..734860a 100644 (file)
@@ -308,8 +308,8 @@ __SYSCALL(__NR_writev, compat_sys_writev)
 __SYSCALL(__NR_getsid, sys_getsid)
 #define __NR_fdatasync 148
 __SYSCALL(__NR_fdatasync, sys_fdatasync)
-#define __NR__sysctl 149
-__SYSCALL(__NR__sysctl, compat_sys_sysctl)
+                       /* 149 was sys_sysctl */
+__SYSCALL(149, sys_ni_syscall)
 #define __NR_mlock 150
 __SYSCALL(__NR_mlock, sys_mlock)
 #define __NR_munlock 151
index 75cbae6..7508b0a 100644 (file)
@@ -103,7 +103,8 @@ int clock_getres32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
        return ret;
 }
 
-static __always_inline u64 __arch_get_hw_counter(s32 clock_mode)
+static __always_inline u64 __arch_get_hw_counter(s32 clock_mode,
+                                                const struct vdso_data *vd)
 {
        u64 res;
 
index 9c29ad3..631ab12 100644 (file)
@@ -64,7 +64,8 @@ int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
        return ret;
 }
 
-static __always_inline u64 __arch_get_hw_counter(s32 clock_mode)
+static __always_inline u64 __arch_get_hw_counter(s32 clock_mode,
+                                                const struct vdso_data *vd)
 {
        u64 res;
 
index dab8826..7689f20 100644 (file)
@@ -180,7 +180,7 @@ static __kprobes unsigned long _sdei_handler(struct pt_regs *regs,
 
        /*
         * We didn't take an exception to get here, set PAN. UAO will be cleared
-        * by sdei_event_handler()s set_fs(USER_DS) call.
+        * by sdei_event_handler()'s force_uaccess_begin() call.
         */
        __uaccess_enable_hw_pan();
 
index 8afb238..f07333e 100644 (file)
@@ -404,7 +404,8 @@ static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *re
 #define VM_FAULT_BADACCESS     0x020000
 
 static vm_fault_t __do_page_fault(struct mm_struct *mm, unsigned long addr,
-                          unsigned int mm_flags, unsigned long vm_flags)
+                                 unsigned int mm_flags, unsigned long vm_flags,
+                                 struct pt_regs *regs)
 {
        struct vm_area_struct *vma = find_vma(mm, addr);
 
@@ -428,7 +429,7 @@ static vm_fault_t __do_page_fault(struct mm_struct *mm, unsigned long addr,
         */
        if (!(vma->vm_flags & vm_flags))
                return VM_FAULT_BADACCESS;
-       return handle_mm_fault(vma, addr & PAGE_MASK, mm_flags);
+       return handle_mm_fault(vma, addr & PAGE_MASK, mm_flags, regs);
 }
 
 static bool is_el0_instruction_abort(unsigned int esr)
@@ -450,7 +451,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
 {
        const struct fault_info *inf;
        struct mm_struct *mm = current->mm;
-       vm_fault_t fault, major = 0;
+       vm_fault_t fault;
        unsigned long vm_flags = VM_ACCESS_FLAGS;
        unsigned int mm_flags = FAULT_FLAG_DEFAULT;
 
@@ -516,8 +517,7 @@ retry:
 #endif
        }
 
-       fault = __do_page_fault(mm, addr, mm_flags, vm_flags);
-       major |= fault & VM_FAULT_MAJOR;
+       fault = __do_page_fault(mm, addr, mm_flags, vm_flags, regs);
 
        /* Quick path to respond to signals */
        if (fault_signal_pending(fault, regs)) {
@@ -538,25 +538,8 @@ retry:
         * Handle the "normal" (no error) case first.
         */
        if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP |
-                             VM_FAULT_BADACCESS)))) {
-               /*
-                * Major/minor page fault accounting is only done
-                * once. If we go through a retry, it is extremely
-                * likely that the page will be found in page cache at
-                * that point.
-                */
-               if (major) {
-                       current->maj_flt++;
-                       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs,
-                                     addr);
-               } else {
-                       current->min_flt++;
-                       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs,
-                                     addr);
-               }
-
+                             VM_FAULT_BADACCESS))))
                return 0;
-       }
 
        /*
         * If we are in kernel mode at this point, we have no context to
index aafcee3..73f8b49 100644 (file)
@@ -461,13 +461,3 @@ void __init arm64_numa_init(void)
 
        numa_init(dummy_numa_init);
 }
-
-/*
- * We hope that we will be hotplugging memory on nodes we already know about,
- * such that acpi_get_node() succeeds and we never fall back to this...
- */
-int memory_add_physaddr_to_nid(u64 addr)
-{
-       pr_warn("Unknown node for memory at 0x%llx, assuming node 0\n", addr);
-       return 0;
-}
index db2640d..79ede9b 100644 (file)
@@ -13,6 +13,6 @@ typedef struct {
 #define USER_DS                        ((mm_segment_t) { 0x80000000UL })
 #define get_fs()               (current_thread_info()->addr_limit)
 #define set_fs(x)              (current_thread_info()->addr_limit = (x))
-#define segment_eq(a, b)       ((a).seg == (b).seg)
+#define uaccess_kernel()       (get_fs().seg == KERNEL_DS.seg)
 
 #endif /* __ASM_CSKY_SEGMENT_H */
index b1dce9f..081b178 100644 (file)
@@ -150,7 +150,8 @@ good_area:
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
-       fault = handle_mm_fault(vma, address, write ? FAULT_FLAG_WRITE : 0);
+       fault = handle_mm_fault(vma, address, write ? FAULT_FLAG_WRITE : 0,
+                               regs);
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
@@ -160,16 +161,6 @@ good_area:
                        goto bad_area;
                BUG();
        }
-       if (fault & VM_FAULT_MAJOR) {
-               tsk->maj_flt++;
-               perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs,
-                             address);
-       } else {
-               tsk->min_flt++;
-               perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs,
-                             address);
-       }
-
        mmap_read_unlock(mm);
        return;
 
index a407978..3795072 100644 (file)
@@ -33,7 +33,7 @@ static inline mm_segment_t get_fs(void)
        return USER_DS;
 }
 
-#define segment_eq(a, b)       ((a).seg == (b).seg)
+#define uaccess_kernel()       (get_fs().seg == KERNEL_DS.seg)
 
 #endif /* __ASSEMBLY__ */
 
index d294e71..dfd322c 100644 (file)
@@ -154,15 +154,6 @@ unsigned long get_wchan(struct task_struct *p)
 }
 
 /*
- * Required placeholder.
- */
-int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
-{
-       return 0;
-}
-
-
-/*
  * Called on the exit path of event entry; see vm_entry.S
  *
  * Interrupts will already be disabled.
index cd3808f..ef32c5a 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/signal.h>
 #include <linux/extable.h>
 #include <linux/hardirq.h>
+#include <linux/perf_event.h>
 
 /*
  * Decode of hardware exception sends us to one of several
@@ -53,6 +54,8 @@ void do_page_fault(unsigned long address, long cause, struct pt_regs *regs)
 
        if (user_mode(regs))
                flags |= FAULT_FLAG_USER;
+
+       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 retry:
        mmap_read_lock(mm);
        vma = find_vma(mm, address);
@@ -88,7 +91,7 @@ good_area:
                break;
        }
 
-       fault = handle_mm_fault(vma, address, flags);
+       fault = handle_mm_fault(vma, address, flags, regs);
 
        if (fault_signal_pending(fault, regs))
                return;
@@ -96,10 +99,6 @@ good_area:
        /* The most common case -- we are done. */
        if (likely(!(fault & VM_FAULT_ERROR))) {
                if (flags & FAULT_FLAG_ALLOW_RETRY) {
-                       if (fault & VM_FAULT_MAJOR)
-                               current->maj_flt++;
-                       else
-                               current->min_flt++;
                        if (fault & VM_FAULT_RETRY) {
                                flags |= FAULT_FLAG_TRIED;
                                goto retry;
index 3eb3974..918b198 100644 (file)
@@ -6,9 +6,6 @@
 #define _ASM_IA64_DEVICE_H
 
 struct dev_archdata {
-#ifdef CONFIG_IOMMU_API
-       void *iommu; /* hook for IOMMU specific extension */
-#endif
 };
 
 struct pdev_archdata {
index 1085089..779b697 100644 (file)
@@ -366,6 +366,15 @@ pgd_index (unsigned long address)
 }
 #define pgd_index pgd_index
 
+/*
+ * In the kernel's mapped region we know everything is in region number 5, so
+ * as an optimisation its PGD already points to the area for that region.
+ * However, this also means that we cannot use pgd_index() and we must
+ * never add the region here.
+ */
+#define pgd_offset_k(addr) \
+       (init_mm.pgd + (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)))
+
 /* Look up a pgd entry in the gate area.  On IA-64, the gate-area
    resides in the kernel-mapped segment, hence we use pgd_offset_k()
    here.  */
index 7847ae4..aa92234 100644 (file)
@@ -18,7 +18,6 @@
 #include <linux/bitops.h>
 #include <linux/irqreturn.h>
 
-#include <asm/io.h>
 #include <asm/param.h>
 #include <asm/processor.h>
 #include <asm/ptrace.h>
@@ -44,11 +43,6 @@ ia64_get_lid (void)
 
 #ifdef CONFIG_SMP
 
-#define XTP_OFFSET             0x1e0008
-
-#define SMP_IRQ_REDIRECTION    (1 << 0)
-#define SMP_IPI_REDIRECTION    (1 << 1)
-
 #define raw_smp_processor_id() (current_thread_info()->cpu)
 
 extern struct smp_boot_data {
@@ -62,7 +56,6 @@ extern cpumask_t cpu_core_map[NR_CPUS];
 DECLARE_PER_CPU_SHARED_ALIGNED(cpumask_t, cpu_sibling_map);
 extern int smp_num_siblings;
 extern void __iomem *ipi_base_addr;
-extern unsigned char smp_int_redirect;
 
 extern volatile int ia64_cpu_to_sapicid[];
 #define cpu_physical_id(i)     ia64_cpu_to_sapicid[i]
@@ -84,34 +77,6 @@ cpu_logical_id (int cpuid)
        return i;
 }
 
-/*
- * XTP control functions:
- *     min_xtp   : route all interrupts to this CPU
- *     normal_xtp: nominal XTP value
- *     max_xtp   : never deliver interrupts to this CPU.
- */
-
-static inline void
-min_xtp (void)
-{
-       if (smp_int_redirect & SMP_IRQ_REDIRECTION)
-               writeb(0x00, ipi_base_addr + XTP_OFFSET); /* XTP to min */
-}
-
-static inline void
-normal_xtp (void)
-{
-       if (smp_int_redirect & SMP_IRQ_REDIRECTION)
-               writeb(0x08, ipi_base_addr + XTP_OFFSET); /* XTP normal */
-}
-
-static inline void
-max_xtp (void)
-{
-       if (smp_int_redirect & SMP_IRQ_REDIRECTION)
-               writeb(0x0f, ipi_base_addr + XTP_OFFSET); /* Set XTP to max */
-}
-
 /* Upping and downing of CPUs */
 extern int __cpu_disable (void);
 extern void __cpu_die (unsigned int cpu);
index 8aa473a..179243c 100644 (file)
@@ -50,7 +50,7 @@
 #define get_fs()  (current_thread_info()->addr_limit)
 #define set_fs(x) (current_thread_info()->addr_limit = (x))
 
-#define segment_eq(a, b)       ((a).seg == (b).seg)
+#define uaccess_kernel()       (get_fs().seg == KERNEL_DS.seg)
 
 /*
  * When accessing user memory, we need to make sure the entire area really is in
diff --git a/arch/ia64/include/asm/xtp.h b/arch/ia64/include/asm/xtp.h
new file mode 100644 (file)
index 0000000..5bf1d70
--- /dev/null
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_IA64_XTP_H
+#define _ASM_IA64_XTP_H
+
+#include <asm/io.h>
+
+#ifdef CONFIG_SMP
+
+#define XTP_OFFSET             0x1e0008
+
+#define SMP_IRQ_REDIRECTION    (1 << 0)
+#define SMP_IPI_REDIRECTION    (1 << 1)
+
+extern unsigned char smp_int_redirect;
+
+/*
+ * XTP control functions:
+ *     min_xtp   : route all interrupts to this CPU
+ *     normal_xtp: nominal XTP value
+ *     max_xtp   : never deliver interrupts to this CPU.
+ */
+
+static inline void
+min_xtp (void)
+{
+       if (smp_int_redirect & SMP_IRQ_REDIRECTION)
+               writeb(0x00, ipi_base_addr + XTP_OFFSET); /* XTP to min */
+}
+
+static inline void
+normal_xtp (void)
+{
+       if (smp_int_redirect & SMP_IRQ_REDIRECTION)
+               writeb(0x08, ipi_base_addr + XTP_OFFSET); /* XTP normal */
+}
+
+static inline void
+max_xtp (void)
+{
+       if (smp_int_redirect & SMP_IRQ_REDIRECTION)
+               writeb(0x0f, ipi_base_addr + XTP_OFFSET); /* Set XTP to max */
+}
+
+#endif /* CONFIG_SMP */
+
+#endif /* _ASM_IA64_XTP_H */
index fad4db2..35adcf8 100644 (file)
@@ -95,6 +95,7 @@
 #include <asm/iosapic.h>
 #include <asm/processor.h>
 #include <asm/ptrace.h>
+#include <asm/xtp.h>
 
 #undef DEBUG_INTERRUPT_ROUTING
 
index 0a8e5e5..ecef17c 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/kernel_stat.h>
 
 #include <asm/mca.h>
+#include <asm/xtp.h>
 
 /*
  * 'what should we do if we get a hw irq event on an illegal vector'.
index ec0b40f..f19cb97 100644 (file)
@@ -47,6 +47,7 @@
 #include <linux/uaccess.h>
 #include <asm/unwind.h>
 #include <asm/user.h>
+#include <asm/xtp.h>
 
 #include "entry.h"
 
@@ -531,51 +532,17 @@ do_copy_task_regs (struct task_struct *task, struct unw_frame_info *info, void *
 }
 
 void
-do_dump_task_fpu (struct task_struct *task, struct unw_frame_info *info, void *arg)
-{
-       elf_fpreg_t *dst = arg;
-       int i;
-
-       memset(dst, 0, sizeof(elf_fpregset_t)); /* don't leak any "random" bits */
-
-       if (unw_unwind_to_user(info) < 0)
-               return;
-
-       /* f0 is 0.0, f1 is 1.0 */
-
-       for (i = 2; i < 32; ++i)
-               unw_get_fr(info, i, dst + i);
-
-       ia64_flush_fph(task);
-       if ((task->thread.flags & IA64_THREAD_FPH_VALID) != 0)
-               memcpy(dst + 32, task->thread.fph, 96*16);
-}
-
-void
 do_copy_regs (struct unw_frame_info *info, void *arg)
 {
        do_copy_task_regs(current, info, arg);
 }
 
 void
-do_dump_fpu (struct unw_frame_info *info, void *arg)
-{
-       do_dump_task_fpu(current, info, arg);
-}
-
-void
 ia64_elf_core_copy_regs (struct pt_regs *pt, elf_gregset_t dst)
 {
        unw_init_running(do_copy_regs, dst);
 }
 
-int
-dump_fpu (struct pt_regs *pt, elf_fpregset_t dst)
-{
-       unw_init_running(do_dump_fpu, dst);
-       return 1;       /* f0-f31 are always valid so we always return 1 */
-}
-
 /*
  * Flush thread state.  This is called when a thread does an execve().
  */
index c455ece..e4f0705 100644 (file)
@@ -18,6 +18,7 @@
 #include <asm/page.h>
 #include <asm/sal.h>
 #include <asm/pal.h>
+#include <asm/xtp.h>
 
  __cacheline_aligned DEFINE_SPINLOCK(sal_lock);
 unsigned long sal_platform_features;
index d2d440f..dd595fb 100644 (file)
@@ -65,6 +65,7 @@
 #include <asm/tlbflush.h>
 #include <asm/unistd.h>
 #include <asm/uv/uv.h>
+#include <asm/xtp.h>
 
 #if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
 # error "struct cpuinfo_ia64 too big!"
index 0e27420..7b7b64e 100644 (file)
@@ -45,6 +45,7 @@
 #include <asm/tlbflush.h>
 #include <asm/unistd.h>
 #include <asm/mca.h>
+#include <asm/xtp.h>
 
 /*
  * Note: alignment of 4 entries/cacheline was empirically determined
index ced9c83..f52a41f 100644 (file)
 123    common  writev                          sys_writev
 124    common  pread64                         sys_pread64
 125    common  pwrite64                        sys_pwrite64
-126    common  _sysctl                         sys_sysctl
+126    common  _sysctl                         sys_ni_syscall
 127    common  mmap                            sys_mmap
 128    common  munmap                          sys_munmap
 129    common  mlock                           sys_mlock
index 3a4dec3..cd9766d 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/kdebug.h>
 #include <linux/prefetch.h>
 #include <linux/uaccess.h>
+#include <linux/perf_event.h>
 
 #include <asm/processor.h>
 #include <asm/exception.h>
@@ -105,6 +106,8 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
                flags |= FAULT_FLAG_USER;
        if (mask & VM_WRITE)
                flags |= FAULT_FLAG_WRITE;
+
+       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 retry:
        mmap_read_lock(mm);
 
@@ -143,7 +146,7 @@ retry:
         * sure we exit gracefully rather than endlessly redo the
         * fault.
         */
-       fault = handle_mm_fault(vma, address, flags);
+       fault = handle_mm_fault(vma, address, flags, regs);
 
        if (fault_signal_pending(fault, regs))
                return;
@@ -166,10 +169,6 @@ retry:
        }
 
        if (flags & FAULT_FLAG_ALLOW_RETRY) {
-               if (fault & VM_FAULT_MAJOR)
-                       current->maj_flt++;
-               else
-                       current->min_flt++;
                if (fault & VM_FAULT_RETRY) {
                        flags |= FAULT_FLAG_TRIED;
 
index 5e1015e..f349642 100644 (file)
@@ -106,7 +106,5 @@ int memory_add_physaddr_to_nid(u64 addr)
                return 0;
        return nid;
 }
-
-EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
 #endif
 #endif
index 6663f17..6f2f38d 100644 (file)
@@ -16,6 +16,7 @@ config M68K
        select HAVE_DEBUG_BUGVERBOSE
        select GENERIC_IRQ_SHOW
        select GENERIC_ATOMIC64
+       select NO_DMA if !MMU && !COLDFIRE
        select HAVE_UID16
        select VIRT_TO_BUS
        select ARCH_HAVE_NMI_SAFE_CMPXCHG if RMW_INSNS
@@ -59,9 +60,6 @@ config TIME_LOW_RES
 config NO_IOPORT_MAP
        def_bool y
 
-config NO_DMA
-       def_bool (MMU && SUN3) || (!MMU && !COLDFIRE)
-
 config ZONE_DMA
        bool
        default y
index a82651d..17e8c3a 100644 (file)
@@ -126,6 +126,7 @@ config SUN3
        depends on MMU
        depends on !MMU_MOTOROLA
        select MMU_SUN3 if MMU
+       select NO_DMA
        select M68020
        help
          This option enables support for the Sun 3 series of workstations
index c668655..2b5e68a 100644 (file)
@@ -52,7 +52,7 @@ static inline void set_fs(mm_segment_t val)
 #define set_fs(x)      (current_thread_info()->addr_limit = (x))
 #endif
 
-#define segment_eq(a, b) ((a).seg == (b).seg)
+#define uaccess_kernel()       (get_fs().seg == KERNEL_DS.seg)
 
 #endif /* __ASSEMBLY__ */
 
index 191e75a..5337bc2 100644 (file)
@@ -85,10 +85,10 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
 static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
 {
        if (vma->vm_mm == current->active_mm) {
-               mm_segment_t old_fs = get_fs();
-               set_fs(USER_DS);
+               mm_segment_t old_fs = force_uaccess_begin();
+
                __flush_tlb_one(addr);
-               set_fs(old_fs);
+               force_uaccess_end(old_fs);
        }
 }
 
index 1a4822d..81fc799 100644 (file)
 146    common  writev                          sys_writev
 147    common  getsid                          sys_getsid
 148    common  fdatasync                       sys_fdatasync
-149    common  _sysctl                         sys_sysctl
+149    common  _sysctl                         sys_ni_syscall
 150    common  mlock                           sys_mlock
 151    common  munlock                         sys_munlock
 152    common  mlockall                        sys_mlockall
index 508abb6..795f483 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/uaccess.h>
+#include <linux/perf_event.h>
 
 #include <asm/setup.h>
 #include <asm/traps.h>
@@ -84,6 +85,8 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
 
        if (user_mode(regs))
                flags |= FAULT_FLAG_USER;
+
+       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 retry:
        mmap_read_lock(mm);
 
@@ -134,7 +137,7 @@ good_area:
         * the fault.
         */
 
-       fault = handle_mm_fault(vma, address, flags);
+       fault = handle_mm_fault(vma, address, flags, regs);
        pr_debug("handle_mm_fault returns %x\n", fault);
 
        if (fault_signal_pending(fault, regs))
@@ -150,16 +153,7 @@ good_area:
                BUG();
        }
 
-       /*
-        * Major/minor page fault accounting is only done on the
-        * initial attempt. If we go through a retry, it is extremely
-        * likely that the page will be found in page cache at that point.
-        */
        if (flags & FAULT_FLAG_ALLOW_RETRY) {
-               if (fault & VM_FAULT_MAJOR)
-                       current->maj_flt++;
-               else
-                       current->min_flt++;
                if (fault & VM_FAULT_RETRY) {
                        flags |= FAULT_FLAG_TRIED;
 
index 6723c56..304b04f 100644 (file)
@@ -41,7 +41,7 @@
 # define get_fs()      (current_thread_info()->addr_limit)
 # define set_fs(val)   (current_thread_info()->addr_limit = (val))
 
-# define segment_eq(a, b)      ((a).seg == (b).seg)
+# define uaccess_kernel()      (get_fs().seg == KERNEL_DS.seg)
 
 #ifndef CONFIG_MMU
 
index a3f4be8..b4e2639 100644 (file)
 146    common  writev                          sys_writev
 147    common  getsid                          sys_getsid
 148    common  fdatasync                       sys_fdatasync
-149    common  _sysctl                         sys_sysctl
+149    common  _sysctl                         sys_ni_syscall
 150    common  mlock                           sys_mlock
 151    common  munlock                         sys_munlock
 152    common  mlockall                        sys_mlockall
index a2bfe58..b3fed2c 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/mman.h>
 #include <linux/mm.h>
 #include <linux/interrupt.h>
+#include <linux/perf_event.h>
 
 #include <asm/page.h>
 #include <asm/mmu.h>
@@ -121,6 +122,8 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
        if (user_mode(regs))
                flags |= FAULT_FLAG_USER;
 
+       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
+
        /* When running in the kernel we expect faults to occur only to
         * addresses in user space.  All other faults represent errors in the
         * kernel and should generate an OOPS.  Unfortunately, in the case of an
@@ -214,7 +217,7 @@ good_area:
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
-       fault = handle_mm_fault(vma, address, flags);
+       fault = handle_mm_fault(vma, address, flags, regs);
 
        if (fault_signal_pending(fault, regs))
                return;
@@ -230,10 +233,6 @@ good_area:
        }
 
        if (flags & FAULT_FLAG_ALLOW_RETRY) {
-               if (unlikely(fault & VM_FAULT_MAJOR))
-                       current->maj_flt++;
-               else
-                       current->min_flt++;
                if (fault & VM_FAULT_RETRY) {
                        flags |= FAULT_FLAG_TRIED;
 
index 6b471cd..e924c81 100644 (file)
@@ -17,7 +17,6 @@ CONFIG_CGROUP_CPUACCT=y
 CONFIG_NAMESPACES=y
 CONFIG_USER_NS=y
 CONFIG_CC_OPTIMIZE_FOR_SIZE=y
-CONFIG_SYSCTL_SYSCALL=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_EMBEDDED=y
 # CONFIG_VM_EVENT_COUNTERS is not set
index 62b298c..61fc01f 100644 (file)
@@ -72,7 +72,7 @@ extern u64 __ua_limit;
 #define get_fs()       (current_thread_info()->addr_limit)
 #define set_fs(x)      (current_thread_info()->addr_limit = (x))
 
-#define segment_eq(a, b)       ((a).seg == (b).seg)
+#define uaccess_kernel()       (get_fs().seg == KERNEL_DS.seg)
 
 /*
  * eva_kernel_access() - determine whether kernel memory access on an EVA system
index c63ddca..2203e2d 100644 (file)
@@ -167,7 +167,8 @@ static __always_inline u64 read_gic_count(const struct vdso_data *data)
 
 #endif
 
-static __always_inline u64 __arch_get_hw_counter(s32 clock_mode)
+static __always_inline u64 __arch_get_hw_counter(s32 clock_mode,
+                                                const struct vdso_data *vd)
 {
 #ifdef CONFIG_CSRC_R4K
        if (clock_mode == VDSO_CLOCKMODE_R4K)
@@ -175,7 +176,7 @@ static __always_inline u64 __arch_get_hw_counter(s32 clock_mode)
 #endif
 #ifdef CONFIG_CLKSRC_MIPS_GIC
        if (clock_mode == VDSO_CLOCKMODE_GIC)
-               return read_gic_count(get_vdso_data());
+               return read_gic_count(vd);
 #endif
        /*
         * Core checks mode already. So this raced against a concurrent
index 6b4ee92..f9df9ed 100644 (file)
 149    n32     munlockall                      sys_munlockall
 150    n32     vhangup                         sys_vhangup
 151    n32     pivot_root                      sys_pivot_root
-152    n32     _sysctl                         compat_sys_sysctl
+152    n32     _sysctl                         sys_ni_syscall
 153    n32     prctl                           sys_prctl
 154    n32     adjtimex                        sys_adjtimex_time32
 155    n32     setrlimit                       compat_sys_setrlimit
index 391acbf..557f995 100644 (file)
 149    n64     munlockall                      sys_munlockall
 150    n64     vhangup                         sys_vhangup
 151    n64     pivot_root                      sys_pivot_root
-152    n64     _sysctl                         sys_sysctl
+152    n64     _sysctl                         sys_ni_syscall
 153    n64     prctl                           sys_prctl
 154    n64     adjtimex                        sys_adjtimex
 155    n64     setrlimit                       sys_setrlimit
index 5727c51..195b43c 100644 (file)
 150    o32     unused150                       sys_ni_syscall
 151    o32     getsid                          sys_getsid
 152    o32     fdatasync                       sys_fdatasync
-153    o32     _sysctl                         sys_sysctl                      compat_sys_sysctl
+153    o32     _sysctl                         sys_ni_syscall
 154    o32     mlock                           sys_mlock
 155    o32     munlock                         sys_munlock
 156    o32     mlockall                        sys_mlockall
index 0adce60..126a5f3 100644 (file)
@@ -191,17 +191,16 @@ static void emulate_load_store_insn(struct pt_regs *regs,
                         * memory, so we need to "switch" the address limit to
                         * user space, so that address check can work properly.
                         */
-                       seg = get_fs();
-                       set_fs(USER_DS);
+                       seg = force_uaccess_begin();
                        switch (insn.spec3_format.func) {
                        case lhe_op:
                                if (!access_ok(addr, 2)) {
-                                       set_fs(seg);
+                                       force_uaccess_end(seg);
                                        goto sigbus;
                                }
                                LoadHWE(addr, value, res);
                                if (res) {
-                                       set_fs(seg);
+                                       force_uaccess_end(seg);
                                        goto fault;
                                }
                                compute_return_epc(regs);
@@ -209,12 +208,12 @@ static void emulate_load_store_insn(struct pt_regs *regs,
                                break;
                        case lwe_op:
                                if (!access_ok(addr, 4)) {
-                                       set_fs(seg);
+                                       force_uaccess_end(seg);
                                        goto sigbus;
                                }
                                LoadWE(addr, value, res);
                                if (res) {
-                                       set_fs(seg);
+                                       force_uaccess_end(seg);
                                        goto fault;
                                }
                                compute_return_epc(regs);
@@ -222,12 +221,12 @@ static void emulate_load_store_insn(struct pt_regs *regs,
                                break;
                        case lhue_op:
                                if (!access_ok(addr, 2)) {
-                                       set_fs(seg);
+                                       force_uaccess_end(seg);
                                        goto sigbus;
                                }
                                LoadHWUE(addr, value, res);
                                if (res) {
-                                       set_fs(seg);
+                                       force_uaccess_end(seg);
                                        goto fault;
                                }
                                compute_return_epc(regs);
@@ -235,35 +234,35 @@ static void emulate_load_store_insn(struct pt_regs *regs,
                                break;
                        case she_op:
                                if (!access_ok(addr, 2)) {
-                                       set_fs(seg);
+                                       force_uaccess_end(seg);
                                        goto sigbus;
                                }
                                compute_return_epc(regs);
                                value = regs->regs[insn.spec3_format.rt];
                                StoreHWE(addr, value, res);
                                if (res) {
-                                       set_fs(seg);
+                                       force_uaccess_end(seg);
                                        goto fault;
                                }
                                break;
                        case swe_op:
                                if (!access_ok(addr, 4)) {
-                                       set_fs(seg);
+                                       force_uaccess_end(seg);
                                        goto sigbus;
                                }
                                compute_return_epc(regs);
                                value = regs->regs[insn.spec3_format.rt];
                                StoreWE(addr, value, res);
                                if (res) {
-                                       set_fs(seg);
+                                       force_uaccess_end(seg);
                                        goto fault;
                                }
                                break;
                        default:
-                               set_fs(seg);
+                               force_uaccess_end(seg);
                                goto sigill;
                        }
-                       set_fs(seg);
+                       force_uaccess_end(seg);
                }
 #endif
                break;
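
force_uaccess_begin()/force_uaccess_end() bundle the old get_fs(); set_fs(USER_DS); ... set_fs(seg) sequence into one matched pair, as used in the MIPS unaligned-access emulation just above and in m68k's flush_tlb_page() earlier in this diff. A sketch of what the pair presumably reduces to, built on the same get_fs()/set_fs() primitives the old code used (assumed shape, not quoted from the series):

/* Assumed shape of the helpers: save the current segment, force USER_DS,
 * and restore the saved value on the way out. */
static inline mm_segment_t force_uaccess_begin(void)
{
        mm_segment_t fs = get_fs();

        set_fs(USER_DS);
        return fs;
}

static inline void force_uaccess_end(mm_segment_t oldfs)
{
        set_fs(oldfs);
}
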
index 01b168a..7c871b1 100644 (file)
@@ -96,6 +96,8 @@ static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write,
 
        if (user_mode(regs))
                flags |= FAULT_FLAG_USER;
+
+       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 retry:
        mmap_read_lock(mm);
        vma = find_vma(mm, address);
@@ -152,12 +154,11 @@ good_area:
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
-       fault = handle_mm_fault(vma, address, flags);
+       fault = handle_mm_fault(vma, address, flags, regs);
 
        if (fault_signal_pending(fault, regs))
                return;
 
-       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
@@ -168,15 +169,6 @@ good_area:
                BUG();
        }
        if (flags & FAULT_FLAG_ALLOW_RETRY) {
-               if (fault & VM_FAULT_MAJOR) {
-                       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
-                                                 regs, address);
-                       tsk->maj_flt++;
-               } else {
-                       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
-                                                 regs, address);
-                       tsk->min_flt++;
-               }
                if (fault & VM_FAULT_RETRY) {
                        flags |= FAULT_FLAG_TRIED;
 
index 2e64c76..57fe832 100644 (file)
@@ -35,8 +35,7 @@ cflags-vdso := $(ccflags-vdso) \
        -O3 -g -fPIC -fno-strict-aliasing -fno-common -fno-builtin -G 0 \
        -mrelax-pic-calls $(call cc-option, -mexplicit-relocs) \
        -fno-stack-protector -fno-jump-tables -DDISABLE_BRANCH_PROFILING \
-       $(call cc-option, -fno-asynchronous-unwind-tables) \
-       $(call cc-option, -fno-stack-protector)
+       $(call cc-option, -fno-asynchronous-unwind-tables)
 aflags-vdso := $(ccflags-vdso) \
        -D__ASSEMBLY__ -Wa,-gdwarf-2
 
index 3a9219f..010ba5f 100644 (file)
@@ -44,7 +44,7 @@ static inline void set_fs(mm_segment_t fs)
        current_thread_info()->addr_limit = fs;
 }
 
-#define segment_eq(a, b)       ((a) == (b))
+#define uaccess_kernel()       (get_fs() == KERNEL_DS)
 
 #define __range_ok(addr, size) (size <= get_fs() && addr <= (get_fs() -size))
 
index e85bbba..e01ad5d 100644 (file)
@@ -121,7 +121,7 @@ void show_regs(struct pt_regs *regs)
                regs->uregs[3], regs->uregs[2], regs->uregs[1], regs->uregs[0]);
        pr_info("  IRQs o%s  Segment %s\n",
                interrupts_enabled(regs) ? "n" : "ff",
-               segment_eq(get_fs(), KERNEL_DS)? "kernel" : "user");
+               uaccess_kernel() ? "kernel" : "user");
 }
 
 EXPORT_SYMBOL(show_regs);
index c8b9061..1eb7ded 100644 (file)
@@ -512,7 +512,7 @@ int do_unaligned_access(unsigned long addr, struct pt_regs *regs)
 {
        unsigned long inst;
        int ret = -EFAULT;
-       mm_segment_t seg = get_fs();
+       mm_segment_t seg;
 
        inst = get_inst(regs->ipc);
 
@@ -520,13 +520,12 @@ int do_unaligned_access(unsigned long addr, struct pt_regs *regs)
              "Faulting addr: 0x%08lx, pc: 0x%08lx [inst: 0x%08lx ]\n", addr,
              regs->ipc, inst);
 
-       set_fs(USER_DS);
-
+       seg = force_uaccess_begin();
        if (inst & NDS32_16BIT_INSTRUCTION)
                ret = do_16((inst >> 16) & 0xffff, regs);
        else
                ret = do_32(inst, regs);
-       set_fs(seg);
+       force_uaccess_end(seg);
 
        return ret;
 }
index 8fb73f6..f02524e 100644 (file)
@@ -121,6 +121,8 @@ void do_page_fault(unsigned long entry, unsigned long addr,
        if (unlikely(faulthandler_disabled() || !mm))
                goto no_context;
 
+       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
+
        /*
         * As per x86, we may deadlock here. However, since the kernel only
         * validly references user space from well defined areas of the code,
@@ -206,7 +208,7 @@ good_area:
         * the fault.
         */
 
-       fault = handle_mm_fault(vma, addr, flags);
+       fault = handle_mm_fault(vma, addr, flags, regs);
 
        /*
         * If we need to retry but a fatal signal is pending, handle the
@@ -228,22 +230,7 @@ good_area:
                        goto bad_area;
        }
 
-       /*
-        * Major/minor page fault accounting is only done on the initial
-        * attempt. If we go through a retry, it is extremely likely that the
-        * page will be found in page cache at that point.
-        */
-       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
        if (flags & FAULT_FLAG_ALLOW_RETRY) {
-               if (fault & VM_FAULT_MAJOR) {
-                       tsk->maj_flt++;
-                       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
-                                     1, regs, addr);
-               } else {
-                       tsk->min_flt++;
-                       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
-                                     1, regs, addr);
-               }
                if (fault & VM_FAULT_RETRY) {
                        flags |= FAULT_FLAG_TRIED;
 
index e83f831..a741abb 100644 (file)
@@ -30,7 +30,7 @@
 #define get_fs()               (current_thread_info()->addr_limit)
 #define set_fs(seg)            (current_thread_info()->addr_limit = (seg))
 
-#define segment_eq(a, b)       ((a).seg == (b).seg)
+#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg)
 
 #define __access_ok(addr, len)                 \
        (((signed long)(((long)get_fs().seg) &  \
index 0a42ab8..88a4ec0 100644 (file)
@@ -252,14 +252,6 @@ void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp)
        regs->sp = sp;
 }
 
-#include <linux/elfcore.h>
-
-/* Fill in the FPU structure for a core dump. */
-int dump_fpu(struct pt_regs *regs, elf_fpregset_t *r)
-{
-       return 0; /* Nios2 has no FPU and thus no FPU registers */
-}
-
 asmlinkage int nios2_clone(unsigned long clone_flags, unsigned long newsp,
                           int __user *parent_tidptr, int __user *child_tidptr,
                           unsigned long tls)
index 4112ef0..9476fee 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/mm.h>
 #include <linux/extable.h>
 #include <linux/uaccess.h>
+#include <linux/perf_event.h>
 
 #include <asm/mmu_context.h>
 #include <asm/traps.h>
@@ -83,6 +84,8 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long cause,
        if (user_mode(regs))
                flags |= FAULT_FLAG_USER;
 
+       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
+
        if (!mmap_read_trylock(mm)) {
                if (!user_mode(regs) && !search_exception_tables(regs->ea))
                        goto bad_area_nosemaphore;
@@ -131,7 +134,7 @@ good_area:
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
-       fault = handle_mm_fault(vma, address, flags);
+       fault = handle_mm_fault(vma, address, flags, regs);
 
        if (fault_signal_pending(fault, regs))
                return;
@@ -146,16 +149,7 @@ good_area:
                BUG();
        }
 
-       /*
-        * Major/minor page fault accounting is only done on the
-        * initial attempt. If we go through a retry, it is extremely
-        * likely that the page will be found in page cache at that point.
-        */
        if (flags & FAULT_FLAG_ALLOW_RETRY) {
-               if (fault & VM_FAULT_MAJOR)
-                       current->maj_flt++;
-               else
-                       current->min_flt++;
                if (fault & VM_FAULT_RETRY) {
                        flags |= FAULT_FLAG_TRIED;
 
index db02fb2..7d6b4a7 100644 (file)
@@ -14,6 +14,8 @@
 #ifndef __ASM_OPENRISC_IO_H
 #define __ASM_OPENRISC_IO_H
 
+#include <linux/types.h>
+
 /*
  * PCI: can we really do 0 here if we have no port IO?
  */
 #define PIO_OFFSET             0
 #define PIO_MASK               0
 
-#include <asm-generic/io.h>
-
+#define ioremap ioremap
 void __iomem *ioremap(phys_addr_t offset, unsigned long size);
+
+#define iounmap iounmap
 extern void iounmap(void *addr);
 
+#include <asm-generic/io.h>
+
 #endif
index 17c24f1..f039021 100644 (file)
 #define get_fs()       (current_thread_info()->addr_limit)
 #define set_fs(x)      (current_thread_info()->addr_limit = (x))
 
-#define segment_eq(a, b)       ((a) == (b))
+#define uaccess_kernel()       (get_fs() == KERNEL_DS)
 
 /* Ensure that the range from addr to addr+size is all within the process'
  * address space
  */
-#define __range_ok(addr, size) (size <= get_fs() && addr <= (get_fs()-size))
+static inline int __range_ok(unsigned long addr, unsigned long size)
+{
+       const mm_segment_t fs = get_fs();
 
-/* Ensure that addr is below task's addr_limit */
-#define __addr_ok(addr) ((unsigned long) addr < get_fs())
+       return size <= fs && addr <= (fs - size);
+}
 
 #define access_ok(addr, size)                                          \
 ({                                                                     \
-       unsigned long __ao_addr = (unsigned long)(addr);                \
-       unsigned long __ao_size = (unsigned long)(size);                \
-       __range_ok(__ao_addr, __ao_size);                               \
+       __chk_user_ptr(addr);                                           \
+       __range_ok((unsigned long)(addr), (size));                      \
 })
 
 /*
@@ -100,7 +101,7 @@ extern long __put_user_bad(void);
 #define __put_user_check(x, ptr, size)                                 \
 ({                                                                     \
        long __pu_err = -EFAULT;                                        \
-       __typeof__(*(ptr)) *__pu_addr = (ptr);                          \
+       __typeof__(*(ptr)) __user *__pu_addr = (ptr);                   \
        if (access_ok(__pu_addr, size))                 \
                __put_user_size((x), __pu_addr, (size), __pu_err);      \
        __pu_err;                                                       \
@@ -173,7 +174,7 @@ struct __large_struct {
 #define __get_user_check(x, ptr, size)                                 \
 ({                                                                     \
        long __gu_err = -EFAULT, __gu_val = 0;                          \
-       const __typeof__(*(ptr)) * __gu_addr = (ptr);                   \
+       const __typeof__(*(ptr)) __user *__gu_addr = (ptr);             \
        if (access_ok(__gu_addr, size))                 \
                __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
        (x) = (__force __typeof__(*(ptr)))__gu_val;                     \
@@ -241,17 +242,17 @@ raw_copy_from_user(void *to, const void __user *from, unsigned long size)
        return __copy_tofrom_user(to, (__force const void *)from, size);
 }
 static inline unsigned long
-raw_copy_to_user(void *to, const void __user *from, unsigned long size)
+raw_copy_to_user(void __user *to, const void *from, unsigned long size)
 {
        return __copy_tofrom_user((__force void *)to, from, size);
 }
 #define INLINE_COPY_FROM_USER
 #define INLINE_COPY_TO_USER
 
-extern unsigned long __clear_user(void *addr, unsigned long size);
+extern unsigned long __clear_user(void __user *addr, unsigned long size);
 
 static inline __must_check unsigned long
-clear_user(void *addr, unsigned long size)
+clear_user(void __user *addr, unsigned long size)
 {
        if (likely(access_ok(addr, size)))
                size = __clear_user(addr, size);
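
The open-coded __range_ok macro above becomes a static inline so addr and size are evaluated once and carry real types. The shape of the test is the usual overflow-safe range check; a standalone sketch for reference (the helper name is illustrative, not part of the patch):

/* Non-zero when [addr, addr + size) lies below limit, written so it
 * never computes addr + size, which could wrap around.
 */
static inline int range_below_limit(unsigned long addr, unsigned long size,
                                    unsigned long limit)
{
        return size <= limit && addr <= (limit - size);
}
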
index 848f74c..0ff391f 100644 (file)
@@ -214,13 +214,6 @@ void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp)
        regs->sp = sp;
 }
 
-/* Fill in the fpu structure for a core dump.  */
-int dump_fpu(struct pt_regs *regs, elf_fpregset_t * fpu)
-{
-       /* TODO */
-       return 0;
-}
-
 extern struct thread_info *_switch(struct thread_info *old_ti,
                                   struct thread_info *new_ti);
 extern int lwa_flag;
index 8aa438e..b18e775 100644 (file)
@@ -292,13 +292,15 @@ void __init setup_arch(char **cmdline_p)
        init_mm.brk = (unsigned long)_end;
 
 #ifdef CONFIG_BLK_DEV_INITRD
-       initrd_start = (unsigned long)&__initrd_start;
-       initrd_end = (unsigned long)&__initrd_end;
        if (initrd_start == initrd_end) {
+               printk(KERN_INFO "Initial ramdisk not found\n");
                initrd_start = 0;
                initrd_end = 0;
+       } else {
+               printk(KERN_INFO "Initial ramdisk at: 0x%p (%lu bytes)\n",
+                      (void *)(initrd_start), initrd_end - initrd_start);
+               initrd_below_start_ok = 1;
        }
-       initrd_below_start_ok = 1;
 #endif
 
        /* setup memblock allocator */
index 4f07548..97804f2 100644 (file)
@@ -68,7 +68,7 @@ static int restore_sigcontext(struct pt_regs *regs,
 
 asmlinkage long _sys_rt_sigreturn(struct pt_regs *regs)
 {
-       struct rt_sigframe *frame = (struct rt_sigframe __user *)regs->sp;
+       struct rt_sigframe __user *frame = (struct rt_sigframe __user *)regs->sp;
        sigset_t set;
 
        /*
@@ -76,7 +76,7 @@ asmlinkage long _sys_rt_sigreturn(struct pt_regs *regs)
         * then frame should be dword aligned here.  If it's
         * not, then the user is trying to mess with us.
         */
-       if (((long)frame) & 3)
+       if (((unsigned long)frame) & 3)
                goto badframe;
 
        if (!access_ok(frame, sizeof(*frame)))
@@ -151,7 +151,7 @@ static inline void __user *get_sigframe(struct ksignal *ksig,
 static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
                          struct pt_regs *regs)
 {
-       struct rt_sigframe *frame;
+       struct rt_sigframe __user *frame;
        unsigned long return_ip;
        int err = 0;
 
@@ -181,10 +181,10 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
                l.ori r11,r0,__NR_sigreturn
                l.sys 1
         */
-       err |= __put_user(0xa960,             (short *)(frame->retcode + 0));
-       err |= __put_user(__NR_rt_sigreturn,  (short *)(frame->retcode + 2));
-       err |= __put_user(0x20000001, (unsigned long *)(frame->retcode + 4));
-       err |= __put_user(0x15000000, (unsigned long *)(frame->retcode + 8));
+       err |= __put_user(0xa960,             (short __user *)(frame->retcode + 0));
+       err |= __put_user(__NR_rt_sigreturn,  (short __user *)(frame->retcode + 2));
+       err |= __put_user(0x20000001, (unsigned long __user *)(frame->retcode + 4));
+       err |= __put_user(0x15000000, (unsigned long __user *)(frame->retcode + 8));
 
        if (err)
                return -EFAULT;
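
The added __user casts above only matter to static analysis: __user is a sparse address-space annotation with no effect on generated code. Roughly (the exact attribute spelling varies across kernel versions; this is a sketch, not the authoritative definition):

#ifdef __CHECKER__
# define __user        __attribute__((noderef, address_space(1)))
#else
# define __user
#endif
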
index bd1e660..29c82ef 100644 (file)
@@ -219,30 +219,99 @@ static inline void ipi_flush_tlb_all(void *ignored)
        local_flush_tlb_all();
 }
 
+static inline void ipi_flush_tlb_mm(void *info)
+{
+       struct mm_struct *mm = (struct mm_struct *)info;
+
+       local_flush_tlb_mm(mm);
+}
+
+static void smp_flush_tlb_mm(struct cpumask *cmask, struct mm_struct *mm)
+{
+       unsigned int cpuid;
+
+       if (cpumask_empty(cmask))
+               return;
+
+       cpuid = get_cpu();
+
+       if (cpumask_any_but(cmask, cpuid) >= nr_cpu_ids) {
+               /* local cpu is the only cpu present in cpumask */
+               local_flush_tlb_mm(mm);
+       } else {
+               on_each_cpu_mask(cmask, ipi_flush_tlb_mm, mm, 1);
+       }
+       put_cpu();
+}
+
+struct flush_tlb_data {
+       unsigned long addr1;
+       unsigned long addr2;
+};
+
+static inline void ipi_flush_tlb_page(void *info)
+{
+       struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
+
+       local_flush_tlb_page(NULL, fd->addr1);
+}
+
+static inline void ipi_flush_tlb_range(void *info)
+{
+       struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
+
+       local_flush_tlb_range(NULL, fd->addr1, fd->addr2);
+}
+
+static void smp_flush_tlb_range(struct cpumask *cmask, unsigned long start,
+                               unsigned long end)
+{
+       unsigned int cpuid;
+
+       if (cpumask_empty(cmask))
+               return;
+
+       cpuid = get_cpu();
+
+       if (cpumask_any_but(cmask, cpuid) >= nr_cpu_ids) {
+               /* local cpu is the only cpu present in cpumask */
+               if ((end - start) <= PAGE_SIZE)
+                       local_flush_tlb_page(NULL, start);
+               else
+                       local_flush_tlb_range(NULL, start, end);
+       } else {
+               struct flush_tlb_data fd;
+
+               fd.addr1 = start;
+               fd.addr2 = end;
+
+               if ((end - start) <= PAGE_SIZE)
+                       on_each_cpu_mask(cmask, ipi_flush_tlb_page, &fd, 1);
+               else
+                       on_each_cpu_mask(cmask, ipi_flush_tlb_range, &fd, 1);
+       }
+       put_cpu();
+}
+
 void flush_tlb_all(void)
 {
        on_each_cpu(ipi_flush_tlb_all, NULL, 1);
 }
 
-/*
- * FIXME: implement proper functionality instead of flush_tlb_all.
- * *But*, as things currently stands, the local_tlb_flush_* functions will
- * all boil down to local_tlb_flush_all anyway.
- */
 void flush_tlb_mm(struct mm_struct *mm)
 {
-       on_each_cpu(ipi_flush_tlb_all, NULL, 1);
+       smp_flush_tlb_mm(mm_cpumask(mm), mm);
 }
 
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
 {
-       on_each_cpu(ipi_flush_tlb_all, NULL, 1);
+       smp_flush_tlb_range(mm_cpumask(vma->vm_mm), uaddr, uaddr + PAGE_SIZE);
 }
 
 void flush_tlb_range(struct vm_area_struct *vma,
                     unsigned long start, unsigned long end)
 {
-       on_each_cpu(ipi_flush_tlb_all, NULL, 1);
+       smp_flush_tlb_range(mm_cpumask(vma->vm_mm), start, end);
 }
 
 /* Instruction cache invalidate - performed on each cpu */
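
The new smp_flush_tlb_mm()/smp_flush_tlb_range() helpers above implement the usual "flush only where the mm has run" idiom: when the calling CPU is the only one set in the mm's cpumask the flush stays local and no IPI is sent, otherwise on_each_cpu_mask() runs the flush on every CPU in the mask. The decision itself reduces to a one-line test; a sketch of that test in isolation (the helper name is illustrative):

#include <linux/cpumask.h>
#include <linux/smp.h>

/* Caller must have preemption disabled (e.g. between get_cpu()/put_cpu()),
 * so smp_processor_id() is stable.
 */
static bool only_local_cpu_in_mask(const struct cpumask *cmask)
{
        /* true when no CPU other than the current one is set in cmask */
        return cpumask_any_but(cmask, smp_processor_id()) >= nr_cpu_ids;
}
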
index 43f140a..54d3880 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/export.h>
 #include <linux/sched.h>
 #include <linux/sched/debug.h>
+#include <linux/sched/task_stack.h>
 #include <linux/stacktrace.h>
 
 #include <asm/processor.h>
@@ -68,12 +69,25 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
 {
        unsigned long *sp = NULL;
 
+       if (!try_get_task_stack(tsk))
+               return;
+
        if (tsk == current)
                sp = (unsigned long *) &sp;
-       else
-               sp = (unsigned long *) KSTK_ESP(tsk);
+       else {
+               unsigned long ksp;
+
+               /* Locate stack from kernel context */
+               ksp = task_thread_info(tsk)->ksp;
+               ksp += STACK_FRAME_OVERHEAD;    /* redzone */
+               ksp += sizeof(struct pt_regs);
+
+               sp = (unsigned long *) ksp;
+       }
 
        unwind_stack(trace, sp, save_stack_address_nosched);
+
+       put_task_stack(tsk);
 }
 EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
 
index 60449fd..22fbc5f 100644 (file)
@@ -96,18 +96,6 @@ SECTIONS
 
         __init_end = .;
 
-       . = ALIGN(PAGE_SIZE);
-       .initrd                 : AT(ADDR(.initrd) - LOAD_OFFSET)
-       {
-               __initrd_start = .;
-               *(.initrd)
-               __initrd_end = .;
-               FILL (0);
-                . = ALIGN (PAGE_SIZE);
-       }
-
-        __vmlinux_end = .;            /* last address of the physical file */
-
        BSS_SECTION(0, 0, 0x20)
 
         _end = .;
index d2224cc..ca97d9b 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/interrupt.h>
 #include <linux/extable.h>
 #include <linux/sched/signal.h>
+#include <linux/perf_event.h>
 
 #include <linux/uaccess.h>
 #include <asm/siginfo.h>
@@ -103,6 +104,8 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long address,
        if (in_interrupt() || !mm)
                goto no_context;
 
+       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
+
 retry:
        mmap_read_lock(mm);
        vma = find_vma(mm, address);
@@ -159,7 +162,7 @@ good_area:
         * the fault.
         */
 
-       fault = handle_mm_fault(vma, address, flags);
+       fault = handle_mm_fault(vma, address, flags, regs);
 
        if (fault_signal_pending(fault, regs))
                return;
@@ -176,10 +179,6 @@ good_area:
 
        if (flags & FAULT_FLAG_ALLOW_RETRY) {
                /*RGD modeled on Cris */
-               if (fault & VM_FAULT_MAJOR)
-                       tsk->maj_flt++;
-               else
-                       tsk->min_flt++;
                if (fault & VM_FAULT_RETRY) {
                        flags |= FAULT_FLAG_TRIED;
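
With regs passed to handle_mm_fault(), the per-task and perf fault accounting deleted above is done by the core MM code, keyed on regs being non-NULL. A sketch of the calling convention this series converts architectures to (the wrapper name is illustrative):

#include <linux/mm.h>

static vm_fault_t fault_with_accounting(struct vm_area_struct *vma,
                                        unsigned long address,
                                        unsigned int flags,
                                        struct pt_regs *regs)
{
        /* With a non-NULL regs the core MM updates maj_flt/min_flt and
         * emits PERF_COUNT_SW_PAGE_FAULTS_MAJ/MIN itself, so the arch
         * fault handler no longer open-codes that accounting.
         */
        return handle_mm_fault(vma, address, flags, regs);
}
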
 
index 4b680ae..2b6feab 100644 (file)
@@ -137,21 +137,28 @@ void local_flush_tlb_mm(struct mm_struct *mm)
 void switch_mm(struct mm_struct *prev, struct mm_struct *next,
               struct task_struct *next_tsk)
 {
+       unsigned int cpu;
+
+       if (unlikely(prev == next))
+               return;
+
+       cpu = smp_processor_id();
+
+       cpumask_clear_cpu(cpu, mm_cpumask(prev));
+       cpumask_set_cpu(cpu, mm_cpumask(next));
+
        /* remember the pgd for the fault handlers
         * this is similar to the pgd register in some other CPU's.
         * we need our own copy of it because current and active_mm
         * might be invalid at points where we still need to derefer
         * the pgd.
         */
-       current_pgd[smp_processor_id()] = next->pgd;
+       current_pgd[cpu] = next->pgd;
 
        /* We don't have context support implemented, so flush all
         * entries belonging to previous map
         */
-
-       if (prev != next)
-               local_flush_tlb_mm(prev);
-
+       local_flush_tlb_mm(prev);
 }
 
 /*
index 0386232..21b375c 100644 (file)
@@ -34,13 +34,13 @@ extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
 /* Can't use raw_spin_lock_irq because of #include problems, so
  * this is the substitute */
 #define _atomic_spin_lock_irqsave(l,f) do {    \
-       arch_spinlock_t *s = ATOMIC_HASH(l);            \
+       arch_spinlock_t *s = ATOMIC_HASH(l);    \
        local_irq_save(f);                      \
        arch_spin_lock(s);                      \
 } while(0)
 
 #define _atomic_spin_unlock_irqrestore(l,f) do {       \
-       arch_spinlock_t *s = ATOMIC_HASH(l);                    \
+       arch_spinlock_t *s = ATOMIC_HASH(l);            \
        arch_spin_unlock(s);                            \
        local_irq_restore(f);                           \
 } while(0)
@@ -85,7 +85,7 @@ static __inline__ void atomic_##op(int i, atomic_t *v)                        \
        _atomic_spin_lock_irqsave(v, flags);                            \
        v->counter c_op i;                                              \
        _atomic_spin_unlock_irqrestore(v, flags);                       \
-}                                                                      \
+}
 
 #define ATOMIC_OP_RETURN(op, c_op)                                     \
 static __inline__ int atomic_##op##_return(int i, atomic_t *v)         \
@@ -148,7 +148,7 @@ static __inline__ void atomic64_##op(s64 i, atomic64_t *v)          \
        _atomic_spin_lock_irqsave(v, flags);                            \
        v->counter c_op i;                                              \
        _atomic_spin_unlock_irqrestore(v, flags);                       \
-}                                                                      \
+}
 
 #define ATOMIC64_OP_RETURN(op, c_op)                                   \
 static __inline__ s64 atomic64_##op##_return(s64 i, atomic64_t *v)     \
index dbaaca8..640d46e 100644 (file)
 #define __smp_rmb()    mb()
 #define __smp_wmb()    mb()
 
+#define __smp_store_release(p, v)                                      \
+do {                                                                   \
+       typeof(p) __p = (p);                                            \
+        union { typeof(*p) __val; char __c[1]; } __u =                 \
+                { .__val = (__force typeof(*p)) (v) };                 \
+       compiletime_assert_atomic_type(*p);                             \
+       switch (sizeof(*p)) {                                           \
+       case 1:                                                         \
+               asm volatile("stb,ma %0,0(%1)"                          \
+                               : : "r"(*(__u8 *)__u.__c), "r"(__p)     \
+                               : "memory");                            \
+               break;                                                  \
+       case 2:                                                         \
+               asm volatile("sth,ma %0,0(%1)"                          \
+                               : : "r"(*(__u16 *)__u.__c), "r"(__p)    \
+                               : "memory");                            \
+               break;                                                  \
+       case 4:                                                         \
+               asm volatile("stw,ma %0,0(%1)"                          \
+                               : : "r"(*(__u32 *)__u.__c), "r"(__p)    \
+                               : "memory");                            \
+               break;                                                  \
+       case 8:                                                         \
+               if (IS_ENABLED(CONFIG_64BIT))                           \
+                       asm volatile("std,ma %0,0(%1)"                  \
+                               : : "r"(*(__u64 *)__u.__c), "r"(__p)    \
+                               : "memory");                            \
+               break;                                                  \
+       }                                                               \
+} while (0)
+
+#define __smp_load_acquire(p)                                          \
+({                                                                     \
+       union { typeof(*p) __val; char __c[1]; } __u;                   \
+       typeof(p) __p = (p);                                            \
+       compiletime_assert_atomic_type(*p);                             \
+       switch (sizeof(*p)) {                                           \
+       case 1:                                                         \
+               asm volatile("ldb,ma 0(%1),%0"                          \
+                               : "=r"(*(__u8 *)__u.__c) : "r"(__p)     \
+                               : "memory");                            \
+               break;                                                  \
+       case 2:                                                         \
+               asm volatile("ldh,ma 0(%1),%0"                          \
+                               : "=r"(*(__u16 *)__u.__c) : "r"(__p)    \
+                               : "memory");                            \
+               break;                                                  \
+       case 4:                                                         \
+               asm volatile("ldw,ma 0(%1),%0"                          \
+                               : "=r"(*(__u32 *)__u.__c) : "r"(__p)    \
+                               : "memory");                            \
+               break;                                                  \
+       case 8:                                                         \
+               if (IS_ENABLED(CONFIG_64BIT))                           \
+                       asm volatile("ldd,ma 0(%1),%0"                  \
+                               : "=r"(*(__u64 *)__u.__c) : "r"(__p)    \
+                               : "memory");                            \
+               break;                                                  \
+       }                                                               \
+       __u.__val;                                                      \
+})
 #include <asm-generic/barrier.h>
 
 #endif /* !__ASSEMBLY__ */
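
The __smp_store_release()/__smp_load_acquire() definitions above use PA-RISC's ",ma" ordered completers so each access carries its own ordering instead of falling back to a full mb(). The generic wrappers are meant to be used in matched pairs; a minimal producer/consumer sketch of the intended usage (the data structures here are illustrative):

#include <asm/barrier.h>

struct message {
        int payload;
};

static struct message msg;
static int msg_ready;

/* producer: the payload write is ordered before the flag becomes visible */
static void publish(int value)
{
        msg.payload = value;
        smp_store_release(&msg_ready, 1);
}

/* consumer: the flag read is ordered before any read of the payload */
static int consume(int *value)
{
        if (!smp_load_acquire(&msg_ready))
                return 0;
        *value = msg.payload;
        return 1;
}
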
index 301af07..3bd465a 100644 (file)
@@ -305,9 +305,6 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
 
 struct task_struct;
 
-extern int dump_task_fpu (struct task_struct *, elf_fpregset_t *);
-#define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs)
-
 struct pt_regs;        /* forward declaration... */
 
 
index 116effe..45e20d3 100644 (file)
@@ -303,8 +303,8 @@ extern void outsl (unsigned long port, const void *src, unsigned long count);
 #define ioread64be ioread64be
 #define iowrite64 iowrite64
 #define iowrite64be iowrite64be
-extern u64 ioread64(void __iomem *addr);
-extern u64 ioread64be(void __iomem *addr);
+extern u64 ioread64(const void __iomem *addr);
+extern u64 ioread64be(const void __iomem *addr);
 extern void iowrite64(u64 val, void __iomem *addr);
 extern void iowrite64be(u64 val, void __iomem *addr);
 
index cc7ecc2..a6482b2 100644 (file)
@@ -10,6 +10,7 @@
 
 #include <asm/cache.h>
 
+#define __HAVE_ARCH_PMD_ALLOC_ONE
 #define __HAVE_ARCH_PMD_FREE
 #define __HAVE_ARCH_PGD_FREE
 #include <asm-generic/pgalloc.h>
@@ -67,6 +68,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
                        (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT)));
 }
 
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+       return (pmd_t *)__get_free_pages(GFP_PGTABLE_KERNEL, PMD_ORDER);
+}
+
 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
 {
        if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED) {
index 45537cd..06b510f 100644 (file)
@@ -7,6 +7,7 @@
 #ifndef _ASMPARISC_TIMEX_H
 #define _ASMPARISC_TIMEX_H
 
+#include <asm/special_insns.h>
 
 #define CLOCK_TICK_RATE        1193180 /* Underlying HZ */
 
index ebbb9ff..ed2cd4f 100644 (file)
@@ -14,7 +14,7 @@
 #define KERNEL_DS      ((mm_segment_t){0})
 #define USER_DS        ((mm_segment_t){1})
 
-#define segment_eq(a, b) ((a).seg == (b).seg)
+#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg)
 
 #define get_fs()       (current_thread_info()->addr_limit)
 #define set_fs(x)      (current_thread_info()->addr_limit = (x))
index 1df0f67..4bab21c 100644 (file)
@@ -64,7 +64,8 @@ void notrace __hot ftrace_function_trampoline(unsigned long parent,
                                function_trace_op, regs);
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-       if (ftrace_graph_return != (trace_func_graph_ret_t) ftrace_stub ||
+       if (dereference_function_descriptor(ftrace_graph_return) !=
+           dereference_function_descriptor(ftrace_stub) ||
            ftrace_graph_entry != ftrace_graph_entry_stub) {
                unsigned long *parent_rp;
 
index 86ec30c..f196d96 100644 (file)
@@ -152,25 +152,6 @@ void release_thread(struct task_struct *dead_task)
 }
 
 /*
- * Fill in the FPU structure for a core dump.
- */
-
-int dump_fpu (struct pt_regs * regs, elf_fpregset_t *r)
-{
-       if (regs == NULL)
-               return 0;
-
-       memcpy(r, regs->fr, sizeof *r);
-       return 1;
-}
-
-int dump_task_fpu (struct task_struct *tsk, elf_fpregset_t *r)
-{
-       memcpy(r, tsk->thread.regs.fr, sizeof(*r));
-       return 1;
-}
-
-/*
  * Idle thread support
  *
  * Detect when running on QEMU with SeaBIOS PDC Firmware and let
index 292baab..def64d2 100644 (file)
 146    common  writev                  sys_writev                      compat_sys_writev
 147    common  getsid                  sys_getsid
 148    common  fdatasync               sys_fdatasync
-149    common  _sysctl                 sys_sysctl                      compat_sys_sysctl
+149    common  _sysctl                 sys_ni_syscall
 150    common  mlock                   sys_mlock
 151    common  munlock                 sys_munlock
 152    common  mlockall                sys_mlockall
index 0195aec..ce40041 100644 (file)
 #endif
 
 struct iomap_ops {
-       unsigned int (*read8)(void __iomem *);
-       unsigned int (*read16)(void __iomem *);
-       unsigned int (*read16be)(void __iomem *);
-       unsigned int (*read32)(void __iomem *);
-       unsigned int (*read32be)(void __iomem *);
-       u64 (*read64)(void __iomem *);
-       u64 (*read64be)(void __iomem *);
+       unsigned int (*read8)(const void __iomem *);
+       unsigned int (*read16)(const void __iomem *);
+       unsigned int (*read16be)(const void __iomem *);
+       unsigned int (*read32)(const void __iomem *);
+       unsigned int (*read32be)(const void __iomem *);
+       u64 (*read64)(const void __iomem *);
+       u64 (*read64be)(const void __iomem *);
        void (*write8)(u8, void __iomem *);
        void (*write16)(u16, void __iomem *);
        void (*write16be)(u16, void __iomem *);
@@ -57,9 +57,9 @@ struct iomap_ops {
        void (*write32be)(u32, void __iomem *);
        void (*write64)(u64, void __iomem *);
        void (*write64be)(u64, void __iomem *);
-       void (*read8r)(void __iomem *, void *, unsigned long);
-       void (*read16r)(void __iomem *, void *, unsigned long);
-       void (*read32r)(void __iomem *, void *, unsigned long);
+       void (*read8r)(const void __iomem *, void *, unsigned long);
+       void (*read16r)(const void __iomem *, void *, unsigned long);
+       void (*read32r)(const void __iomem *, void *, unsigned long);
        void (*write8r)(void __iomem *, const void *, unsigned long);
        void (*write16r)(void __iomem *, const void *, unsigned long);
        void (*write32r)(void __iomem *, const void *, unsigned long);
@@ -69,17 +69,17 @@ struct iomap_ops {
 
 #define ADDR2PORT(addr) ((unsigned long __force)(addr) & 0xffffff)
 
-static unsigned int ioport_read8(void __iomem *addr)
+static unsigned int ioport_read8(const void __iomem *addr)
 {
        return inb(ADDR2PORT(addr));
 }
 
-static unsigned int ioport_read16(void __iomem *addr)
+static unsigned int ioport_read16(const void __iomem *addr)
 {
        return inw(ADDR2PORT(addr));
 }
 
-static unsigned int ioport_read32(void __iomem *addr)
+static unsigned int ioport_read32(const void __iomem *addr)
 {
        return inl(ADDR2PORT(addr));
 }
@@ -99,17 +99,17 @@ static void ioport_write32(u32 datum, void __iomem *addr)
        outl(datum, ADDR2PORT(addr));
 }
 
-static void ioport_read8r(void __iomem *addr, void *dst, unsigned long count)
+static void ioport_read8r(const void __iomem *addr, void *dst, unsigned long count)
 {
        insb(ADDR2PORT(addr), dst, count);
 }
 
-static void ioport_read16r(void __iomem *addr, void *dst, unsigned long count)
+static void ioport_read16r(const void __iomem *addr, void *dst, unsigned long count)
 {
        insw(ADDR2PORT(addr), dst, count);
 }
 
-static void ioport_read32r(void __iomem *addr, void *dst, unsigned long count)
+static void ioport_read32r(const void __iomem *addr, void *dst, unsigned long count)
 {
        insl(ADDR2PORT(addr), dst, count);
 }
@@ -150,37 +150,37 @@ static const struct iomap_ops ioport_ops = {
 
 /* Legacy I/O memory ops */
 
-static unsigned int iomem_read8(void __iomem *addr)
+static unsigned int iomem_read8(const void __iomem *addr)
 {
        return readb(addr);
 }
 
-static unsigned int iomem_read16(void __iomem *addr)
+static unsigned int iomem_read16(const void __iomem *addr)
 {
        return readw(addr);
 }
 
-static unsigned int iomem_read16be(void __iomem *addr)
+static unsigned int iomem_read16be(const void __iomem *addr)
 {
        return __raw_readw(addr);
 }
 
-static unsigned int iomem_read32(void __iomem *addr)
+static unsigned int iomem_read32(const void __iomem *addr)
 {
        return readl(addr);
 }
 
-static unsigned int iomem_read32be(void __iomem *addr)
+static unsigned int iomem_read32be(const void __iomem *addr)
 {
        return __raw_readl(addr);
 }
 
-static u64 iomem_read64(void __iomem *addr)
+static u64 iomem_read64(const void __iomem *addr)
 {
        return readq(addr);
 }
 
-static u64 iomem_read64be(void __iomem *addr)
+static u64 iomem_read64be(const void __iomem *addr)
 {
        return __raw_readq(addr);
 }
@@ -220,7 +220,7 @@ static void iomem_write64be(u64 datum, void __iomem *addr)
        __raw_writel(datum, addr);
 }
 
-static void iomem_read8r(void __iomem *addr, void *dst, unsigned long count)
+static void iomem_read8r(const void __iomem *addr, void *dst, unsigned long count)
 {
        while (count--) {
                *(u8 *)dst = __raw_readb(addr);
@@ -228,7 +228,7 @@ static void iomem_read8r(void __iomem *addr, void *dst, unsigned long count)
        }
 }
 
-static void iomem_read16r(void __iomem *addr, void *dst, unsigned long count)
+static void iomem_read16r(const void __iomem *addr, void *dst, unsigned long count)
 {
        while (count--) {
                *(u16 *)dst = __raw_readw(addr);
@@ -236,7 +236,7 @@ static void iomem_read16r(void __iomem *addr, void *dst, unsigned long count)
        }
 }
 
-static void iomem_read32r(void __iomem *addr, void *dst, unsigned long count)
+static void iomem_read32r(const void __iomem *addr, void *dst, unsigned long count)
 {
        while (count--) {
                *(u32 *)dst = __raw_readl(addr);
@@ -297,49 +297,49 @@ static const struct iomap_ops *iomap_ops[8] = {
 };
 
 
-unsigned int ioread8(void __iomem *addr)
+unsigned int ioread8(const void __iomem *addr)
 {
        if (unlikely(INDIRECT_ADDR(addr)))
                return iomap_ops[ADDR_TO_REGION(addr)]->read8(addr);
        return *((u8 *)addr);
 }
 
-unsigned int ioread16(void __iomem *addr)
+unsigned int ioread16(const void __iomem *addr)
 {
        if (unlikely(INDIRECT_ADDR(addr)))
                return iomap_ops[ADDR_TO_REGION(addr)]->read16(addr);
        return le16_to_cpup((u16 *)addr);
 }
 
-unsigned int ioread16be(void __iomem *addr)
+unsigned int ioread16be(const void __iomem *addr)
 {
        if (unlikely(INDIRECT_ADDR(addr)))
                return iomap_ops[ADDR_TO_REGION(addr)]->read16be(addr);
        return *((u16 *)addr);
 }
 
-unsigned int ioread32(void __iomem *addr)
+unsigned int ioread32(const void __iomem *addr)
 {
        if (unlikely(INDIRECT_ADDR(addr)))
                return iomap_ops[ADDR_TO_REGION(addr)]->read32(addr);
        return le32_to_cpup((u32 *)addr);
 }
 
-unsigned int ioread32be(void __iomem *addr)
+unsigned int ioread32be(const void __iomem *addr)
 {
        if (unlikely(INDIRECT_ADDR(addr)))
                return iomap_ops[ADDR_TO_REGION(addr)]->read32be(addr);
        return *((u32 *)addr);
 }
 
-u64 ioread64(void __iomem *addr)
+u64 ioread64(const void __iomem *addr)
 {
        if (unlikely(INDIRECT_ADDR(addr)))
                return iomap_ops[ADDR_TO_REGION(addr)]->read64(addr);
        return le64_to_cpup((u64 *)addr);
 }
 
-u64 ioread64be(void __iomem *addr)
+u64 ioread64be(const void __iomem *addr)
 {
        if (unlikely(INDIRECT_ADDR(addr)))
                return iomap_ops[ADDR_TO_REGION(addr)]->read64be(addr);
@@ -411,7 +411,7 @@ void iowrite64be(u64 datum, void __iomem *addr)
 
 /* Repeating interfaces */
 
-void ioread8_rep(void __iomem *addr, void *dst, unsigned long count)
+void ioread8_rep(const void __iomem *addr, void *dst, unsigned long count)
 {
        if (unlikely(INDIRECT_ADDR(addr))) {
                iomap_ops[ADDR_TO_REGION(addr)]->read8r(addr, dst, count);
@@ -423,7 +423,7 @@ void ioread8_rep(void __iomem *addr, void *dst, unsigned long count)
        }
 }
 
-void ioread16_rep(void __iomem *addr, void *dst, unsigned long count)
+void ioread16_rep(const void __iomem *addr, void *dst, unsigned long count)
 {
        if (unlikely(INDIRECT_ADDR(addr))) {
                iomap_ops[ADDR_TO_REGION(addr)]->read16r(addr, dst, count);
@@ -435,7 +435,7 @@ void ioread16_rep(void __iomem *addr, void *dst, unsigned long count)
        }
 }
 
-void ioread32_rep(void __iomem *addr, void *dst, unsigned long count)
+void ioread32_rep(const void __iomem *addr, void *dst, unsigned long count)
 {
        if (unlikely(INDIRECT_ADDR(addr))) {
                iomap_ops[ADDR_TO_REGION(addr)]->read32r(addr, dst, count);
index 66ac071..4bfe2da 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/extable.h>
 #include <linux/uaccess.h>
 #include <linux/hugetlb.h>
+#include <linux/perf_event.h>
 
 #include <asm/traps.h>
 
@@ -281,6 +282,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
        acc_type = parisc_acctyp(code, regs->iir);
        if (acc_type & VM_WRITE)
                flags |= FAULT_FLAG_WRITE;
+       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 retry:
        mmap_read_lock(mm);
        vma = find_vma_prev(mm, address, &prev_vma);
@@ -302,7 +304,7 @@ good_area:
         * fault.
         */
 
-       fault = handle_mm_fault(vma, address, flags);
+       fault = handle_mm_fault(vma, address, flags, regs);
 
        if (fault_signal_pending(fault, regs))
                return;
@@ -323,10 +325,6 @@ good_area:
                BUG();
        }
        if (flags & FAULT_FLAG_ALLOW_RETRY) {
-               if (fault & VM_FAULT_MAJOR)
-                       current->maj_flt++;
-               else
-                       current->min_flt++;
                if (fault & VM_FAULT_RETRY) {
                        /*
                         * No need to mmap_read_unlock(mm) as we would
index 44af715..b88fd27 100644 (file)
@@ -366,6 +366,8 @@ initrd-y := $(patsubst zImage%, zImage.initrd%, \
                $(patsubst treeImage%, treeImage.initrd%, $(image-y)))))
 initrd-y := $(filter-out $(image-y), $(initrd-y))
 targets        += $(image-y) $(initrd-y)
+targets += $(foreach x, dtbImage uImage cuImage simpleImage treeImage, \
+               $(patsubst $(x).%, dts/%.dtb, $(filter $(x).%, $(image-y))))
 
 $(addprefix $(obj)/, $(initrd-y)): $(obj)/ramdisk.image.gz
 
index d8a0729..219559d 100644 (file)
@@ -29,9 +29,6 @@ struct dev_archdata {
        struct iommu_table      *iommu_table_base;
 #endif
 
-#ifdef CONFIG_IOMMU_API
-       void                    *iommu_domain;
-#endif
 #ifdef CONFIG_PPC64
        struct pci_dn           *pci_data;
 #endif
index 64c04ab..0069990 100644 (file)
@@ -38,8 +38,7 @@ static inline void set_fs(mm_segment_t fs)
        set_thread_flag(TIF_FSCHECK);
 }
 
-#define segment_eq(a, b)       ((a).seg == (b).seg)
-
+#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg)
 #define user_addr_max()        (get_fs().seg)
 
 #ifdef __powerpc64__
index d4d5946..cbf41fb 100644 (file)
@@ -16,7 +16,7 @@ CFLAGS_prom_init.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
 CFLAGS_btext.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
 CFLAGS_prom.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
 
-CFLAGS_prom_init.o += $(call cc-option, -fno-stack-protector)
+CFLAGS_prom_init.o += -fno-stack-protector
 CFLAGS_prom_init.o += -DDISABLE_BRANCH_PROFILING
 CFLAGS_prom_init.o += -ffreestanding
 
index 5ac84ef..9fe4fb3 100644 (file)
  * Here comes the ppc64 implementation of the IOMAP 
  * interfaces.
  */
-unsigned int ioread8(void __iomem *addr)
+unsigned int ioread8(const void __iomem *addr)
 {
        return readb(addr);
 }
-unsigned int ioread16(void __iomem *addr)
+unsigned int ioread16(const void __iomem *addr)
 {
        return readw(addr);
 }
-unsigned int ioread16be(void __iomem *addr)
+unsigned int ioread16be(const void __iomem *addr)
 {
        return readw_be(addr);
 }
-unsigned int ioread32(void __iomem *addr)
+unsigned int ioread32(const void __iomem *addr)
 {
        return readl(addr);
 }
-unsigned int ioread32be(void __iomem *addr)
+unsigned int ioread32be(const void __iomem *addr)
 {
        return readl_be(addr);
 }
@@ -41,27 +41,27 @@ EXPORT_SYMBOL(ioread16be);
 EXPORT_SYMBOL(ioread32);
 EXPORT_SYMBOL(ioread32be);
 #ifdef __powerpc64__
-u64 ioread64(void __iomem *addr)
+u64 ioread64(const void __iomem *addr)
 {
        return readq(addr);
 }
-u64 ioread64_lo_hi(void __iomem *addr)
+u64 ioread64_lo_hi(const void __iomem *addr)
 {
        return readq(addr);
 }
-u64 ioread64_hi_lo(void __iomem *addr)
+u64 ioread64_hi_lo(const void __iomem *addr)
 {
        return readq(addr);
 }
-u64 ioread64be(void __iomem *addr)
+u64 ioread64be(const void __iomem *addr)
 {
        return readq_be(addr);
 }
-u64 ioread64be_lo_hi(void __iomem *addr)
+u64 ioread64be_lo_hi(const void __iomem *addr)
 {
        return readq_be(addr);
 }
-u64 ioread64be_hi_lo(void __iomem *addr)
+u64 ioread64be_hi_lo(const void __iomem *addr)
 {
        return readq_be(addr);
 }
@@ -139,15 +139,15 @@ EXPORT_SYMBOL(iowrite64be_hi_lo);
  * FIXME! We could make these do EEH handling if we really
  * wanted. Not clear if we do.
  */
-void ioread8_rep(void __iomem *addr, void *dst, unsigned long count)
+void ioread8_rep(const void __iomem *addr, void *dst, unsigned long count)
 {
        readsb(addr, dst, count);
 }
-void ioread16_rep(void __iomem *addr, void *dst, unsigned long count)
+void ioread16_rep(const void __iomem *addr, void *dst, unsigned long count)
 {
        readsw(addr, dst, count);
 }
-void ioread32_rep(void __iomem *addr, void *dst, unsigned long count)
+void ioread32_rep(const void __iomem *addr, void *dst, unsigned long count)
 {
        readsl(addr, dst, count);
 }
index be9f745..c2d737f 100644 (file)
 146    common  writev                          sys_writev                      compat_sys_writev
 147    common  getsid                          sys_getsid
 148    common  fdatasync                       sys_fdatasync
-149    nospu   _sysctl                         sys_sysctl                      compat_sys_sysctl
+149    nospu   _sysctl                         sys_ni_syscall
 150    common  mlock                           sys_mlock
 151    common  munlock                         sys_munlock
 152    common  mlockall                        sys_mlockall
index 1478fce..1da9dbb 100644 (file)
@@ -1115,9 +1115,8 @@ void hash__early_init_mmu_secondary(void)
                        && cpu_has_feature(CPU_FTR_HVMODE))
                tlbiel_all();
 
-#ifdef CONFIG_PPC_MEM_KEYS
-       mtspr(SPRN_UAMOR, default_uamor);
-#endif
+       if (IS_ENABLED(CONFIG_PPC_MEM_KEYS) && mmu_has_feature(MMU_FTR_PKEY))
+               mtspr(SPRN_UAMOR, default_uamor);
 }
 #endif /* CONFIG_SMP */
 
index 69a6b87..b1d091a 100644 (file)
@@ -73,12 +73,6 @@ static int scan_pkey_feature(void)
        if (early_radix_enabled())
                return 0;
 
-       /*
-        * Only P7 and above supports SPRN_AMR update with MSR[PR] = 1
-        */
-       if (!early_cpu_has_feature(CPU_FTR_ARCH_206))
-               return 0;
-
        ret = of_scan_flat_dt(dt_scan_storage_keys, &pkeys_total);
        if (ret == 0) {
                /*
@@ -124,6 +118,12 @@ void __init pkey_early_init_devtree(void)
                     __builtin_popcountl(ARCH_VM_PKEY_FLAGS >> VM_PKEY_SHIFT)
                                != (sizeof(u64) * BITS_PER_BYTE));
 
+       /*
+        * Only P7 and above supports SPRN_AMR update with MSR[PR] = 1
+        */
+       if (!early_cpu_has_feature(CPU_FTR_ARCH_206))
+               return;
+
        /* scan the device tree for pkey feature */
        pkeys_total = scan_pkey_feature();
        if (!pkeys_total)
index b83abbe..8acd001 100644 (file)
@@ -64,7 +64,7 @@ int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
        }
 
        ret = 0;
-       *flt = handle_mm_fault(vma, ea, is_write ? FAULT_FLAG_WRITE : 0);
+       *flt = handle_mm_fault(vma, ea, is_write ? FAULT_FLAG_WRITE : 0, NULL);
        if (unlikely(*flt & VM_FAULT_ERROR)) {
                if (*flt & VM_FAULT_OOM) {
                        ret = -ENOMEM;
@@ -76,11 +76,6 @@ int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
                BUG();
        }
 
-       if (*flt & VM_FAULT_MAJOR)
-               current->maj_flt++;
-       else
-               current->min_flt++;
-
 out_unlock:
        mmap_read_unlock(mm);
        return ret;
index 925a723..0add963 100644 (file)
@@ -511,7 +511,7 @@ retry:
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
-       fault = handle_mm_fault(vma, address, flags);
+       fault = handle_mm_fault(vma, address, flags, regs);
 
        major |= fault & VM_FAULT_MAJOR;
 
@@ -537,14 +537,9 @@ retry:
        /*
         * Major/minor page fault accounting.
         */
-       if (major) {
-               current->maj_flt++;
-               perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
+       if (major)
                cmo_account_page_fault();
-       } else {
-               current->min_flt++;
-               perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
-       }
+
        return 0;
 }
 NOKPROBE_SYMBOL(__do_page_fault);
index f4247ad..cf85f06 100644 (file)
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 CFLAGS_bootx_init.o            += -fPIC
-CFLAGS_bootx_init.o            += $(call cc-option, -fno-stack-protector)
+CFLAGS_bootx_init.o            += -fno-stack-protector
 
 KASAN_SANITIZE_bootx_init.o := n
 
index 89c76ca..eb25d75 100644 (file)
@@ -7,8 +7,7 @@ UBSAN_SANITIZE := n
 KASAN_SANITIZE := n
 
 # Disable ftrace for the entire directory
-ORIG_CFLAGS := $(KBUILD_CFLAGS)
-KBUILD_CFLAGS = $(subst $(CC_FLAGS_FTRACE),,$(ORIG_CFLAGS))
+ccflags-remove-$(CONFIG_FUNCTION_TRACER) += $(CC_FLAGS_FTRACE)
 
 ifdef CONFIG_CC_IS_CLANG
 # clang stores addresses on the stack causing the frame size to blow
index 7b59055..df18372 100644 (file)
@@ -81,7 +81,7 @@ config RISCV
        select PCI_DOMAINS_GENERIC if PCI
        select PCI_MSI if PCI
        select RISCV_INTC
-       select RISCV_TIMER
+       select RISCV_TIMER if RISCV_SBI
        select SPARSEMEM_STATIC if 32BIT
        select SPARSE_IRQ
        select SYSCTL_EXCEPTION_TRACE
index 6c88148..8a55f61 100644 (file)
@@ -12,6 +12,7 @@ config SOC_SIFIVE
 
 config SOC_VIRT
        bool "QEMU Virt Machine"
+       select CLINT_TIMER if RISCV_M_MODE
        select POWER_RESET
        select POWER_RESET_SYSCON
        select POWER_RESET_SYSCON_POWEROFF
@@ -24,6 +25,7 @@ config SOC_VIRT
 config SOC_KENDRYTE
        bool "Kendryte K210 SoC"
        depends on !MMU
+       select CLINT_TIMER if RISCV_M_MODE
        select SERIAL_SIFIVE if TTY
        select SERIAL_SIFIVE_CONSOLE if TTY
        select SIFIVE_PLIC
index f27596e..e046a0b 100644 (file)
@@ -26,6 +26,7 @@ CONFIG_EXPERT=y
 CONFIG_SLOB=y
 # CONFIG_SLAB_MERGE_DEFAULT is not set
 # CONFIG_MMU is not set
+CONFIG_SOC_VIRT=y
 CONFIG_MAXPHYSMEM_2GB=y
 CONFIG_SMP=y
 CONFIG_CMDLINE="root=/dev/vda rw earlycon=uart8250,mmio,0x10000000,115200n8 console=ttyS0"
@@ -49,7 +50,6 @@ CONFIG_VIRTIO_BLK=y
 # CONFIG_SERIO is not set
 # CONFIG_LEGACY_PTYS is not set
 # CONFIG_LDISC_AUTOLOAD is not set
-# CONFIG_DEVMEM is not set
 CONFIG_SERIAL_8250=y
 # CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
 CONFIG_SERIAL_8250_CONSOLE=y
@@ -57,16 +57,13 @@ CONFIG_SERIAL_8250_NR_UARTS=1
 CONFIG_SERIAL_8250_RUNTIME_UARTS=1
 CONFIG_SERIAL_OF_PLATFORM=y
 # CONFIG_HW_RANDOM is not set
+# CONFIG_DEVMEM is not set
 # CONFIG_HWMON is not set
-# CONFIG_LCD_CLASS_DEVICE is not set
-# CONFIG_BACKLIGHT_CLASS_DEVICE is not set
 # CONFIG_VGA_CONSOLE is not set
 # CONFIG_HID is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_VIRTIO_MMIO=y
 CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
-CONFIG_SIFIVE_PLIC=y
-# CONFIG_VALIDATE_FS_PARSER is not set
 CONFIG_EXT2_FS=y
 # CONFIG_DNOTIFY is not set
 # CONFIG_INOTIFY_USER is not set
index 3a55f0e..2c2cda6 100644 (file)
@@ -14,6 +14,7 @@ CONFIG_CHECKPOINT_RESTORE=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_EXPERT=y
 CONFIG_BPF_SYSCALL=y
+CONFIG_SOC_SIFIVE=y
 CONFIG_SOC_VIRT=y
 CONFIG_ARCH_RV32I=y
 CONFIG_SMP=y
@@ -62,6 +63,8 @@ CONFIG_HVC_RISCV_SBI=y
 CONFIG_VIRTIO_CONSOLE=y
 CONFIG_HW_RANDOM=y
 CONFIG_HW_RANDOM_VIRTIO=y
+CONFIG_SPI=y
+CONFIG_SPI_SIFIVE=y
 # CONFIG_PTP_1588_CLOCK is not set
 CONFIG_POWER_RESET=y
 CONFIG_DRM=y
@@ -77,6 +80,8 @@ CONFIG_USB_OHCI_HCD=y
 CONFIG_USB_OHCI_HCD_PLATFORM=y
 CONFIG_USB_STORAGE=y
 CONFIG_USB_UAS=y
+CONFIG_MMC=y
+CONFIG_MMC_SPI=y
 CONFIG_RTC_CLASS=y
 CONFIG_VIRTIO_PCI=y
 CONFIG_VIRTIO_BALLOON=y
diff --git a/arch/riscv/include/asm/clint.h b/arch/riscv/include/asm/clint.h
deleted file mode 100644 (file)
index a279b17..0000000
+++ /dev/null
@@ -1,39 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_RISCV_CLINT_H
-#define _ASM_RISCV_CLINT_H 1
-
-#include <linux/io.h>
-#include <linux/smp.h>
-
-#ifdef CONFIG_RISCV_M_MODE
-extern u32 __iomem *clint_ipi_base;
-
-void clint_init_boot_cpu(void);
-
-static inline void clint_send_ipi_single(unsigned long hartid)
-{
-       writel(1, clint_ipi_base + hartid);
-}
-
-static inline void clint_send_ipi_mask(const struct cpumask *mask)
-{
-       int cpu;
-
-       for_each_cpu(cpu, mask)
-               clint_send_ipi_single(cpuid_to_hartid_map(cpu));
-}
-
-static inline void clint_clear_ipi(unsigned long hartid)
-{
-       writel(0, clint_ipi_base + hartid);
-}
-#else /* CONFIG_RISCV_M_MODE */
-#define clint_init_boot_cpu()  do { } while (0)
-
-/* stubs to for code is only reachable under IS_ENABLED(CONFIG_RISCV_M_MODE): */
-void clint_send_ipi_single(unsigned long hartid);
-void clint_send_ipi_mask(const struct cpumask *hartid_mask);
-void clint_clear_ipi(unsigned long hartid);
-#endif /* CONFIG_RISCV_M_MODE */
-
-#endif /* _ASM_RISCV_CLINT_H */
index 6dfd2a1..df1f7c4 100644 (file)
 struct seq_file;
 extern unsigned long boot_cpu_hartid;
 
+struct riscv_ipi_ops {
+       void (*ipi_inject)(const struct cpumask *target);
+       void (*ipi_clear)(void);
+};
+
 #ifdef CONFIG_SMP
 /*
  * Mapping between linux logical cpu index and hartid.
@@ -40,6 +45,12 @@ void arch_send_call_function_single_ipi(int cpu);
 int riscv_hartid_to_cpuid(int hartid);
 void riscv_cpuid_to_hartid_mask(const struct cpumask *in, struct cpumask *out);
 
+/* Set custom IPI operations */
+void riscv_set_ipi_ops(struct riscv_ipi_ops *ops);
+
+/* Clear IPI for current CPU */
+void riscv_clear_ipi(void);
+
 /* Secondary hart entry */
 asmlinkage void smp_callin(void);
 
@@ -81,6 +92,14 @@ static inline void riscv_cpuid_to_hartid_mask(const struct cpumask *in,
        cpumask_set_cpu(boot_cpu_hartid, out);
 }
 
+static inline void riscv_set_ipi_ops(struct riscv_ipi_ops *ops)
+{
+}
+
+static inline void riscv_clear_ipi(void)
+{
+}
+
 #endif /* CONFIG_SMP */
 
 #if defined(CONFIG_HOTPLUG_CPU) && (CONFIG_SMP)
index bad2a7c..a3fb85d 100644 (file)
@@ -7,41 +7,27 @@
 #define _ASM_RISCV_TIMEX_H
 
 #include <asm/csr.h>
-#include <asm/mmio.h>
 
 typedef unsigned long cycles_t;
 
-extern u64 __iomem *riscv_time_val;
-extern u64 __iomem *riscv_time_cmp;
-
-#ifdef CONFIG_64BIT
-#define mmio_get_cycles()      readq_relaxed(riscv_time_val)
-#else
-#define mmio_get_cycles()      readl_relaxed(riscv_time_val)
-#define mmio_get_cycles_hi()   readl_relaxed(((u32 *)riscv_time_val) + 1)
-#endif
-
 static inline cycles_t get_cycles(void)
 {
-       if (IS_ENABLED(CONFIG_RISCV_SBI))
-               return csr_read(CSR_TIME);
-       return mmio_get_cycles();
+       return csr_read(CSR_TIME);
 }
 #define get_cycles get_cycles
 
+static inline u32 get_cycles_hi(void)
+{
+       return csr_read(CSR_TIMEH);
+}
+#define get_cycles_hi get_cycles_hi
+
 #ifdef CONFIG_64BIT
 static inline u64 get_cycles64(void)
 {
        return get_cycles();
 }
 #else /* CONFIG_64BIT */
-static inline u32 get_cycles_hi(void)
-{
-       if (IS_ENABLED(CONFIG_RISCV_SBI))
-               return csr_read(CSR_TIMEH);
-       return mmio_get_cycles_hi();
-}
-
 static inline u64 get_cycles64(void)
 {
        u32 hi, lo;
index 8ce9d60..f56c66b 100644 (file)
@@ -8,6 +8,8 @@
 #ifndef _ASM_RISCV_UACCESS_H
 #define _ASM_RISCV_UACCESS_H
 
+#include <asm/pgtable.h>               /* for TASK_SIZE */
+
 /*
  * User space memory access functions
  */
@@ -62,11 +64,9 @@ static inline void set_fs(mm_segment_t fs)
        current_thread_info()->addr_limit = fs;
 }
 
-#define segment_eq(a, b) ((a).seg == (b).seg)
-
+#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg)
 #define user_addr_max()        (get_fs().seg)
 
-
 /**
  * access_ok: - Checks if a user space pointer is valid
  * @addr: User space pointer to start of block to check
index 3099362..f839f16 100644 (file)
@@ -60,7 +60,8 @@ int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
        return ret;
 }
 
-static __always_inline u64 __arch_get_hw_counter(s32 clock_mode)
+static __always_inline u64 __arch_get_hw_counter(s32 clock_mode,
+                                                const struct vdso_data *vd)
 {
        /*
         * The purpose of csr_read(CSR_TIME) is to trap the system into
index a5287ab..dc93710 100644 (file)
@@ -31,7 +31,7 @@ obj-y += cacheinfo.o
 obj-y  += patch.o
 obj-$(CONFIG_MMU) += vdso.o vdso/
 
-obj-$(CONFIG_RISCV_M_MODE)     += clint.o traps_misaligned.o
+obj-$(CONFIG_RISCV_M_MODE)     += traps_misaligned.o
 obj-$(CONFIG_FPU)              += fpu.o
 obj-$(CONFIG_SMP)              += smpboot.o
 obj-$(CONFIG_SMP)              += smp.o
diff --git a/arch/riscv/kernel/clint.c b/arch/riscv/kernel/clint.c
deleted file mode 100644 (file)
index 3647980..0000000
+++ /dev/null
@@ -1,44 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (c) 2019 Christoph Hellwig.
- */
-
-#include <linux/io.h>
-#include <linux/of_address.h>
-#include <linux/types.h>
-#include <asm/clint.h>
-#include <asm/csr.h>
-#include <asm/timex.h>
-#include <asm/smp.h>
-
-/*
- * This is the layout used by the SiFive clint, which is also shared by the qemu
- * virt platform, and the Kendryte KD210 at least.
- */
-#define CLINT_IPI_OFF          0
-#define CLINT_TIME_CMP_OFF     0x4000
-#define CLINT_TIME_VAL_OFF     0xbff8
-
-u32 __iomem *clint_ipi_base;
-
-void clint_init_boot_cpu(void)
-{
-       struct device_node *np;
-       void __iomem *base;
-
-       np = of_find_compatible_node(NULL, NULL, "riscv,clint0");
-       if (!np) {
-               panic("clint not found");
-               return;
-       }
-
-       base = of_iomap(np, 0);
-       if (!base)
-               panic("could not map CLINT");
-
-       clint_ipi_base = base + CLINT_IPI_OFF;
-       riscv_time_cmp = base + CLINT_TIME_CMP_OFF;
-       riscv_time_val = base + CLINT_TIME_VAL_OFF;
-
-       clint_clear_ipi(boot_cpu_hartid);
-}
index d0c5c31..0a4e81b 100644 (file)
@@ -77,16 +77,10 @@ relocate:
        csrw CSR_SATP, a0
 .align 2
 1:
-       /* Set trap vector to exception handler */
-       la a0, handle_exception
+       /* Set trap vector to spin forever to help debug */
+       la a0, .Lsecondary_park
        csrw CSR_TVEC, a0
 
-       /*
-        * Set sup0 scratch register to 0, indicating to exception vector that
-        * we are presently executing in kernel.
-        */
-       csrw CSR_SCRATCH, zero
-
        /* Reload the global pointer */
 .option push
 .option norelax
@@ -144,9 +138,23 @@ secondary_start_common:
        la a0, swapper_pg_dir
        call relocate
 #endif
+       call setup_trap_vector
        tail smp_callin
 #endif /* CONFIG_SMP */
 
+.align 2
+setup_trap_vector:
+       /* Set trap vector to exception handler */
+       la a0, handle_exception
+       csrw CSR_TVEC, a0
+
+       /*
+        * Set sup0 scratch register to 0, indicating to exception vector that
+        * we are presently executing in kernel.
+        */
+       csrw CSR_SCRATCH, zero
+       ret
+
 .Lsecondary_park:
        /* We lack SMP support or have too many harts, so park this hart */
        wfi
@@ -240,6 +248,7 @@ clear_bss_done:
        call relocate
 #endif /* CONFIG_MMU */
 
+       call setup_trap_vector
        /* Restore C environment */
        la tp, init_task
        sw zero, TASK_TI_CPU(tp)
index f383ef5..226ccce 100644 (file)
@@ -547,6 +547,18 @@ static inline long sbi_get_firmware_version(void)
        return __sbi_base_ecall(SBI_EXT_BASE_GET_IMP_VERSION);
 }
 
+static void sbi_send_cpumask_ipi(const struct cpumask *target)
+{
+       struct cpumask hartid_mask;
+
+       riscv_cpuid_to_hartid_mask(target, &hartid_mask);
+
+       sbi_send_ipi(cpumask_bits(&hartid_mask));
+}
+
+static struct riscv_ipi_ops sbi_ipi_ops = {
+       .ipi_inject = sbi_send_cpumask_ipi
+};
 
 int __init sbi_init(void)
 {
@@ -587,5 +599,7 @@ int __init sbi_init(void)
                __sbi_rfence    = __sbi_rfence_v01;
        }
 
+       riscv_set_ipi_ops(&sbi_ipi_ops);
+
        return 0;
 }
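
riscv_set_ipi_ops() replaces the compile-time SBI-vs-CLINT split seen in the smp.c hunk below: whichever backend probes (the SBI layer here, or a CLINT-style driver on M-mode systems, as suggested by the CLINT_TIMER selects above) registers its inject/clear callbacks at run time. A hypothetical backend registration might look like this; everything other than struct riscv_ipi_ops and riscv_set_ipi_ops is illustrative:

#include <linux/cpumask.h>
#include <linux/init.h>
#include <asm/smp.h>

static void my_ipi_inject(const struct cpumask *target)
{
        /* write the per-hart software-interrupt registers for @target */
}

static void my_ipi_clear(void)
{
        /* clear the pending software interrupt on the local hart */
}

static struct riscv_ipi_ops my_ipi_ops = {
        .ipi_inject = my_ipi_inject,
        .ipi_clear  = my_ipi_clear,
};

static int __init my_ipi_backend_init(void)
{
        riscv_set_ipi_ops(&my_ipi_ops);
        return 0;
}
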
index f04373b..2c6dd32 100644 (file)
@@ -18,7 +18,6 @@
 #include <linux/swiotlb.h>
 #include <linux/smp.h>
 
-#include <asm/clint.h>
 #include <asm/cpu_ops.h>
 #include <asm/setup.h>
 #include <asm/sections.h>
@@ -79,7 +78,6 @@ void __init setup_arch(char **cmdline_p)
 #else
        unflatten_device_tree();
 #endif
-       clint_init_boot_cpu();
 
 #ifdef CONFIG_SWIOTLB
        swiotlb_init(1);
index 554b0fb..ea028d9 100644 (file)
@@ -18,7 +18,6 @@
 #include <linux/delay.h>
 #include <linux/irq_work.h>
 
-#include <asm/clint.h>
 #include <asm/sbi.h>
 #include <asm/tlbflush.h>
 #include <asm/cacheflush.h>
@@ -86,9 +85,25 @@ static void ipi_stop(void)
                wait_for_interrupt();
 }
 
+static struct riscv_ipi_ops *ipi_ops;
+
+void riscv_set_ipi_ops(struct riscv_ipi_ops *ops)
+{
+       ipi_ops = ops;
+}
+EXPORT_SYMBOL_GPL(riscv_set_ipi_ops);
+
+void riscv_clear_ipi(void)
+{
+       if (ipi_ops && ipi_ops->ipi_clear)
+               ipi_ops->ipi_clear();
+
+       csr_clear(CSR_IP, IE_SIE);
+}
+EXPORT_SYMBOL_GPL(riscv_clear_ipi);
+
 static void send_ipi_mask(const struct cpumask *mask, enum ipi_message_type op)
 {
-       struct cpumask hartid_mask;
        int cpu;
 
        smp_mb__before_atomic();
@@ -96,33 +111,22 @@ static void send_ipi_mask(const struct cpumask *mask, enum ipi_message_type op)
                set_bit(op, &ipi_data[cpu].bits);
        smp_mb__after_atomic();
 
-       riscv_cpuid_to_hartid_mask(mask, &hartid_mask);
-       if (IS_ENABLED(CONFIG_RISCV_SBI))
-               sbi_send_ipi(cpumask_bits(&hartid_mask));
+       if (ipi_ops && ipi_ops->ipi_inject)
+               ipi_ops->ipi_inject(mask);
        else
-               clint_send_ipi_mask(mask);
+               pr_warn("SMP: IPI inject method not available\n");
 }
 
 static void send_ipi_single(int cpu, enum ipi_message_type op)
 {
-       int hartid = cpuid_to_hartid_map(cpu);
-
        smp_mb__before_atomic();
        set_bit(op, &ipi_data[cpu].bits);
        smp_mb__after_atomic();
 
-       if (IS_ENABLED(CONFIG_RISCV_SBI))
-               sbi_send_ipi(cpumask_bits(cpumask_of(hartid)));
-       else
-               clint_send_ipi_single(hartid);
-}
-
-static inline void clear_ipi(void)
-{
-       if (IS_ENABLED(CONFIG_RISCV_SBI))
-               csr_clear(CSR_IP, IE_SIE);
+       if (ipi_ops && ipi_ops->ipi_inject)
+               ipi_ops->ipi_inject(cpumask_of(cpu));
        else
-               clint_clear_ipi(cpuid_to_hartid_map(smp_processor_id()));
+               pr_warn("SMP: IPI inject method not available\n");
 }
 
 #ifdef CONFIG_IRQ_WORK
@@ -140,7 +144,7 @@ void handle_IPI(struct pt_regs *regs)
 
        irq_enter();
 
-       clear_ipi();
+       riscv_clear_ipi();
 
        while (true) {
                unsigned long ops;
index 356825a..96167d5 100644 (file)
@@ -24,7 +24,6 @@
 #include <linux/of.h>
 #include <linux/sched/task_stack.h>
 #include <linux/sched/mm.h>
-#include <asm/clint.h>
 #include <asm/cpu_ops.h>
 #include <asm/irq.h>
 #include <asm/mmu_context.h>
@@ -147,8 +146,7 @@ asmlinkage __visible void smp_callin(void)
        struct mm_struct *mm = &init_mm;
        unsigned int curr_cpuid = smp_processor_id();
 
-       if (!IS_ENABLED(CONFIG_RISCV_SBI))
-               clint_clear_ipi(cpuid_to_hartid_map(smp_processor_id()));
+       riscv_clear_ipi();
 
        /* All kernel threads share the same mm context.  */
        mmgrab(mm);
index 5873835..716d64e 100644 (file)
@@ -109,7 +109,7 @@ good_area:
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
-       fault = handle_mm_fault(vma, addr, flags);
+       fault = handle_mm_fault(vma, addr, flags, regs);
 
        /*
         * If we need to retry but a fatal signal is pending, handle the
@@ -127,21 +127,7 @@ good_area:
                BUG();
        }
 
-       /*
-        * Major/minor page fault accounting is only done on the
-        * initial attempt. If we go through a retry, it is extremely
-        * likely that the page will be found in page cache at that point.
-        */
        if (flags & FAULT_FLAG_ALLOW_RETRY) {
-               if (fault & VM_FAULT_MAJOR) {
-                       tsk->maj_flt++;
-                       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
-                                     1, regs, addr);
-               } else {
-                       tsk->min_flt++;
-                       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
-                                     1, regs, addr);
-               }
                if (fault & VM_FAULT_RETRY) {
                        flags |= FAULT_FLAG_TRIED;
 
index e63940b..8b98c50 100644 (file)
@@ -7,5 +7,4 @@ obj-$(CONFIG_S390_HYPFS_FS)     += hypfs/
 obj-$(CONFIG_APPLDATA_BASE)    += appldata/
 obj-y                          += net/
 obj-$(CONFIG_PCI)              += pci/
-obj-$(CONFIG_NUMA)             += numa/
 obj-$(CONFIG_ARCH_HAS_KEXEC_PURGATORY) += purgatory/
index 8c0b529..3d86e12 100644 (file)
@@ -126,7 +126,6 @@ config S390
        select HAVE_ARCH_JUMP_LABEL_RELATIVE
        select HAVE_ARCH_KASAN
        select HAVE_ARCH_KASAN_VMALLOC
-       select CLOCKSOURCE_VALIDATE_LAST_CYCLE
        select CPU_NO_EFFICIENT_FFS if !HAVE_MARCH_Z9_109_FEATURES
        select HAVE_ARCH_SECCOMP_FILTER
        select HAVE_ARCH_SOFT_DIRTY
@@ -766,6 +765,7 @@ config VFIO_AP
        def_tristate n
        prompt "VFIO support for AP devices"
        depends on S390_AP_IOMMU && VFIO_MDEV_DEVICE && KVM
+       depends on ZCRYPT
        help
                This driver grants access to Adjunct Processor (AP) devices
                via the VFIO mediated device interface.
index 8dfa2cf..ba94b03 100644 (file)
@@ -27,7 +27,7 @@ KBUILD_CFLAGS_DECOMPRESSOR := $(CLANG_FLAGS) -m64 -O2
 KBUILD_CFLAGS_DECOMPRESSOR += -DDISABLE_BRANCH_PROFILING -D__NO_FORTIFY
 KBUILD_CFLAGS_DECOMPRESSOR += -fno-delete-null-pointer-checks -msoft-float
 KBUILD_CFLAGS_DECOMPRESSOR += -fno-asynchronous-unwind-tables
-KBUILD_CFLAGS_DECOMPRESSOR += $(call cc-option,-ffreestanding)
+KBUILD_CFLAGS_DECOMPRESSOR += -ffreestanding
 KBUILD_CFLAGS_DECOMPRESSOR += $(call cc-disable-warning, address-of-packed-member)
 KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO),-g)
 KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO_DWARF4), $(call cc-option, -gdwarf-4,))
index cae473a..11c5952 100644 (file)
@@ -45,7 +45,11 @@ static inline int atomic_fetch_add(int i, atomic_t *v)
 static inline void atomic_add(int i, atomic_t *v)
 {
 #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
-       if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
+       /*
+        * Order of conditions is important to circumvent gcc 10 bug:
+        * https://gcc.gnu.org/pipermail/gcc-patches/2020-July/549318.html
+        */
+       if ((i > -129) && (i < 128) && __builtin_constant_p(i)) {
                __atomic_add_const(i, &v->counter);
                return;
        }
@@ -112,7 +116,11 @@ static inline s64 atomic64_fetch_add(s64 i, atomic64_t *v)
 static inline void atomic64_add(s64 i, atomic64_t *v)
 {
 #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
-       if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
+       /*
+        * Order of conditions is important to circumvent gcc 10 bug:
+        * https://gcc.gnu.org/pipermail/gcc-patches/2020-July/549318.html
+        */
+       if ((i > -129) && (i < 128) && __builtin_constant_p(i)) {
                __atomic64_add_const(i, (long *)&v->counter);
                return;
        }
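
The only change in the two hunks above is the order of the conditions: the constant-range tests now come before __builtin_constant_p(), which, per the referenced report, avoids a gcc 10 issue where the fast path using the add-immediate instruction was never selected. The shape of the workaround in isolation (fast_add_const() and slow_add() are hypothetical placeholders):

static inline void add_example(int i, int *counter)
{
        /* range checks first, __builtin_constant_p() last */
        if ((i > -129) && (i < 128) && __builtin_constant_p(i)) {
                /* compile-time constant fitting a signed 8-bit immediate */
                fast_add_const(i, counter);
        } else {
                slow_add(i, counter);
        }
}
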
index 17a2626..c1b82bc 100644 (file)
@@ -2,7 +2,7 @@
 /*
  *   S/390 debug facility
  *
- *    Copyright IBM Corp. 1999, 2000
+ *    Copyright IBM Corp. 1999, 2020
  */
 #ifndef DEBUG_H
 #define DEBUG_H
 #define DEBUG_DATA(entry) (char *)(entry + 1) /* data is stored behind */
                                              /* the entry information */
 
-#define __DEBUG_FEATURE_VERSION           2  /* version of debug feature */
+#define __DEBUG_FEATURE_VERSION           3  /* version of debug feature */
 
 struct __debug_entry {
-       union {
-               struct {
-                       unsigned long clock     : 52;
-                       unsigned long exception :  1;
-                       unsigned long level     :  3;
-                       unsigned long cpuid     :  8;
-               } fields;
-               unsigned long stck;
-       } id;
+       unsigned long clock     : 60;
+       unsigned long exception :  1;
+       unsigned long level     :  3;
        void *caller;
+       unsigned short cpu;
 } __packed;
 
 typedef struct __debug_entry debug_entry_t;
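
A small user-space sketch of the flattened entry layout above, assuming gcc bit-field and packing extensions; the struct and field names only mirror the header for illustration and the timestamp is an example value:

#include <stdio.h>

struct entry {
        unsigned long clock     : 60;
        unsigned long exception :  1;
        unsigned long level     :  3;
        void *caller;
        unsigned short cpu;
} __attribute__((packed));

int main(void)
{
        struct entry e = {
                .clock = 1598102585123456UL,    /* example timestamp */
                .exception = 0,
                .level = 3,
                .caller = 0,
                .cpu = 7,
        };

        /* one 64-bit bit-field word + pointer + 16-bit cpu, packed */
        printf("sizeof(struct entry) = %zu\n", sizeof(e));
        printf("clock=%lu level=%u cpu=%u\n",
               (unsigned long)e.clock, (unsigned int)e.level, e.cpu);
        return 0;
}
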
index fbb5075..3a0ac0c 100644 (file)
@@ -86,12 +86,6 @@ static inline const struct cpumask *cpumask_of_node(int node)
 
 #define pcibus_to_node(bus) __pcibus_to_node(bus)
 
-#define node_distance(a, b) __node_distance(a, b)
-static inline int __node_distance(int a, int b)
-{
-       return 0;
-}
-
 #else /* !CONFIG_NUMA */
 
 #define numa_node_id numa_node_id
index 3244388..f09444d 100644 (file)
@@ -32,7 +32,7 @@
 #define USER_DS_SACF   (3)
 
 #define get_fs()        (current->thread.mm_segment)
-#define segment_eq(a,b) (((a) & 2) == ((b) & 2))
+#define uaccess_kernel() ((get_fs() & 2) == KERNEL_DS)
 
 void set_fs(mm_segment_t fs);
 
index a8f1369..efca709 100644 (file)
@@ -49,6 +49,7 @@ CFLAGS_REMOVE_nospec-branch.o += $(CC_FLAGS_EXPOLINE)
 
 obj-$(CONFIG_MODULES)          += module.o
 obj-$(CONFIG_SCHED_TOPOLOGY)   += topology.o
+obj-$(CONFIG_NUMA)             += numa.o
 obj-$(CONFIG_AUDIT)            += audit.o
 compat-obj-$(CONFIG_AUDIT)     += compat_audit.o
 obj-$(CONFIG_COMPAT)           += compat_linux.o compat_signal.o
index beb4b44..b6619ae 100644 (file)
@@ -2,7 +2,7 @@
 /*
  *   S/390 debug facility
  *
- *    Copyright IBM Corp. 1999, 2012
+ *    Copyright IBM Corp. 1999, 2020
  *
  *    Author(s): Michael Holzheu (holzheu@de.ibm.com),
  *              Holger Smolinski (Holger.Smolinski@de.ibm.com)
@@ -433,7 +433,7 @@ static int debug_format_entry(file_private_info_t *p_info)
        act_entry = (debug_entry_t *) ((char *)id_snap->areas[p_info->act_area]
                                       [p_info->act_page] + p_info->act_entry);
 
-       if (act_entry->id.stck == 0LL)
+       if (act_entry->clock == 0LL)
                goto out; /* empty entry */
        if (view->header_proc)
                len += view->header_proc(id_snap, view, p_info->act_area,
@@ -829,12 +829,17 @@ static inline debug_entry_t *get_active_entry(debug_info_t *id)
 static inline void debug_finish_entry(debug_info_t *id, debug_entry_t *active,
                                      int level, int exception)
 {
-       active->id.stck = get_tod_clock_fast() -
-               *(unsigned long long *) &tod_clock_base[1];
-       active->id.fields.cpuid = smp_processor_id();
+       unsigned char clk[STORE_CLOCK_EXT_SIZE];
+       unsigned long timestamp;
+
+       get_tod_clock_ext(clk);
+       timestamp = *(unsigned long *) &clk[0] >> 4;
+       timestamp -= TOD_UNIX_EPOCH >> 12;
+       active->clock = timestamp;
+       active->cpu = smp_processor_id();
        active->caller = __builtin_return_address(0);
-       active->id.fields.exception = exception;
-       active->id.fields.level = level;
+       active->exception = exception;
+       active->level = level;
        proceed_active_entry(id);
        if (exception)
                proceed_active_area(id);
@@ -1398,25 +1403,24 @@ static int debug_hex_ascii_format_fn(debug_info_t *id, struct debug_view *view,
 int debug_dflt_header_fn(debug_info_t *id, struct debug_view *view,
                         int area, debug_entry_t *entry, char *out_buf)
 {
-       unsigned long base, sec, usec;
+       unsigned long sec, usec;
        unsigned long caller;
        unsigned int level;
        char *except_str;
        int rc = 0;
 
-       level = entry->id.fields.level;
-       base = (*(unsigned long *) &tod_clock_base[0]) >> 4;
-       sec = (entry->id.stck >> 12) + base - (TOD_UNIX_EPOCH >> 12);
+       level = entry->level;
+       sec = entry->clock;
        usec = do_div(sec, USEC_PER_SEC);
 
-       if (entry->id.fields.exception)
+       if (entry->exception)
                except_str = "*";
        else
                except_str = "-";
        caller = (unsigned long) entry->caller;
-       rc += sprintf(out_buf, "%02i %011ld:%06lu %1u %1s %02i %pK  ",
+       rc += sprintf(out_buf, "%02i %011ld:%06lu %1u %1s %04u %pK  ",
                      area, sec, usec, level, except_str,
-                     entry->id.fields.cpuid, (void *)caller);
+                     entry->cpu, (void *)caller);
        return rc;
 }
 EXPORT_SYMBOL(debug_dflt_header_fn);
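
With the rework above, the stored clock value is split directly into seconds and microseconds by the default header formatter; a user-space sketch of that split, using plain division in place of do_div() (the constant below is just an example value):

#include <stdio.h>

#define USEC_PER_SEC 1000000UL

int main(void)
{
        unsigned long clock = 1598102585123456UL;  /* example entry->clock */
        unsigned long sec  = clock / USEC_PER_SEC; /* do_div() quotient */
        unsigned long usec = clock % USEC_PER_SEC; /* do_div() remainder */

        printf("%011lu:%06lu\n", sec, usec);       /* 01598102585:123456 */
        return 0;
}
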
index b06dec1..ec801d3 100644 (file)
@@ -160,24 +160,6 @@ asmlinkage void execve_tail(void)
        asm volatile("sfpc %0" : : "d" (0));
 }
 
-/*
- * fill in the FPU structure for a core dump.
- */
-int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs)
-{
-       save_fpu_regs();
-       fpregs->fpc = current->thread.fpu.fpc;
-       fpregs->pad = 0;
-       if (MACHINE_HAS_VX)
-               convert_vx_to_fp((freg_t *)&fpregs->fprs,
-                                current->thread.fpu.vxrs);
-       else
-               memcpy(&fpregs->fprs, current->thread.fpu.fprs,
-                      sizeof(fpregs->fprs));
-       return 1;
-}
-EXPORT_SYMBOL(dump_fpu);
-
 unsigned long get_wchan(struct task_struct *p)
 {
        struct unwind_state state;
index f1fda43..10456bc 100644 (file)
 146  common    writev                  sys_writev                      compat_sys_writev
 147  common    getsid                  sys_getsid                      sys_getsid
 148  common    fdatasync               sys_fdatasync                   sys_fdatasync
-149  common    _sysctl                 sys_sysctl                      compat_sys_sysctl
+149  common    _sysctl                 -                               -
 150  common    mlock                   sys_mlock                       sys_mlock
 151  common    munlock                 sys_munlock                     sys_munlock
 152  common    mlockall                sys_mlockall                    sys_mlockall
index 1608fd9..2f17729 100644 (file)
@@ -2768,7 +2768,7 @@ static struct page *get_map_page(struct kvm *kvm, u64 uaddr)
        struct page *page = NULL;
 
        mmap_read_lock(kvm->mm);
-       get_user_pages_remote(NULL, kvm->mm, uaddr, 1, FOLL_WRITE,
+       get_user_pages_remote(kvm->mm, uaddr, 1, FOLL_WRITE,
                              &page, NULL, NULL);
        mmap_read_unlock(kvm->mm);
        return page;
index 66da278..6b74b92 100644 (file)
@@ -1892,7 +1892,7 @@ static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
 
                r = set_guest_storage_key(current->mm, hva, keys[i], 0);
                if (r) {
-                       r = fixup_user_fault(current, current->mm, hva,
+                       r = fixup_user_fault(current->mm, hva,
                                             FAULT_FLAG_WRITE, &unlocked);
                        if (r)
                                break;
index 2f721a9..cd74989 100644 (file)
@@ -273,7 +273,7 @@ retry:
        rc = get_guest_storage_key(current->mm, vmaddr, &key);
 
        if (rc) {
-               rc = fixup_user_fault(current, current->mm, vmaddr,
+               rc = fixup_user_fault(current->mm, vmaddr,
                                      FAULT_FLAG_WRITE, &unlocked);
                if (!rc) {
                        mmap_read_unlock(current->mm);
@@ -319,7 +319,7 @@ retry:
        mmap_read_lock(current->mm);
        rc = reset_guest_reference_bit(current->mm, vmaddr);
        if (rc < 0) {
-               rc = fixup_user_fault(current, current->mm, vmaddr,
+               rc = fixup_user_fault(current->mm, vmaddr,
                                      FAULT_FLAG_WRITE, &unlocked);
                if (!rc) {
                        mmap_read_unlock(current->mm);
@@ -390,7 +390,7 @@ static int handle_sske(struct kvm_vcpu *vcpu)
                                                m3 & SSKE_MC);
 
                if (rc < 0) {
-                       rc = fixup_user_fault(current, current->mm, vmaddr,
+                       rc = fixup_user_fault(current->mm, vmaddr,
                                              FAULT_FLAG_WRITE, &unlocked);
                        rc = !rc ? -EAGAIN : rc;
                }
@@ -1094,7 +1094,7 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
                        rc = cond_set_guest_storage_key(current->mm, vmaddr,
                                                        key, NULL, nq, mr, mc);
                        if (rc < 0) {
-                               rc = fixup_user_fault(current, current->mm, vmaddr,
+                               rc = fixup_user_fault(current->mm, vmaddr,
                                                      FAULT_FLAG_WRITE, &unlocked);
                                rc = !rc ? -EAGAIN : rc;
                        }
index eb382ce..7c98899 100644 (file)
@@ -64,6 +64,7 @@ static noinline int test_unwind(struct task_struct *task, struct pt_regs *regs,
                        break;
                if (state.reliable && !addr) {
                        pr_err("unwind state reliable but addr is 0\n");
+                       kfree(bt);
                        return -EINVAL;
                }
                sprint_symbol(sym, addr);
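
The one-line fix above closes a leak on an early error return; a generic user-space sketch of that pattern (build_backtrace() and its error condition are illustrative only):

#include <stdio.h>
#include <stdlib.h>

static int build_backtrace(const char *sym)
{
        char *bt = malloc(4096);

        if (!bt)
                return -1;
        if (!sym || !*sym) {
                /* error after a successful allocation: must free first */
                free(bt);
                return -1;
        }
        snprintf(bt, 4096, "backtrace: %s\n", sym);
        fputs(bt, stdout);
        free(bt);
        return 0;
}

int main(void)
{
        return build_backtrace("start_kernel") ? 1 : 0;
}
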
index aebf918..4c8c063 100644 (file)
@@ -476,7 +476,7 @@ retry:
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
-       fault = handle_mm_fault(vma, address, flags);
+       fault = handle_mm_fault(vma, address, flags, regs);
        if (fault_signal_pending(fault, regs)) {
                fault = VM_FAULT_SIGNAL;
                if (flags & FAULT_FLAG_RETRY_NOWAIT)
@@ -486,21 +486,7 @@ retry:
        if (unlikely(fault & VM_FAULT_ERROR))
                goto out_up;
 
-       /*
-        * Major/minor page fault accounting is only done on the
-        * initial attempt. If we go through a retry, it is extremely
-        * likely that the page will be found in page cache at that point.
-        */
        if (flags & FAULT_FLAG_ALLOW_RETRY) {
-               if (fault & VM_FAULT_MAJOR) {
-                       tsk->maj_flt++;
-                       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
-                                     regs, address);
-               } else {
-                       tsk->min_flt++;
-                       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
-                                     regs, address);
-               }
                if (fault & VM_FAULT_RETRY) {
                        if (IS_ENABLED(CONFIG_PGSTE) && gmap &&
                            (flags & FAULT_FLAG_RETRY_NOWAIT)) {
index 190357f..373542c 100644 (file)
@@ -649,7 +649,7 @@ retry:
                rc = vmaddr;
                goto out_up;
        }
-       if (fixup_user_fault(current, gmap->mm, vmaddr, fault_flags,
+       if (fixup_user_fault(gmap->mm, vmaddr, fault_flags,
                             &unlocked)) {
                rc = -EFAULT;
                goto out_up;
@@ -879,7 +879,7 @@ static int gmap_pte_op_fixup(struct gmap *gmap, unsigned long gaddr,
 
        BUG_ON(gmap_is_shadow(gmap));
        fault_flags = (prot == PROT_WRITE) ? FAULT_FLAG_WRITE : 0;
-       if (fixup_user_fault(current, mm, vmaddr, fault_flags, &unlocked))
+       if (fixup_user_fault(mm, vmaddr, fault_flags, &unlocked))
                return -EFAULT;
        if (unlocked)
                /* lost mmap_lock, caller has to retry __gmap_translate */
@@ -2485,23 +2485,36 @@ void gmap_sync_dirty_log_pmd(struct gmap *gmap, unsigned long bitmap[4],
 }
 EXPORT_SYMBOL_GPL(gmap_sync_dirty_log_pmd);
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static int thp_split_walk_pmd_entry(pmd_t *pmd, unsigned long addr,
+                                   unsigned long end, struct mm_walk *walk)
+{
+       struct vm_area_struct *vma = walk->vma;
+
+       split_huge_pmd(vma, pmd, addr);
+       return 0;
+}
+
+static const struct mm_walk_ops thp_split_walk_ops = {
+       .pmd_entry      = thp_split_walk_pmd_entry,
+};
+
 static inline void thp_split_mm(struct mm_struct *mm)
 {
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        struct vm_area_struct *vma;
-       unsigned long addr;
 
        for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
-               for (addr = vma->vm_start;
-                    addr < vma->vm_end;
-                    addr += PAGE_SIZE)
-                       follow_page(vma, addr, FOLL_SPLIT);
                vma->vm_flags &= ~VM_HUGEPAGE;
                vma->vm_flags |= VM_NOHUGEPAGE;
+               walk_page_vma(vma, &thp_split_walk_ops, NULL);
        }
        mm->def_flags |= VM_NOHUGEPAGE;
-#endif
 }
+#else
+static inline void thp_split_mm(struct mm_struct *mm)
+{
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
 /*
  * Remove all empty zero pages from the mapping for lazy refaulting
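
The hunk above replaces a hand-rolled per-address loop with a callback table handed to a generic page walker; a user-space analogue of that ops-plus-walker shape (walk_range(), split_entry() and the step size are made up for illustration):

#include <stdio.h>

struct walk_ops {
        int (*entry)(unsigned long addr, void *private);
};

static int walk_range(unsigned long start, unsigned long end,
                      unsigned long step, const struct walk_ops *ops,
                      void *private)
{
        unsigned long addr;
        int ret;

        for (addr = start; addr < end; addr += step) {
                ret = ops->entry(addr, private);  /* one hook per entry */
                if (ret)
                        return ret;
        }
        return 0;
}

static int split_entry(unsigned long addr, void *private)
{
        printf("split at 0x%lx\n", addr);
        return 0;
}

static const struct walk_ops split_walk_ops = {
        .entry = split_entry,
};

int main(void)
{
        return walk_range(0x1000, 0x4000, 0x1000, &split_walk_ops, NULL);
}
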
index 9fc2b01..d209271 100644 (file)
@@ -1,75 +1,77 @@
 # SPDX-License-Identifier: GPL-2.0
 config SUPERH
        def_bool y
+       select ARCH_32BIT_OFF_T
+       select ARCH_HAVE_CUSTOM_GPIO_H
+       select ARCH_HAVE_NMI_SAFE_CMPXCHG if (GUSA_RB || CPU_SH4A)
        select ARCH_HAS_BINFMT_FLAT if !MMU
+       select ARCH_HAS_GIGANTIC_PAGE
+       select ARCH_HAS_GCOV_PROFILE_ALL
        select ARCH_HAS_PTE_SPECIAL
        select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
+       select ARCH_HIBERNATION_POSSIBLE if MMU
        select ARCH_MIGHT_HAVE_PC_PARPORT
-       select HAVE_PATA_PLATFORM
+       select ARCH_WANT_IPC_PARSE_VERSION
        select CLKDEV_LOOKUP
+       select CPU_NO_EFFICIENT_FFS
        select DMA_DECLARE_COHERENT
-       select HAVE_IDE if HAS_IOPORT_MAP
-       select HAVE_OPROFILE
+       select GENERIC_ATOMIC64
+       select GENERIC_CLOCKEVENTS
+       select GENERIC_CMOS_UPDATE if SH_SH03 || SH_DREAMCAST
+       select GENERIC_IDLE_POLL_SETUP
+       select GENERIC_IRQ_SHOW
+       select GENERIC_PCI_IOMAP if PCI
+       select GENERIC_SCHED_CLOCK
+       select GENERIC_STRNCPY_FROM_USER
+       select GENERIC_STRNLEN_USER
+       select GENERIC_SMP_IDLE_THREAD
+       select GUP_GET_PTE_LOW_HIGH if X2TLB
+       select HAVE_ARCH_AUDITSYSCALL
+       select HAVE_ARCH_KGDB
+       select HAVE_ARCH_SECCOMP_FILTER
        select HAVE_ARCH_TRACEHOOK
-       select HAVE_PERF_EVENTS
+       select HAVE_COPY_THREAD_TLS
        select HAVE_DEBUG_BUGVERBOSE
-       select HAVE_FAST_GUP if MMU
-       select ARCH_HAVE_CUSTOM_GPIO_H
-       select ARCH_HAVE_NMI_SAFE_CMPXCHG if (GUSA_RB || CPU_SH4A)
-       select ARCH_HAS_GCOV_PROFILE_ALL
-       select PERF_USE_VMALLOC
        select HAVE_DEBUG_KMEMLEAK
-       select HAVE_KERNEL_GZIP
-       select CPU_NO_EFFICIENT_FFS
+       select HAVE_DYNAMIC_FTRACE
+       select HAVE_FAST_GUP if MMU
+       select HAVE_FUNCTION_GRAPH_TRACER
+       select HAVE_FUNCTION_TRACER
+       select HAVE_FUTEX_CMPXCHG if FUTEX
+       select HAVE_FTRACE_MCOUNT_RECORD
+       select HAVE_HW_BREAKPOINT
+       select HAVE_IDE if HAS_IOPORT_MAP
+       select HAVE_IOREMAP_PROT if MMU && !X2TLB
        select HAVE_KERNEL_BZIP2
+       select HAVE_KERNEL_GZIP
        select HAVE_KERNEL_LZMA
-       select HAVE_KERNEL_XZ
        select HAVE_KERNEL_LZO
+       select HAVE_KERNEL_XZ
+       select HAVE_KPROBES
+       select HAVE_KRETPROBES
+       select HAVE_MIXED_BREAKPOINTS_REGS
+       select HAVE_MOD_ARCH_SPECIFIC if DWARF_UNWINDER
+       select HAVE_NMI
+       select HAVE_OPROFILE
+       select HAVE_PATA_PLATFORM
+       select HAVE_PERF_EVENTS
+       select HAVE_REGS_AND_STACK_ACCESS_API
        select HAVE_UID16
-       select ARCH_WANT_IPC_PARSE_VERSION
+       select HAVE_STACKPROTECTOR
        select HAVE_SYSCALL_TRACEPOINTS
-       select HAVE_REGS_AND_STACK_ACCESS_API
-       select MAY_HAVE_SPARSE_IRQ
        select IRQ_FORCED_THREADING
-       select RTC_LIB
-       select GENERIC_ATOMIC64
-       select GENERIC_IRQ_SHOW
-       select GENERIC_SMP_IDLE_THREAD
-       select GENERIC_IDLE_POLL_SETUP
-       select GENERIC_CLOCKEVENTS
-       select GENERIC_CMOS_UPDATE if SH_SH03 || SH_DREAMCAST
-       select GENERIC_PCI_IOMAP if PCI
-       select GENERIC_SCHED_CLOCK
-       select GENERIC_STRNCPY_FROM_USER
-       select GENERIC_STRNLEN_USER
-       select HAVE_MOD_ARCH_SPECIFIC if DWARF_UNWINDER
+       select MAY_HAVE_SPARSE_IRQ
        select MODULES_USE_ELF_RELA
+       select NEED_SG_DMA_LENGTH
+       select NO_DMA if !MMU && !DMA_COHERENT
        select NO_GENERIC_PCI_IOPORT_MAP if PCI
-       select OLD_SIGSUSPEND
        select OLD_SIGACTION
+       select OLD_SIGSUSPEND
        select PCI_DOMAINS if PCI
-       select HAVE_ARCH_AUDITSYSCALL
-       select HAVE_FUTEX_CMPXCHG if FUTEX
-       select HAVE_NMI
-       select NEED_SG_DMA_LENGTH
-       select ARCH_HAS_GIGANTIC_PAGE
-       select ARCH_32BIT_OFF_T
-       select GUP_GET_PTE_LOW_HIGH if X2TLB
-       select HAVE_KPROBES
-       select HAVE_KRETPROBES
-       select HAVE_IOREMAP_PROT if MMU && !X2TLB
-       select HAVE_FUNCTION_TRACER
-       select HAVE_FTRACE_MCOUNT_RECORD
-       select HAVE_DYNAMIC_FTRACE
-       select ARCH_WANT_IPC_PARSE_VERSION
-       select HAVE_FUNCTION_GRAPH_TRACER
-       select HAVE_ARCH_KGDB
-       select HAVE_HW_BREAKPOINT
-       select HAVE_MIXED_BREAKPOINTS_REGS
        select PERF_EVENTS
-       select ARCH_HIBERNATION_POSSIBLE if MMU
+       select PERF_USE_VMALLOC
+       select RTC_LIB
        select SPARSE_IRQ
-       select HAVE_STACKPROTECTOR
        help
          The SuperH is a RISC processor targeted for use in embedded systems
          and consumer electronics; it was also used in the Sega Dreamcast
@@ -123,8 +125,8 @@ config ARCH_HAS_ILOG2_U64
 
 config NO_IOPORT_MAP
        def_bool !PCI
-       depends on !SH_CAYMAN && !SH_SH4202_MICRODEV && !SH_SHMIN && \
-                  !SH_HP6XX && !SH_SOLUTION_ENGINE
+       depends on !SH_SH4202_MICRODEV && !SH_SHMIN && !SH_HP6XX && \
+                  !SH_SOLUTION_ENGINE
 
 config IO_TRAPPED
        bool
@@ -136,8 +138,10 @@ config DMA_COHERENT
        bool
 
 config DMA_NONCOHERENT
-       def_bool !DMA_COHERENT
+       def_bool !NO_DMA && !DMA_COHERENT
+       select ARCH_HAS_DMA_PREP_COHERENT
        select ARCH_HAS_SYNC_DMA_FOR_DEVICE
+       select DMA_DIRECT_REMAP
 
 config PGTABLE_LEVELS
        default 3 if X2TLB
@@ -630,7 +634,7 @@ config SMP
          Y to "Enhanced Real Time Clock Support", below.
 
          See also <file:Documentation/admin-guide/lockup-watchdogs.rst> and the SMP-HOWTO
-         available at <http://www.tldp.org/docs.html#howto>.
+         available at <https://www.tldp.org/docs.html#howto>.
 
          If you don't know what to do here, say N.
 
@@ -726,7 +730,6 @@ config ZERO_PAGE_OFFSET
 config BOOT_LINK_OFFSET
        hex
        default "0x00210000" if SH_SHMIN
-       default "0x00400000" if SH_CAYMAN
        default "0x00810000" if SH_7780_SOLUTION_ENGINE
        default "0x009e0000" if SH_TITAN
        default "0x01800000" if SH_SDK7780
index da9cf95..2faebfd 100644 (file)
@@ -15,11 +15,7 @@ ifneq ($(SUBARCH),$(ARCH))
   endif
 endif
 
-ifeq ($(ARCH),sh)
 KBUILD_DEFCONFIG       := shx3_defconfig
-else
-KBUILD_DEFCONFIG       := cayman_defconfig
-endif
 
 isa-y                                  := any
 isa-$(CONFIG_SH_DSP)                   := sh
@@ -143,7 +139,6 @@ machdir-$(CONFIG_SH_SH7763RDP)                      += mach-sh7763rdp
 machdir-$(CONFIG_SH_SH4202_MICRODEV)           += mach-microdev
 machdir-$(CONFIG_SH_LANDISK)                   += mach-landisk
 machdir-$(CONFIG_SH_LBOX_RE2)                  += mach-lboxre2
-machdir-$(CONFIG_SH_CAYMAN)                    += mach-cayman
 machdir-$(CONFIG_SH_RSK)                       += mach-rsk
 
 ifneq ($(machdir-y),)
index fb0ca0c..83bcb6d 100644 (file)
@@ -340,12 +340,6 @@ config SH_MAGIC_PANEL_R2
        help
          Select Magic Panel R2 if configuring for Magic Panel R2.
 
-config SH_CAYMAN
-       bool "Hitachi Cayman"
-       depends on CPU_SUBTYPE_SH5_101 || CPU_SUBTYPE_SH5_103
-       select HAVE_PCI
-       select ARCH_MIGHT_HAVE_PC_SERIO
-
 config SH_POLARIS
        bool "SMSC Polaris"
        select CPU_HAS_IPR_IRQ
index ef9c87d..6ea85e4 100644 (file)
@@ -126,14 +126,14 @@ static void __init sh2007_init_irq(void)
  */
 static void __init sh2007_setup(char **cmdline_p)
 {
-       printk(KERN_INFO "SH-2007 Setup...");
+       pr_info("SH-2007 Setup...");
 
        /* setup wait control registers for area 5 */
        __raw_writel(CS5BCR_D, CS5BCR);
        __raw_writel(CS5WCR_D, CS5WCR);
        __raw_writel(CS5PCR_D, CS5PCR);
 
-       printk(KERN_INFO " done.\n");
+       pr_cont(" done.\n");
 }
 
 /*
diff --git a/arch/sh/boards/mach-cayman/Makefile b/arch/sh/boards/mach-cayman/Makefile
deleted file mode 100644 (file)
index 775a4be..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Makefile for the Hitachi Cayman specific parts of the kernel
-#
-obj-y := setup.o irq.o panic.o
diff --git a/arch/sh/boards/mach-cayman/irq.c b/arch/sh/boards/mach-cayman/irq.c
deleted file mode 100644 (file)
index 0305d0b..0000000
+++ /dev/null
@@ -1,148 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * arch/sh/mach-cayman/irq.c - SH-5 Cayman Interrupt Support
- *
- * This file handles the board specific parts of the Cayman interrupt system
- *
- * Copyright (C) 2002 Stuart Menefy
- */
-#include <linux/io.h>
-#include <linux/irq.h>
-#include <linux/interrupt.h>
-#include <linux/signal.h>
-#include <cpu/irq.h>
-#include <asm/page.h>
-
-/* Setup for the SMSC FDC37C935 / LAN91C100FD */
-#define SMSC_IRQ         IRQ_IRL1
-
-/* Setup for PCI Bus 2, which transmits interrupts via the EPLD */
-#define PCI2_IRQ         IRQ_IRL3
-
-unsigned long epld_virt;
-
-#define EPLD_BASE        0x04002000
-#define EPLD_STATUS_BASE (epld_virt + 0x10)
-#define EPLD_MASK_BASE   (epld_virt + 0x20)
-
-/* Note the SMSC SuperIO chip and SMSC LAN chip interrupts are all muxed onto
-   the same SH-5 interrupt */
-
-static irqreturn_t cayman_interrupt_smsc(int irq, void *dev_id)
-{
-        printk(KERN_INFO "CAYMAN: spurious SMSC interrupt\n");
-       return IRQ_NONE;
-}
-
-static irqreturn_t cayman_interrupt_pci2(int irq, void *dev_id)
-{
-        printk(KERN_INFO "CAYMAN: spurious PCI interrupt, IRQ %d\n", irq);
-       return IRQ_NONE;
-}
-
-static void enable_cayman_irq(struct irq_data *data)
-{
-       unsigned int irq = data->irq;
-       unsigned long flags;
-       unsigned long mask;
-       unsigned int reg;
-       unsigned char bit;
-
-       irq -= START_EXT_IRQS;
-       reg = EPLD_MASK_BASE + ((irq / 8) << 2);
-       bit = 1<<(irq % 8);
-       local_irq_save(flags);
-       mask = __raw_readl(reg);
-       mask |= bit;
-       __raw_writel(mask, reg);
-       local_irq_restore(flags);
-}
-
-static void disable_cayman_irq(struct irq_data *data)
-{
-       unsigned int irq = data->irq;
-       unsigned long flags;
-       unsigned long mask;
-       unsigned int reg;
-       unsigned char bit;
-
-       irq -= START_EXT_IRQS;
-       reg = EPLD_MASK_BASE + ((irq / 8) << 2);
-       bit = 1<<(irq % 8);
-       local_irq_save(flags);
-       mask = __raw_readl(reg);
-       mask &= ~bit;
-       __raw_writel(mask, reg);
-       local_irq_restore(flags);
-}
-
-struct irq_chip cayman_irq_type = {
-       .name           = "Cayman-IRQ",
-       .irq_unmask     = enable_cayman_irq,
-       .irq_mask       = disable_cayman_irq,
-};
-
-int cayman_irq_demux(int evt)
-{
-       int irq = intc_evt_to_irq[evt];
-
-       if (irq == SMSC_IRQ) {
-               unsigned long status;
-               int i;
-
-               status = __raw_readl(EPLD_STATUS_BASE) &
-                        __raw_readl(EPLD_MASK_BASE) & 0xff;
-               if (status == 0) {
-                       irq = -1;
-               } else {
-                       for (i=0; i<8; i++) {
-                               if (status & (1<<i))
-                                       break;
-                       }
-                       irq = START_EXT_IRQS + i;
-               }
-       }
-
-       if (irq == PCI2_IRQ) {
-               unsigned long status;
-               int i;
-
-               status = __raw_readl(EPLD_STATUS_BASE + 3 * sizeof(u32)) &
-                        __raw_readl(EPLD_MASK_BASE + 3 * sizeof(u32)) & 0xff;
-               if (status == 0) {
-                       irq = -1;
-               } else {
-                       for (i=0; i<8; i++) {
-                               if (status & (1<<i))
-                                       break;
-                       }
-                       irq = START_EXT_IRQS + (3 * 8) + i;
-               }
-       }
-
-       return irq;
-}
-
-void init_cayman_irq(void)
-{
-       int i;
-
-       epld_virt = (unsigned long)ioremap(EPLD_BASE, 1024);
-       if (!epld_virt) {
-               printk(KERN_ERR "Cayman IRQ: Unable to remap EPLD\n");
-               return;
-       }
-
-       for (i = 0; i < NR_EXT_IRQS; i++) {
-               irq_set_chip_and_handler(START_EXT_IRQS + i,
-                                        &cayman_irq_type, handle_level_irq);
-       }
-
-       /* Setup the SMSC interrupt */
-       if (request_irq(SMSC_IRQ, cayman_interrupt_smsc, 0, "Cayman SMSC Mux",
-                       NULL))
-               pr_err("Failed to register Cayman SMSC Mux interrupt\n");
-       if (request_irq(PCI2_IRQ, cayman_interrupt_pci2, 0, "Cayman PCI2 Mux",
-                       NULL))
-               pr_err("Failed to register Cayman PCI2 Mux interrupt\n");
-}
diff --git a/arch/sh/boards/mach-cayman/panic.c b/arch/sh/boards/mach-cayman/panic.c
deleted file mode 100644 (file)
index cfc4631..0000000
+++ /dev/null
@@ -1,46 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2003  Richard Curnow, SuperH UK Limited
- */
-
-#include <linux/kernel.h>
-#include <linux/io.h>
-#include <cpu/registers.h>
-
-/* THIS IS A PHYSICAL ADDRESS */
-#define HDSP2534_ADDR (0x04002100)
-
-static void poor_mans_delay(void)
-{
-       int i;
-
-       for (i = 0; i < 2500000; i++)
-               cpu_relax();
-}
-
-static void show_value(unsigned long x)
-{
-       int i;
-       unsigned nibble;
-       for (i = 0; i < 8; i++) {
-               nibble = ((x >> (i * 4)) & 0xf);
-
-               __raw_writeb(nibble + ((nibble > 9) ? 55 : 48),
-                         HDSP2534_ADDR + 0xe0 + ((7 - i) << 2));
-       }
-}
-
-void
-panic_handler(unsigned long panicPC, unsigned long panicSSR,
-             unsigned long panicEXPEVT)
-{
-       while (1) {
-               /* This piece of code displays the PC on the LED display */
-               show_value(panicPC);
-               poor_mans_delay();
-               show_value(panicSSR);
-               poor_mans_delay();
-               show_value(panicEXPEVT);
-               poor_mans_delay();
-       }
-}
diff --git a/arch/sh/boards/mach-cayman/setup.c b/arch/sh/boards/mach-cayman/setup.c
deleted file mode 100644 (file)
index 8ef76e2..0000000
+++ /dev/null
@@ -1,181 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * arch/sh/mach-cayman/setup.c
- *
- * SH5 Cayman support
- *
- * Copyright (C) 2002  David J. Mckay & Benedict Gaster
- * Copyright (C) 2003 - 2007  Paul Mundt
- */
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <cpu/irq.h>
-
-/*
- * Platform Dependent Interrupt Priorities.
- */
-
-/* Using defaults defined in irq.h */
-#define        RES NO_PRIORITY         /* Disabled */
-#define IR0 IRL0_PRIORITY      /* IRLs */
-#define IR1 IRL1_PRIORITY
-#define IR2 IRL2_PRIORITY
-#define IR3 IRL3_PRIORITY
-#define PCA INTA_PRIORITY      /* PCI Ints */
-#define PCB INTB_PRIORITY
-#define PCC INTC_PRIORITY
-#define PCD INTD_PRIORITY
-#define SER TOP_PRIORITY
-#define ERR TOP_PRIORITY
-#define PW0 TOP_PRIORITY
-#define PW1 TOP_PRIORITY
-#define PW2 TOP_PRIORITY
-#define PW3 TOP_PRIORITY
-#define DM0 NO_PRIORITY                /* DMA Ints */
-#define DM1 NO_PRIORITY
-#define DM2 NO_PRIORITY
-#define DM3 NO_PRIORITY
-#define DAE NO_PRIORITY
-#define TU0 TIMER_PRIORITY     /* TMU Ints */
-#define TU1 NO_PRIORITY
-#define TU2 NO_PRIORITY
-#define TI2 NO_PRIORITY
-#define ATI NO_PRIORITY                /* RTC Ints */
-#define PRI NO_PRIORITY
-#define CUI RTC_PRIORITY
-#define ERI SCIF_PRIORITY      /* SCIF Ints */
-#define RXI SCIF_PRIORITY
-#define BRI SCIF_PRIORITY
-#define TXI SCIF_PRIORITY
-#define ITI TOP_PRIORITY       /* WDT Ints */
-
-/* Setup for the SMSC FDC37C935 */
-#define SMSC_SUPERIO_BASE      0x04000000
-#define SMSC_CONFIG_PORT_ADDR  0x3f0
-#define SMSC_INDEX_PORT_ADDR   SMSC_CONFIG_PORT_ADDR
-#define SMSC_DATA_PORT_ADDR    0x3f1
-
-#define SMSC_ENTER_CONFIG_KEY  0x55
-#define SMSC_EXIT_CONFIG_KEY   0xaa
-
-#define SMCS_LOGICAL_DEV_INDEX 0x07
-#define SMSC_DEVICE_ID_INDEX   0x20
-#define SMSC_DEVICE_REV_INDEX  0x21
-#define SMSC_ACTIVATE_INDEX    0x30
-#define SMSC_PRIMARY_BASE_INDEX  0x60
-#define SMSC_SECONDARY_BASE_INDEX 0x62
-#define SMSC_PRIMARY_INT_INDEX 0x70
-#define SMSC_SECONDARY_INT_INDEX 0x72
-
-#define SMSC_IDE1_DEVICE       1
-#define SMSC_KEYBOARD_DEVICE   7
-#define SMSC_CONFIG_REGISTERS  8
-
-#define SMSC_SUPERIO_READ_INDEXED(index) ({ \
-       outb((index), SMSC_INDEX_PORT_ADDR); \
-       inb(SMSC_DATA_PORT_ADDR); })
-#define SMSC_SUPERIO_WRITE_INDEXED(val, index) ({ \
-       outb((index), SMSC_INDEX_PORT_ADDR); \
-       outb((val),   SMSC_DATA_PORT_ADDR); })
-
-#define IDE1_PRIMARY_BASE      0x01f0
-#define IDE1_SECONDARY_BASE    0x03f6
-
-unsigned long smsc_superio_virt;
-
-int platform_int_priority[NR_INTC_IRQS] = {
-       IR0, IR1, IR2, IR3, PCA, PCB, PCC, PCD, /* IRQ  0- 7 */
-       RES, RES, RES, RES, SER, ERR, PW3, PW2, /* IRQ  8-15 */
-       PW1, PW0, DM0, DM1, DM2, DM3, DAE, RES, /* IRQ 16-23 */
-       RES, RES, RES, RES, RES, RES, RES, RES, /* IRQ 24-31 */
-       TU0, TU1, TU2, TI2, ATI, PRI, CUI, ERI, /* IRQ 32-39 */
-       RXI, BRI, TXI, RES, RES, RES, RES, RES, /* IRQ 40-47 */
-       RES, RES, RES, RES, RES, RES, RES, RES, /* IRQ 48-55 */
-       RES, RES, RES, RES, RES, RES, RES, ITI, /* IRQ 56-63 */
-};
-
-static int __init smsc_superio_setup(void)
-{
-       unsigned char devid, devrev;
-
-       smsc_superio_virt = (unsigned long)ioremap(SMSC_SUPERIO_BASE, 1024);
-       if (!smsc_superio_virt) {
-               panic("Unable to remap SMSC SuperIO\n");
-       }
-
-       /* Initially the chip is in run state */
-       /* Put it into configuration state */
-       outb(SMSC_ENTER_CONFIG_KEY, SMSC_CONFIG_PORT_ADDR);
-       outb(SMSC_ENTER_CONFIG_KEY, SMSC_CONFIG_PORT_ADDR);
-
-       /* Read device ID info */
-       devid = SMSC_SUPERIO_READ_INDEXED(SMSC_DEVICE_ID_INDEX);
-       devrev = SMSC_SUPERIO_READ_INDEXED(SMSC_DEVICE_REV_INDEX);
-       printk("SMSC SuperIO devid %02x rev %02x\n", devid, devrev);
-
-       /* Select the keyboard device */
-       SMSC_SUPERIO_WRITE_INDEXED(SMSC_KEYBOARD_DEVICE, SMCS_LOGICAL_DEV_INDEX);
-
-       /* enable it */
-       SMSC_SUPERIO_WRITE_INDEXED(1, SMSC_ACTIVATE_INDEX);
-
-       /* Select the interrupts */
-       /* On a PC keyboard is IRQ1, mouse is IRQ12 */
-       SMSC_SUPERIO_WRITE_INDEXED(1, SMSC_PRIMARY_INT_INDEX);
-       SMSC_SUPERIO_WRITE_INDEXED(12, SMSC_SECONDARY_INT_INDEX);
-
-       /*
-        * Only IDE1 exists on the Cayman
-        */
-
-       /* Power it on */
-       SMSC_SUPERIO_WRITE_INDEXED(1 << SMSC_IDE1_DEVICE, 0x22);
-
-       SMSC_SUPERIO_WRITE_INDEXED(SMSC_IDE1_DEVICE, SMCS_LOGICAL_DEV_INDEX);
-       SMSC_SUPERIO_WRITE_INDEXED(1, SMSC_ACTIVATE_INDEX);
-
-       SMSC_SUPERIO_WRITE_INDEXED(IDE1_PRIMARY_BASE >> 8,
-                                  SMSC_PRIMARY_BASE_INDEX + 0);
-       SMSC_SUPERIO_WRITE_INDEXED(IDE1_PRIMARY_BASE & 0xff,
-                                  SMSC_PRIMARY_BASE_INDEX + 1);
-
-       SMSC_SUPERIO_WRITE_INDEXED(IDE1_SECONDARY_BASE >> 8,
-                                  SMSC_SECONDARY_BASE_INDEX + 0);
-       SMSC_SUPERIO_WRITE_INDEXED(IDE1_SECONDARY_BASE & 0xff,
-                                  SMSC_SECONDARY_BASE_INDEX + 1);
-
-       SMSC_SUPERIO_WRITE_INDEXED(14, SMSC_PRIMARY_INT_INDEX);
-
-       SMSC_SUPERIO_WRITE_INDEXED(SMSC_CONFIG_REGISTERS,
-                                  SMCS_LOGICAL_DEV_INDEX);
-
-       SMSC_SUPERIO_WRITE_INDEXED(0x00, 0xc2); /* GP42 = nIDE1_OE */
-       SMSC_SUPERIO_WRITE_INDEXED(0x01, 0xc5); /* GP45 = IDE1_IRQ */
-       SMSC_SUPERIO_WRITE_INDEXED(0x00, 0xc6); /* GP46 = nIOROP */
-       SMSC_SUPERIO_WRITE_INDEXED(0x00, 0xc7); /* GP47 = nIOWOP */
-
-       /* Exit the configuration state */
-       outb(SMSC_EXIT_CONFIG_KEY, SMSC_CONFIG_PORT_ADDR);
-
-       return 0;
-}
-device_initcall(smsc_superio_setup);
-
-static void __iomem *cayman_ioport_map(unsigned long port, unsigned int len)
-{
-       if (port < 0x400) {
-               extern unsigned long smsc_superio_virt;
-               return (void __iomem *)((port << 2) | smsc_superio_virt);
-       }
-
-       return (void __iomem *)port;
-}
-
-extern void init_cayman_irq(void);
-
-static struct sh_machine_vector mv_cayman __initmv = {
-       .mv_name                = "Hitachi Cayman",
-       .mv_ioport_map          = cayman_ioport_map,
-       .mv_init_irq            = init_cayman_irq,
-};
index 16b4d8b..2c44b94 100644 (file)
@@ -82,6 +82,9 @@ device_initcall(landisk_devices_setup);
 
 static void __init landisk_setup(char **cmdline_p)
 {
+       /* I/O port identity mapping */
+       __set_io_port_base(0);
+
        /* LED ON */
        __raw_writeb(__raw_readb(PA_LED) | 0x03, PA_LED);
 
index ad0e240..589d2d8 100644 (file)
@@ -28,10 +28,7 @@ IMAGE_OFFSET := $(shell /bin/bash -c 'printf "0x%08x" \
                        $(CONFIG_BOOT_LINK_OFFSET)]')
 endif
 
-ifeq ($(CONFIG_MCOUNT),y)
-ORIG_CFLAGS := $(KBUILD_CFLAGS)
-KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
-endif
+ccflags-remove-$(CONFIG_MCOUNT) += -pg
 
 LDFLAGS_vmlinux := --oformat $(ld-bfd) -Ttext $(IMAGE_OFFSET) -e startup \
                   -T $(obj)/../../kernel/vmlinux.lds
diff --git a/arch/sh/configs/cayman_defconfig b/arch/sh/configs/cayman_defconfig
deleted file mode 100644 (file)
index 911437c..0000000
+++ /dev/null
@@ -1,66 +0,0 @@
-CONFIG_POSIX_MQUEUE=y
-CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_FORCE_MAX_ZONEORDER=11
-CONFIG_MEMORY_START=0x80000000
-CONFIG_MEMORY_SIZE=0x00400000
-CONFIG_FLATMEM_MANUAL=y
-CONFIG_CACHE_OFF=y
-CONFIG_SH_PCLK_FREQ=50000000
-CONFIG_HEARTBEAT=y
-CONFIG_PREEMPT=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_PNP=y
-# CONFIG_IPV6 is not set
-# CONFIG_FW_LOADER is not set
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_SCSI=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_SCSI_MULTI_LUN=y
-CONFIG_SCSI_SPI_ATTRS=y
-CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_SERIO is not set
-CONFIG_HW_RANDOM=y
-CONFIG_I2C=m
-CONFIG_WATCHDOG=y
-CONFIG_FB=y
-CONFIG_FIRMWARE_EDID=y
-CONFIG_FB_MODE_HELPERS=y
-CONFIG_FB_SH_MOBILE_LCDC=m
-CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_FONTS=y
-CONFIG_FONT_8x16=y
-CONFIG_LOGO=y
-# CONFIG_LOGO_LINUX_MONO is not set
-# CONFIG_LOGO_LINUX_VGA16 is not set
-# CONFIG_LOGO_LINUX_CLUT224 is not set
-# CONFIG_LOGO_SUPERH_MONO is not set
-# CONFIG_LOGO_SUPERH_VGA16 is not set
-CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_MINIX_FS=y
-CONFIG_ROMFS_FS=y
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
-CONFIG_ROOT_NFS=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_DETECT_HUNG_TASK=y
-CONFIG_SCHEDSTATS=y
-CONFIG_FRAME_POINTER=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
index ae067e0..6a82c7b 100644 (file)
@@ -1,7 +1,6 @@
 CONFIG_SYSVIPC=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_SYSCTL_SYSCALL is not set
 CONFIG_SLAB=y
 CONFIG_PROFILING=y
 CONFIG_MODULES=y
index a5b865a..9a988c3 100644 (file)
@@ -5,7 +5,6 @@ CONFIG_LOG_BUF_SHIFT=14
 CONFIG_NAMESPACES=y
 CONFIG_UTS_NS=y
 CONFIG_IPC_NS=y
-# CONFIG_SYSCTL_SYSCALL is not set
 CONFIG_SLAB=y
 CONFIG_PROFILING=y
 CONFIG_OPROFILE=y
index a92db66..70e6605 100644 (file)
@@ -3,7 +3,6 @@ CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-# CONFIG_SYSCTL_SYSCALL is not set
 CONFIG_SLAB=y
 # CONFIG_BLK_DEV_BSG is not set
 CONFIG_CPU_SUBTYPE_SH7709=y
index 567af75..ba6ec04 100644 (file)
@@ -1,6 +1,5 @@
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_SYSCTL_SYSCALL is not set
 CONFIG_KALLSYMS_EXTRA_PASS=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
index 10f6d37..05e4ac6 100644 (file)
@@ -1,6 +1,5 @@
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_SYSCTL_SYSCALL is not set
 CONFIG_KALLSYMS_EXTRA_PASS=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
index ed84d13..c65667d 100644 (file)
@@ -2,7 +2,6 @@ CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-# CONFIG_SYSCTL_SYSCALL is not set
 CONFIG_SLAB=y
 # CONFIG_BLK_DEV_BSG is not set
 CONFIG_CPU_SUBTYPE_SH4_202=y
index 37e9521..a24cf8c 100644 (file)
@@ -4,7 +4,6 @@ CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-# CONFIG_SYSCTL_SYSCALL is not set
 CONFIG_SLAB=y
 CONFIG_PROFILING=y
 CONFIG_OPROFILE=y
index c97ec60..e922659 100644 (file)
@@ -3,7 +3,6 @@ CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_FUTEX is not set
 # CONFIG_EPOLL is not set
 CONFIG_SLAB=y
index 55fce65..5978866 100644 (file)
@@ -7,7 +7,6 @@ CONFIG_RCU_TRACE=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_SYSCTL_SYSCALL is not set
 CONFIG_SLAB=y
 CONFIG_PROFILING=y
 CONFIG_OPROFILE=y
index 6a3cfe0..fc9c221 100644 (file)
@@ -1,7 +1,6 @@
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-# CONFIG_SYSCTL_SYSCALL is not set
 CONFIG_SLAB=y
 CONFIG_PROFILING=y
 CONFIG_OPROFILE=y
index 2b3d7d2..ff3fd67 100644 (file)
@@ -1,7 +1,6 @@
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-# CONFIG_SYSCTL_SYSCALL is not set
 CONFIG_SLAB=y
 CONFIG_PROFILING=y
 CONFIG_OPROFILE=y
index 21a43f1..ff5bb44 100644 (file)
@@ -18,7 +18,6 @@ CONFIG_USER_NS=y
 CONFIG_PID_NS=y
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_UID16 is not set
-# CONFIG_SYSCTL_SYSCALL is not set
 CONFIG_KALLSYMS_ALL=y
 # CONFIG_ELF_CORE is not set
 # CONFIG_COMPAT_BRK is not set
index 4e794e7..5d6c193 100644 (file)
@@ -2,7 +2,6 @@
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_FUTEX is not set
 # CONFIG_EPOLL is not set
 # CONFIG_SHMEM is not set
index 3264415..71a672c 100644 (file)
@@ -1,7 +1,6 @@
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_UID16 is not set
-# CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_KALLSYMS is not set
 # CONFIG_HOTPLUG is not set
 # CONFIG_ELF_CORE is not set
index 4496b94..ed00a6e 100644 (file)
@@ -2,7 +2,6 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-# CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_KALLSYMS is not set
 # CONFIG_HOTPLUG is not set
 CONFIG_SLAB=y
index b23f675..3f1c137 100644 (file)
@@ -5,7 +5,6 @@ CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-# CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_HOTPLUG is not set
 CONFIG_SLAB=y
 CONFIG_MODULES=y
index 1623436..4a02406 100644 (file)
@@ -3,7 +3,6 @@ CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-# CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_HOTPLUG is not set
 CONFIG_SLAB=y
 CONFIG_MODULES=y
index 360592d..8422599 100644 (file)
@@ -1,7 +1,6 @@
 # CONFIG_SWAP is not set
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
-# CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_HOTPLUG is not set
 CONFIG_SLAB=y
 # CONFIG_BLK_DEV_BSG is not set
index 87db9a8..f0073ed 100644 (file)
@@ -3,7 +3,6 @@ CONFIG_POSIX_MQUEUE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
-# CONFIG_SYSCTL_SYSCALL is not set
 CONFIG_SLAB=y
 CONFIG_PROFILING=y
 CONFIG_OPROFILE=m
index 0842691..0d81477 100644 (file)
@@ -2,7 +2,6 @@
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_FUTEX is not set
 # CONFIG_EPOLL is not set
 # CONFIG_SHMEM is not set
index d0933a9..a2700ab 100644 (file)
@@ -8,7 +8,6 @@ CONFIG_TASK_XACCT=y
 CONFIG_TASK_IO_ACCOUNTING=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
-# CONFIG_SYSCTL_SYSCALL is not set
 CONFIG_KALLSYMS_ALL=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
index d0a0aa7..26c5fd0 100644 (file)
@@ -5,7 +5,6 @@ CONFIG_LOG_BUF_SHIFT=14
 CONFIG_NAMESPACES=y
 CONFIG_UTS_NS=y
 CONFIG_IPC_NS=y
-# CONFIG_SYSCTL_SYSCALL is not set
 CONFIG_SLAB=y
 CONFIG_PROFILING=y
 CONFIG_OPROFILE=y
index a27b129..c0b6f40 100644 (file)
@@ -1,7 +1,6 @@
 # CONFIG_SWAP is not set
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_UID16 is not set
-# CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_KALLSYMS is not set
 # CONFIG_HOTPLUG is not set
 # CONFIG_BUG is not set
index 4ec961a..ba887f1 100644 (file)
@@ -6,7 +6,6 @@ CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=16
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-# CONFIG_SYSCTL_SYSCALL is not set
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
index a5c1e90..d313fd3 100644 (file)
@@ -25,4 +25,3 @@ obj-$(CONFIG_SH_7780_SOLUTION_ENGINE) += fixups-sdk7780.o
 obj-$(CONFIG_SH_TITAN)                 += fixups-titan.o
 obj-$(CONFIG_SH_LANDISK)               += fixups-landisk.o
 obj-$(CONFIG_SH_LBOX_RE2)              += fixups-rts7751r2d.o
-obj-$(CONFIG_SH_CAYMAN)                        += fixups-cayman.o
index fe163ec..2fd2b77 100644 (file)
@@ -54,7 +54,7 @@ int __init pci_is_66mhz_capable(struct pci_channel *hose,
        int cap66 = -1;
        u16 stat;
 
-       printk(KERN_INFO "PCI: Checking 66MHz capabilities...\n");
+       pr_info("PCI: Checking 66MHz capabilities...\n");
 
        for (pci_devfn = 0; pci_devfn < 0xff; pci_devfn++) {
                if (PCI_FUNC(pci_devfn))
@@ -134,7 +134,7 @@ unsigned int pcibios_handle_status_errors(unsigned long addr,
                pcibios_report_status(PCI_STATUS_REC_TARGET_ABORT |
                                      PCI_STATUS_SIG_TARGET_ABORT |
                                      PCI_STATUS_REC_MASTER_ABORT, 1);
-               printk("\n");
+               pr_cont("\n");
 
                cmd |= PCI_STATUS_REC_TARGET_ABORT;
        }
@@ -143,7 +143,7 @@ unsigned int pcibios_handle_status_errors(unsigned long addr,
                printk(KERN_DEBUG "PCI: parity error detected: ");
                pcibios_report_status(PCI_STATUS_PARITY |
                                      PCI_STATUS_DETECTED_PARITY, 1);
-               printk("\n");
+               pr_cont("\n");
 
                cmd |= PCI_STATUS_PARITY | PCI_STATUS_DETECTED_PARITY;
 
diff --git a/arch/sh/drivers/pci/fixups-cayman.c b/arch/sh/drivers/pci/fixups-cayman.c
deleted file mode 100644 (file)
index c797bfb..0000000
+++ /dev/null
@@ -1,78 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/types.h>
-#include <cpu/irq.h>
-#include "pci-sh5.h"
-
-int pcibios_map_platform_irq(const struct pci_dev *dev, u8 slot, u8 pin)
-{
-       int result = -1;
-
-       /* The complication here is that the PCI IRQ lines from the Cayman's 2
-          5V slots get into the CPU via a different path from the IRQ lines
-          from the 3 3.3V slots.  Thus, we have to detect whether the card's
-          interrupts go via the 5V or 3.3V path, i.e. the 'bridge swizzling'
-          at the point where we cross from 5V to 3.3V is not the normal case.
-
-          The added complication is that we don't know that the 5V slots are
-          always bus 2, because a card containing a PCI-PCI bridge may be
-          plugged into a 3.3V slot, and this changes the bus numbering.
-
-          Also, the Cayman has an intermediate PCI bus that goes a custom
-          expansion board header (and to the secondary bridge).  This bus has
-          never been used in practice.
-
-          The 1ary onboard PCI-PCI bridge is device 3 on bus 0
-          The 2ary onboard PCI-PCI bridge is device 0 on the 2ary bus of
-          the 1ary bridge.
-          */
-
-       struct slot_pin {
-               int slot;
-               int pin;
-       } path[4];
-       int i=0;
-
-       while (dev->bus->number > 0) {
-
-               slot = path[i].slot = PCI_SLOT(dev->devfn);
-               pin = path[i].pin = pci_swizzle_interrupt_pin(dev, pin);
-               dev = dev->bus->self;
-               i++;
-               if (i > 3) panic("PCI path to root bus too long!\n");
-       }
-
-       slot = PCI_SLOT(dev->devfn);
-       /* This is the slot on bus 0 through which the device is eventually
-          reachable. */
-
-       /* Now work back up. */
-       if ((slot < 3) || (i == 0)) {
-               /* Bus 0 (incl. PCI-PCI bridge itself) : perform the final
-                  swizzle now. */
-               result = IRQ_INTA + pci_swizzle_interrupt_pin(dev, pin) - 1;
-       } else {
-               i--;
-               slot = path[i].slot;
-               pin  = path[i].pin;
-               if (slot > 0) {
-                       panic("PCI expansion bus device found - not handled!\n");
-               } else {
-                       if (i > 0) {
-                               /* 5V slots */
-                               i--;
-                               slot = path[i].slot;
-                               pin  = path[i].pin;
-                               /* 'pin' was swizzled earlier wrt slot, don't do it again. */
-                               result = IRQ_P2INTA + (pin - 1);
-                       } else {
-                               /* IRQ for 2ary PCI-PCI bridge : unused */
-                               result = -1;
-                       }
-               }
-       }
-
-       return result;
-}
index 287b3a6..9a624a6 100644 (file)
@@ -148,7 +148,7 @@ static irqreturn_t sh7780_pci_serr_irq(int irq, void *dev_id)
 
        printk(KERN_DEBUG "PCI: system error received: ");
        pcibios_report_status(PCI_STATUS_SIG_SYSTEM_ERROR, 1);
-       printk("\n");
+       pr_cont("\n");
 
        /* Deassert SERR */
        __raw_writel(SH4_PCIINTM_SDIM, hose->reg_base + SH4_PCIINTM);
@@ -179,7 +179,7 @@ static int __init sh7780_pci_setup_irqs(struct pci_channel *hose)
        ret = request_irq(hose->serr_irq, sh7780_pci_serr_irq, 0,
                          "PCI SERR interrupt", hose);
        if (unlikely(ret)) {
-               printk(KERN_ERR "PCI: Failed hooking SERR IRQ\n");
+               pr_err("PCI: Failed hooking SERR IRQ\n");
                return ret;
        }
 
@@ -250,7 +250,7 @@ static int __init sh7780_pci_init(void)
        const char *type;
        int ret, i;
 
-       printk(KERN_NOTICE "PCI: Starting initialization.\n");
+       pr_notice("PCI: Starting initialization.\n");
 
        chan->reg_base = 0xfe040000;
 
@@ -270,7 +270,7 @@ static int __init sh7780_pci_init(void)
 
        id = __raw_readw(chan->reg_base + PCI_VENDOR_ID);
        if (id != PCI_VENDOR_ID_RENESAS) {
-               printk(KERN_ERR "PCI: Unknown vendor ID 0x%04x.\n", id);
+               pr_err("PCI: Unknown vendor ID 0x%04x.\n", id);
                return -ENODEV;
        }
 
@@ -281,14 +281,13 @@ static int __init sh7780_pci_init(void)
               (id == PCI_DEVICE_ID_RENESAS_SH7785) ? "SH7785" :
                                          NULL;
        if (unlikely(!type)) {
-               printk(KERN_ERR "PCI: Found an unsupported Renesas host "
-                      "controller, device id 0x%04x.\n", id);
+               pr_err("PCI: Found an unsupported Renesas host controller, device id 0x%04x.\n",
+                      id);
                return -EINVAL;
        }
 
-       printk(KERN_NOTICE "PCI: Found a Renesas %s host "
-              "controller, revision %d.\n", type,
-              __raw_readb(chan->reg_base + PCI_REVISION_ID));
+       pr_notice("PCI: Found a Renesas %s host controller, revision %d.\n",
+                 type, __raw_readb(chan->reg_base + PCI_REVISION_ID));
 
        /*
         * Now throw it in to register initialization mode and
@@ -395,9 +394,9 @@ static int __init sh7780_pci_init(void)
 
        sh7780_pci66_init(chan);
 
-       printk(KERN_NOTICE "PCI: Running at %dMHz.\n",
-              (__raw_readw(chan->reg_base + PCI_STATUS) & PCI_STATUS_66MHZ) ?
-              66 : 33);
+       pr_notice("PCI: Running at %dMHz.\n",
+                 (__raw_readw(chan->reg_base + PCI_STATUS) & PCI_STATUS_66MHZ)
+                 ? 66 : 33);
 
        return 0;
 
index c7784e1..6ab0b73 100644 (file)
@@ -120,8 +120,7 @@ int register_pci_controller(struct pci_channel *hose)
         * Do not panic here but later - this might happen before console init.
         */
        if (!hose->io_map_base) {
-               printk(KERN_WARNING
-                      "registering PCI controller with io_map_base unset\n");
+               pr_warn("registering PCI controller with io_map_base unset\n");
        }
 
        /*
@@ -145,7 +144,7 @@ out:
        for (--i; i >= 0; i--)
                release_resource(&hose->resources[i]);
 
-       printk(KERN_WARNING "Skipping PCI bus scan due to resource conflict\n");
+       pr_warn("Skipping PCI bus scan due to resource conflict\n");
        return -1;
 }
 
@@ -213,8 +212,8 @@ pcibios_bus_report_status_early(struct pci_channel *hose,
                                        pci_devfn, PCI_STATUS,
                                        status & status_mask);
                if (warn)
-                       printk("(%02x:%02x: %04X) ", current_bus,
-                              pci_devfn, status);
+                       pr_cont("(%02x:%02x: %04X) ", current_bus, pci_devfn,
+                               status);
        }
 }
 
@@ -249,7 +248,7 @@ pcibios_bus_report_status(struct pci_bus *bus, unsigned int status_mask,
                pci_write_config_word(dev, PCI_STATUS, status & status_mask);
 
                if (warn)
-                       printk("(%s: %04X) ", pci_name(dev), status);
+                       pr_cont("(%s: %04X) ", pci_name(dev), status);
        }
 
        list_for_each_entry(dev, &bus->devices, bus_list)
index 99ec668..feccfe6 100644 (file)
@@ -1,7 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 #ifndef __ASM_ADC_H
 #define __ASM_ADC_H
-#ifdef __KERNEL__
 /*
  * Copyright (C) 2004  Andriy Skulysh
  */
@@ -10,5 +9,4 @@
 
 int adc_single(unsigned int channel);
 
-#endif /* __KERNEL__ */
 #endif /* __ASM_ADC_H */
index 34bfbcd..468fba3 100644 (file)
@@ -7,8 +7,6 @@
 #ifndef __ASM_SH_ADDRSPACE_H
 #define __ASM_SH_ADDRSPACE_H
 
-#ifdef __KERNEL__
-
 #include <cpu/addrspace.h>
 
 /* If this CPU supports segmentation, hook up the helpers */
@@ -62,5 +60,4 @@
 #define P3_ADDR_MAX            P4SEG
 #endif
 
-#endif /* __KERNEL__ */
 #endif /* __ASM_SH_ADDRSPACE_H */
index 445dd14..450b585 100644 (file)
@@ -2,8 +2,6 @@
 #ifndef __ASM_SH_BITOPS_H
 #define __ASM_SH_BITOPS_H
 
-#ifdef __KERNEL__
-
 #ifndef _LINUX_BITOPS_H
 #error only <linux/bitops.h> can be included directly
 #endif
@@ -71,6 +69,4 @@ static inline unsigned long __ffs(unsigned long word)
 #include <asm-generic/bitops/__fls.h>
 #include <asm-generic/bitops/fls64.h>
 
-#endif /* __KERNEL__ */
-
 #endif /* __ASM_SH_BITOPS_H */
index 2408ac4..a293343 100644 (file)
@@ -8,7 +8,6 @@
  */
 #ifndef __ASM_SH_CACHE_H
 #define __ASM_SH_CACHE_H
-#ifdef __KERNEL__
 
 #include <linux/init.h>
 #include <cpu/cache.h>
@@ -44,5 +43,4 @@ struct cache_info {
        unsigned long flags;
 };
 #endif /* __ASSEMBLY__ */
-#endif /* __KERNEL__ */
 #endif /* __ASM_SH_CACHE_H */
index fe74000..4486a86 100644 (file)
@@ -2,8 +2,6 @@
 #ifndef __ASM_SH_CACHEFLUSH_H
 #define __ASM_SH_CACHEFLUSH_H
 
-#ifdef __KERNEL__
-
 #include <linux/mm.h>
 
 /*
@@ -109,5 +107,4 @@ static inline void *sh_cacheop_vaddr(void *vaddr)
        return vaddr;
 }
 
-#endif /* __KERNEL__ */
 #endif /* __ASM_SH_CACHEFLUSH_H */
index 4d5a21a..17d23ae 100644 (file)
@@ -6,7 +6,6 @@
  */
 #ifndef __ASM_SH_DMA_H
 #define __ASM_SH_DMA_H
-#ifdef __KERNEL__
 
 #include <linux/spinlock.h>
 #include <linux/wait.h>
@@ -144,5 +143,4 @@ extern int isa_dma_bridge_buggy;
 #define isa_dma_bridge_buggy   (0)
 #endif
 
-#endif /* __KERNEL__ */
 #endif /* __ASM_SH_DMA_H */
index 7661fb5..2862d6d 100644 (file)
@@ -90,7 +90,6 @@ typedef struct user_fpu_struct elf_fpregset_t;
 #endif
 #define ELF_ARCH       EM_SH
 
-#ifdef __KERNEL__
 /*
  * This is used to ensure we don't load something for the wrong architecture.
  */
@@ -209,5 +208,4 @@ do {                                                                \
        NEW_AUX_ENT(AT_L2_CACHESHAPE, l2_cache_shape);          \
 } while (0)
 
-#endif /* __KERNEL__ */
 #endif /* __ASM_SH_ELF_H */
index 43cfaf9..04584be 100644 (file)
@@ -37,11 +37,6 @@ struct user_regset;
 extern int do_fpu_inst(unsigned short, struct pt_regs *);
 extern int init_fpu(struct task_struct *);
 
-extern int fpregs_get(struct task_struct *target,
-                     const struct user_regset *regset,
-                     unsigned int pos, unsigned int count,
-                     void *kbuf, void __user *ubuf);
-
 static inline void __unlazy_fpu(struct task_struct *tsk, struct pt_regs *regs)
 {
        if (task_thread_info(tsk)->status & TS_USEDFPU) {
index 18133bf..87c2362 100644 (file)
@@ -6,9 +6,7 @@
  */
 #ifndef __ASM_SH_FREQ_H
 #define __ASM_SH_FREQ_H
-#ifdef __KERNEL__
 
 #include <cpu/freq.h>
 
-#endif /* __KERNEL__ */
 #endif /* __ASM_SH_FREQ_H */
index b39cda0..b70f3fc 100644 (file)
@@ -2,8 +2,6 @@
 #ifndef __ASM_SH_FUTEX_H
 #define __ASM_SH_FUTEX_H
 
-#ifdef __KERNEL__
-
 #include <linux/futex.h>
 #include <linux/uaccess.h>
 #include <asm/errno.h>
@@ -71,5 +69,4 @@ static inline int arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval,
        return ret;
 }
 
-#endif /* __KERNEL__ */
 #endif /* __ASM_SH_FUTEX_H */
index 26f0f9b..6d5c646 100644 (file)
 #include <asm/cache.h>
 #include <asm/addrspace.h>
 #include <asm/machvec.h>
+#include <asm/page.h>
 #include <linux/pgtable.h>
 #include <asm-generic/iomap.h>
 
-#ifdef __KERNEL__
 #define __IO_PREFIX     generic
 #include <asm/io_generic.h>
-#include <asm/io_trapped.h>
 #include <asm-generic/pci_iomap.h>
 #include <mach/mangle-port.h>
 
@@ -243,125 +242,38 @@ unsigned long long poke_real_address_q(unsigned long long addr,
 #define phys_to_virt(address)  (__va(address))
 #endif
 
-/*
- * On 32-bit SH, we traditionally have the whole physical address space
- * mapped at all times (as MIPS does), so "ioremap()" and "iounmap()" do
- * not need to do anything but place the address in the proper segment.
- * This is true for P1 and P2 addresses, as well as some P3 ones.
- * However, most of the P3 addresses and newer cores using extended
- * addressing need to map through page tables, so the ioremap()
- * implementation becomes a bit more complicated.
- *
- * See arch/sh/mm/ioremap.c for additional notes on this.
- *
- * We cheat a bit and always return uncachable areas until we've fixed
- * the drivers to handle caching properly.
- *
- * On the SH-5 the concept of segmentation in the 1:1 PXSEG sense simply
- * doesn't exist, so everything must go through page tables.
- */
 #ifdef CONFIG_MMU
+void iounmap(void __iomem *addr);
 void __iomem *__ioremap_caller(phys_addr_t offset, unsigned long size,
                               pgprot_t prot, void *caller);
-void iounmap(void __iomem *addr);
-
-static inline void __iomem *
-__ioremap(phys_addr_t offset, unsigned long size, pgprot_t prot)
-{
-       return __ioremap_caller(offset, size, prot, __builtin_return_address(0));
-}
-
-static inline void __iomem *
-__ioremap_29bit(phys_addr_t offset, unsigned long size, pgprot_t prot)
-{
-#ifdef CONFIG_29BIT
-       phys_addr_t last_addr = offset + size - 1;
-
-       /*
-        * For P1 and P2 space this is trivial, as everything is already
-        * mapped. Uncached access for P1 addresses are done through P2.
-        * In the P3 case or for addresses outside of the 29-bit space,
-        * mapping must be done by the PMB or by using page tables.
-        */
-       if (likely(PXSEG(offset) < P3SEG && PXSEG(last_addr) < P3SEG)) {
-               u64 flags = pgprot_val(prot);
-
-               /*
-                * Anything using the legacy PTEA space attributes needs
-                * to be kicked down to page table mappings.
-                */
-               if (unlikely(flags & _PAGE_PCC_MASK))
-                       return NULL;
-               if (unlikely(flags & _PAGE_CACHABLE))
-                       return (void __iomem *)P1SEGADDR(offset);
-
-               return (void __iomem *)P2SEGADDR(offset);
-       }
-
-       /* P4 above the store queues are always mapped. */
-       if (unlikely(offset >= P3_ADDR_MAX))
-               return (void __iomem *)P4SEGADDR(offset);
-#endif
-
-       return NULL;
-}
-
-static inline void __iomem *
-__ioremap_mode(phys_addr_t offset, unsigned long size, pgprot_t prot)
-{
-       void __iomem *ret;
-
-       ret = __ioremap_trapped(offset, size);
-       if (ret)
-               return ret;
-
-       ret = __ioremap_29bit(offset, size, prot);
-       if (ret)
-               return ret;
-
-       return __ioremap(offset, size, prot);
-}
-#else
-#define __ioremap(offset, size, prot)          ((void __iomem *)(offset))
-#define __ioremap_mode(offset, size, prot)     ((void __iomem *)(offset))
-static inline void iounmap(void __iomem *addr) {}
-#endif /* CONFIG_MMU */
 
 static inline void __iomem *ioremap(phys_addr_t offset, unsigned long size)
 {
-       return __ioremap_mode(offset, size, PAGE_KERNEL_NOCACHE);
+       return __ioremap_caller(offset, size, PAGE_KERNEL_NOCACHE,
+                       __builtin_return_address(0));
 }
 
 static inline void __iomem *
 ioremap_cache(phys_addr_t offset, unsigned long size)
 {
-       return __ioremap_mode(offset, size, PAGE_KERNEL);
+       return __ioremap_caller(offset, size, PAGE_KERNEL,
+                       __builtin_return_address(0));
 }
 #define ioremap_cache ioremap_cache
 
 #ifdef CONFIG_HAVE_IOREMAP_PROT
-static inline void __iomem *
-ioremap_prot(phys_addr_t offset, unsigned long size, unsigned long flags)
+static inline void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
+               unsigned long flags)
 {
-       return __ioremap_mode(offset, size, __pgprot(flags));
+       return __ioremap_caller(offset, size, __pgprot(flags),
+                       __builtin_return_address(0));
 }
-#endif
+#endif /* CONFIG_HAVE_IOREMAP_PROT */
 
-#ifdef CONFIG_IOREMAP_FIXED
-extern void __iomem *ioremap_fixed(phys_addr_t, unsigned long, pgprot_t);
-extern int iounmap_fixed(void __iomem *);
-extern void ioremap_fixed_init(void);
-#else
-static inline void __iomem *
-ioremap_fixed(phys_addr_t phys_addr, unsigned long size, pgprot_t prot)
-{
-       BUG();
-       return NULL;
-}
-
-static inline void ioremap_fixed_init(void) { }
-static inline int iounmap_fixed(void __iomem *addr) { return -EINVAL; }
-#endif
+#else /* CONFIG_MMU */
+#define iounmap(addr)          do { } while (0)
+#define ioremap(offset, size)  ((void __iomem *)(unsigned long)(offset))
+#endif /* CONFIG_MMU */
 
 #define ioremap_uc     ioremap
 
@@ -380,6 +292,4 @@ static inline int iounmap_fixed(void __iomem *addr) { return -EINVAL; }
 int valid_phys_addr_range(phys_addr_t addr, size_t size);
 int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
 
-#endif /* __KERNEL__ */
-
 #endif /* __ASM_SH_IO_H */
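
[editor's note] The io.h rework above drops the header-local __ioremap_mode()/__ioremap_29bit() fast paths and routes every ioremap() variant through __ioremap_caller(), passing __builtin_return_address(0) so the mapping code can record who requested the mapping. A minimal, self-contained userspace sketch of that caller-tracking idiom follows; all names are illustrative, not kernel API.

#include <stdio.h>
#include <stdlib.h>

/* The worker takes an extra "caller" cookie, useful for diagnostics. */
static void *map_caller(unsigned long phys, size_t size, void *caller)
{
        printf("mapping %#lx (+%zu bytes) for caller %p\n", phys, size, caller);
        return malloc(size);            /* stand-in for the real mapping */
}

/* Thin wrappers record their own return address, like ioremap() above. */
static inline void *map_nocache(unsigned long phys, size_t size)
{
        return map_caller(phys, size, __builtin_return_address(0));
}

int main(void)
{
        void *p = map_nocache(0xfe600000UL, 4096);      /* arbitrary example address */

        free(p);
        return 0;
}
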
index 9605453..de8693f 100644 (file)
@@ -12,8 +12,7 @@ enum die_val {
 };
 
 /* arch/sh/kernel/dumpstack.c */
-extern void printk_address(unsigned long address, int reliable,
-                          const char *loglvl);
+extern void printk_address(unsigned long address, int reliable);
 extern void dump_mem(const char *str, const char *loglvl,
                     unsigned long bottom, unsigned long top);
 
index 48e67d5..f664e51 100644 (file)
@@ -8,7 +8,6 @@
 #ifndef __ASM_SH_MMU_CONTEXT_H
 #define __ASM_SH_MMU_CONTEXT_H
 
-#ifdef __KERNEL__
 #include <cpu/mmu_context.h>
 #include <asm/tlbflush.h>
 #include <linux/uaccess.h>
@@ -177,5 +176,4 @@ static inline void disable_mmu(void)
 #define disable_mmu()  do { } while (0)
 #endif
 
-#endif /* __KERNEL__ */
 #endif /* __ASM_SH_MMU_CONTEXT_H */
index cbaee1d..6552a08 100644 (file)
@@ -2,8 +2,6 @@
 #ifndef __ASM_SH_MMZONE_H
 #define __ASM_SH_MMZONE_H
 
-#ifdef __KERNEL__
-
 #ifdef CONFIG_NEED_MULTIPLE_NODES
 #include <linux/numa.h>
 
@@ -44,5 +42,4 @@ void __init __add_active_range(unsigned int nid, unsigned long start_pfn,
 /* arch/sh/mm/init.c */
 void __init allocate_pgdat(unsigned int nid);
 
-#endif /* __KERNEL__ */
 #endif /* __ASM_SH_MMZONE_H */
index 10a36b1..ad22e88 100644 (file)
@@ -2,8 +2,6 @@
 #ifndef __ASM_SH_PCI_H
 #define __ASM_SH_PCI_H
 
-#ifdef __KERNEL__
-
 /* Can be used to override the logic in pci_scan_bus for skipping
    already-configured bus numbers - to be used for buggy BIOSes
    or architectures with incomplete PCI setup by the loader */
@@ -96,6 +94,4 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
        return channel ? 15 : 14;
 }
 
-#endif /* __KERNEL__ */
 #endif /* __ASM_SH_PCI_H */
-
index d444094..aa92cc9 100644 (file)
@@ -8,7 +8,6 @@
 
 #ifndef __ASM_SH_PROCESSOR_32_H
 #define __ASM_SH_PROCESSOR_32_H
-#ifdef __KERNEL__
 
 #include <linux/compiler.h>
 #include <linux/linkage.h>
@@ -203,5 +202,4 @@ static inline void prefetchw(const void *x)
 }
 #endif
 
-#endif /* __KERNEL__ */
 #endif /* __ASM_SH_PROCESSOR_32_H */
index 33d1d28..02e54a3 100644 (file)
@@ -24,8 +24,7 @@ typedef struct {
 #define USER_DS                KERNEL_DS
 #endif
 
-#define segment_eq(a, b) ((a).seg == (b).seg)
-
+#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg)
 
 #define get_fs()       (current_thread_info()->addr_limit)
 #define set_fs(x)      (current_thread_info()->addr_limit = (x))
index f054c30..891f2f8 100644 (file)
@@ -112,8 +112,8 @@ typedef struct uart_reg {
 #define FCR_RFRES      0x0200  /* Receiver FIFO reset */
 #define FCR_TFRES      0x0400  /* Transmitter FIFO reset */
 #define FCR_DMA                0x0800  /* DMA mode select */
-#define FCR_RTL                0x4000  /* Receiver triger (LSB) */
-#define FCR_RTM                0x8000  /* Receiver triger (MSB) */
+#define FCR_RTL                0x4000  /* Receiver trigger (LSB) */
+#define FCR_RTM                0x8000  /* Receiver trigger (MSB) */
 
 /* Line Control Register */
 
index 4eb8997..4703cbe 100644 (file)
@@ -2,16 +2,11 @@
 #ifndef __ASM_SH_SPARSEMEM_H
 #define __ASM_SH_SPARSEMEM_H
 
-#ifdef __KERNEL__
 /*
  * SECTION_SIZE_BITS           2^N: how big each section will be
- * MAX_PHYSADDR_BITS           2^N: how much physical address space we have
- * MAX_PHYSMEM_BITS            2^N: how much memory we can have in that space
+ * MAX_PHYSMEM_BITS            2^N: how much physical address space we have
  */
 #define SECTION_SIZE_BITS      26
-#define MAX_PHYSADDR_BITS      32
 #define MAX_PHYSMEM_BITS       32
 
-#endif
-
 #endif /* __ASM_SH_SPARSEMEM_H */
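
[editor's note] With MAX_PHYSADDR_BITS gone, the two remaining constants fully describe the sparsemem layout here: 2^MAX_PHYSMEM_BITS bytes of physical address space split into sections of 2^SECTION_SIZE_BITS bytes each. A quick arithmetic check in plain C, using the values from the hunk above:

#include <stdio.h>

int main(void)
{
        const unsigned int section_size_bits = 26;      /* SECTION_SIZE_BITS */
        const unsigned int max_physmem_bits  = 32;      /* MAX_PHYSMEM_BITS  */

        unsigned long long section_bytes = 1ULL << section_size_bits;
        unsigned long long nr_sections   = 1ULL << (max_physmem_bits - section_size_bits);

        /* Prints: 64 MiB per section, 64 sections covering the 4 GiB space. */
        printf("section size: %llu MiB, sections: %llu\n",
               section_bytes >> 20, nr_sections);
        return 0;
}
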
index 50c173c..4f98cdc 100644 (file)
@@ -12,8 +12,6 @@
 
 struct stacktrace_ops {
        void (*address)(void *data, unsigned long address, int reliable);
-       /* On negative return stop dumping */
-       int (*stack)(void *data, char *name);
 };
 
 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
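
[editor's note] After this change stacktrace_ops is down to a single ->address() callback; the per-stack hook is dropped, and the callers later in this diff (dumpstack, stacktrace, perf callchains, oprofile) only supply an address sink. A small standalone sketch of that callback-struct pattern; the walker and the frame values are made up for illustration.

#include <stdio.h>

struct walk_ops {
        /* called once per frame; 'reliable' flags guessed frames */
        void (*address)(void *data, unsigned long addr, int reliable);
};

static void walk_frames(const unsigned long *frames, int n,
                        const struct walk_ops *ops, void *data)
{
        for (int i = 0; i < n; i++)
                ops->address(data, frames[i], 1);
}

static void print_address(void *data, unsigned long addr, int reliable)
{
        printf("%s[<%08lx>]\n", reliable ? "" : "? ", addr);
}

int main(void)
{
        unsigned long fake_stack[] = { 0x8c0010a0, 0x8c00f2c4, 0x8c02b1d8 };
        struct walk_ops ops = { .address = print_address };

        walk_frames(fake_stack, 3, &ops, NULL);
        return 0;
}
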
index 3558b1d..a276b19 100644 (file)
@@ -2,8 +2,6 @@
 #ifndef __ASM_SH_STRING_H
 #define __ASM_SH_STRING_H
 
-#ifdef __KERNEL__
-
 /*
  * Copyright (C) 1999 Niibe Yutaka
  * But consider these trivial functions to be public domain.
@@ -28,32 +26,6 @@ static inline char *strcpy(char *__dest, const char *__src)
        return __xdest;
 }
 
-#define __HAVE_ARCH_STRNCPY
-static inline char *strncpy(char *__dest, const char *__src, size_t __n)
-{
-       register char *__xdest = __dest;
-       unsigned long __dummy;
-
-       if (__n == 0)
-               return __xdest;
-
-       __asm__ __volatile__(
-               "1:\n"
-               "mov.b  @%1+, %2\n\t"
-               "mov.b  %2, @%0\n\t"
-               "cmp/eq #0, %2\n\t"
-               "bt/s   2f\n\t"
-               " cmp/eq        %5,%1\n\t"
-               "bf/s   1b\n\t"
-               " add   #1, %0\n"
-               "2:"
-               : "=r" (__dest), "=r" (__src), "=&z" (__dummy)
-               : "0" (__dest), "1" (__src), "r" (__src+__n)
-               : "memory", "t");
-
-       return __xdest;
-}
-
 #define __HAVE_ARCH_STRCMP
 static inline int strcmp(const char *__cs, const char *__ct)
 {
@@ -127,6 +99,4 @@ extern void *memchr(const void *__s, int __c, size_t __n);
 #define __HAVE_ARCH_STRLEN
 extern size_t strlen(const char *);
 
-#endif /* __KERNEL__ */
-
 #endif /* __ASM_SH_STRING_H */
index 0b5b8e7..cb51a75 100644 (file)
@@ -40,10 +40,7 @@ static inline void syscall_set_return_value(struct task_struct *task,
                                            struct pt_regs *regs,
                                            int error, long val)
 {
-       if (error)
-               regs->regs[0] = -error;
-       else
-               regs->regs[0] = val;
+       regs->regs[0] = (long) error ?: val;
 }
 
 static inline void syscall_get_arguments(struct task_struct *task,
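
[editor's note] The new one-liner uses the GNU C "a ?: b" extension, which evaluates a once and yields it if non-zero, falling back to b otherwise, so a non-zero error takes precedence over the return value. A tiny demonstration, compiled with GCC or Clang:

#include <stdio.h>

static long set_return_value(int error, long val)
{
        /* same shape as the hunk above: a non-zero error wins */
        return (long)error ?: val;
}

int main(void)
{
        printf("%ld\n", set_return_value(0, 42));       /* prints 42  */
        printf("%ld\n", set_return_value(-22, 42));     /* prints -22 */
        return 0;
}
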
index 9f9faf6..5c555b8 100644 (file)
@@ -2,8 +2,6 @@
 #ifndef __ASM_SH_SYSCALLS_32_H
 #define __ASM_SH_SYSCALLS_32_H
 
-#ifdef __KERNEL__
-
 #include <linux/compiler.h>
 #include <linux/linkage.h>
 #include <linux/types.h>
@@ -26,5 +24,4 @@ asmlinkage void do_syscall_trace_leave(struct pt_regs *regs);
 asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned int save_r0,
                                 unsigned long thread_info_flags);
 
-#endif /* __KERNEL__ */
 #endif /* __ASM_SH_SYSCALLS_32_H */
index 6404be6..243ea51 100644 (file)
@@ -10,8 +10,6 @@
  *  Copyright (C) 2002  David Howells (dhowells@redhat.com)
  *  - Incorporating suggestions made by Linus Torvalds and Dave Miller
  */
-#ifdef __KERNEL__
-
 #include <asm/page.h>
 
 /*
@@ -170,7 +168,4 @@ static inline unsigned int get_thread_fault_code(void)
 }
 
 #endif /* !__ASSEMBLY__ */
-
-#endif /* __KERNEL__ */
-
 #endif /* __ASM_SH_THREAD_INFO_H */
index 624cf55..5d7ddc0 100644 (file)
@@ -26,6 +26,9 @@ do {                                                          \
        case 4:                                                 \
                __get_user_asm(x, ptr, retval, "l");            \
                break;                                          \
+       case 8:                                                 \
+               __get_user_u64(x, ptr, retval);                 \
+               break;                                          \
        default:                                                \
                __get_user_unknown();                           \
                break;                                          \
@@ -66,6 +69,56 @@ do {                                                 \
 
 extern void __get_user_unknown(void);
 
+#if defined(CONFIG_CPU_LITTLE_ENDIAN)
+#define __get_user_u64(x, addr, err) \
+({ \
+__asm__ __volatile__( \
+       "1:\n\t" \
+       "mov.l  %2,%R1\n\t" \
+       "mov.l  %T2,%S1\n\t" \
+       "2:\n" \
+       ".section       .fixup,\"ax\"\n" \
+       "3:\n\t" \
+       "mov  #0,%R1\n\t"   \
+       "mov  #0,%S1\n\t"   \
+       "mov.l  4f, %0\n\t" \
+       "jmp    @%0\n\t" \
+       " mov   %3, %0\n\t" \
+       ".balign        4\n" \
+       "4:     .long   2b\n\t" \
+       ".previous\n" \
+       ".section       __ex_table,\"a\"\n\t" \
+       ".long  1b, 3b\n\t" \
+       ".long  1b + 2, 3b\n\t" \
+       ".previous" \
+       :"=&r" (err), "=&r" (x) \
+       :"m" (__m(addr)), "i" (-EFAULT), "0" (err)); })
+#else
+#define __get_user_u64(x, addr, err) \
+({ \
+__asm__ __volatile__( \
+       "1:\n\t" \
+       "mov.l  %2,%S1\n\t" \
+       "mov.l  %T2,%R1\n\t" \
+       "2:\n" \
+       ".section       .fixup,\"ax\"\n" \
+       "3:\n\t" \
+       "mov  #0,%S1\n\t"   \
+       "mov  #0,%R1\n\t"   \
+       "mov.l  4f, %0\n\t" \
+       "jmp    @%0\n\t" \
+       " mov   %3, %0\n\t" \
+       ".balign        4\n" \
+       "4:     .long   2b\n\t" \
+       ".previous\n" \
+       ".section       __ex_table,\"a\"\n\t" \
+       ".long  1b, 3b\n\t" \
+       ".long  1b + 2, 3b\n\t" \
+       ".previous" \
+       :"=&r" (err), "=&r" (x) \
+       :"m" (__m(addr)), "i" (-EFAULT), "0" (err)); })
+#endif
+
 #define __put_user_size(x,ptr,size,retval)             \
 do {                                                   \
        retval = 0;                                     \
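
[editor's note] The new __get_user_u64() helpers fetch a 64-bit value as two 32-bit loads, with the order of the halves flipping between the little- and big-endian variants (the %R/%S operand modifiers select the two halves of the 64-bit destination). The same idea in portable C, as an illustration only; it does not model the exception-table fixup shown above.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Rebuild a u64 from two 32-bit words laid out in native byte order. */
static uint64_t read_u64_halves(const uint32_t *p)
{
        uint32_t w0, w1;

        memcpy(&w0, &p[0], sizeof(w0));         /* first (lower-addressed) word */
        memcpy(&w1, &p[1], sizeof(w1));         /* second word */

#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
        return ((uint64_t)w0 << 32) | w1;       /* big endian: first word is the high half */
#else
        return ((uint64_t)w1 << 32) | w0;       /* little endian: first word is the low half */
#endif
}

int main(void)
{
        uint64_t v = 0x1122334455667788ULL;
        uint32_t halves[2];

        memcpy(halves, &v, sizeof(v));
        printf("%llx\n", (unsigned long long)read_u64_halves(halves));
        return 0;
}
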
index cecd0fc..b9ca4c9 100644 (file)
@@ -8,7 +8,6 @@
  */
 #ifndef __ASM_SH_WATCHDOG_H
 #define __ASM_SH_WATCHDOG_H
-#ifdef __KERNEL__
 
 #include <linux/types.h>
 #include <linux/io.h>
@@ -157,5 +156,4 @@ static inline void sh_wdt_write_csr(__u8 val)
        __raw_writew((WTCSR_HIGH << 8) | (__u16)val, WTCSR);
 }
 #endif /* CONFIG_CPU_SUBTYPE_SH7785 || CONFIG_CPU_SUBTYPE_SH7780 */
-#endif /* __KERNEL__ */
 #endif /* __ASM_SH_WATCHDOG_H */
index b0f5574..aa0fbc9 100644 (file)
@@ -47,5 +47,3 @@ obj-$(CONFIG_DWARF_UNWINDER)  += dwarf.o
 obj-$(CONFIG_PERF_EVENTS)      += perf_event.o perf_callchain.o
 obj-$(CONFIG_DMA_NONCOHERENT)  += dma-coherent.o
 obj-$(CONFIG_HAVE_HW_BREAKPOINT)               += hw_breakpoint.o
-
-ccflags-y := -Werror
index 8455437..08e1af6 100644 (file)
@@ -376,148 +376,148 @@ static void print_sh_insn(u32 memaddr, u16 insn)
                }
 
        ok:
-               printk("%-8s  ", op->name);
+               pr_cont("%-8s  ", op->name);
                lastsp = (op->arg[0] == A_END);
                disp_pc = 0;
                for (n = 0; n < 6 && op->arg[n] != A_END; n++) {
                        if (n && op->arg[1] != A_END)
-                               printk(", ");
+                               pr_cont(", ");
                        switch (op->arg[n]) {
                        case A_IMM:
-                               printk("#%d", (char)(imm));
+                               pr_cont("#%d", (char)(imm));
                                break;
                        case A_R0:
-                               printk("r0");
+                               pr_cont("r0");
                                break;
                        case A_REG_N:
-                               printk("r%d", rn);
+                               pr_cont("r%d", rn);
                                break;
                        case A_INC_N:
-                               printk("@r%d+", rn);
+                               pr_cont("@r%d+", rn);
                                break;
                        case A_DEC_N:
-                               printk("@-r%d", rn);
+                               pr_cont("@-r%d", rn);
                                break;
                        case A_IND_N:
-                               printk("@r%d", rn);
+                               pr_cont("@r%d", rn);
                                break;
                        case A_DISP_REG_N:
-                               printk("@(%d,r%d)", imm, rn);
+                               pr_cont("@(%d,r%d)", imm, rn);
                                break;
                        case A_REG_M:
-                               printk("r%d", rm);
+                               pr_cont("r%d", rm);
                                break;
                        case A_INC_M:
-                               printk("@r%d+", rm);
+                               pr_cont("@r%d+", rm);
                                break;
                        case A_DEC_M:
-                               printk("@-r%d", rm);
+                               pr_cont("@-r%d", rm);
                                break;
                        case A_IND_M:
-                               printk("@r%d", rm);
+                               pr_cont("@r%d", rm);
                                break;
                        case A_DISP_REG_M:
-                               printk("@(%d,r%d)", imm, rm);
+                               pr_cont("@(%d,r%d)", imm, rm);
                                break;
                        case A_REG_B:
-                               printk("r%d_bank", rb);
+                               pr_cont("r%d_bank", rb);
                                break;
                        case A_DISP_PC:
                                disp_pc = 1;
                                disp_pc_addr = imm + 4 + (memaddr & relmask);
-                               printk("%08x <%pS>", disp_pc_addr,
-                                      (void *)disp_pc_addr);
+                               pr_cont("%08x <%pS>", disp_pc_addr,
+                                       (void *)disp_pc_addr);
                                break;
                        case A_IND_R0_REG_N:
-                               printk("@(r0,r%d)", rn);
+                               pr_cont("@(r0,r%d)", rn);
                                break;
                        case A_IND_R0_REG_M:
-                               printk("@(r0,r%d)", rm);
+                               pr_cont("@(r0,r%d)", rm);
                                break;
                        case A_DISP_GBR:
-                               printk("@(%d,gbr)",imm);
+                               pr_cont("@(%d,gbr)", imm);
                                break;
                        case A_R0_GBR:
-                               printk("@(r0,gbr)");
+                               pr_cont("@(r0,gbr)");
                                break;
                        case A_BDISP12:
                        case A_BDISP8:
-                               printk("%08x", imm + memaddr);
+                               pr_cont("%08x", imm + memaddr);
                                break;
                        case A_SR:
-                               printk("sr");
+                               pr_cont("sr");
                                break;
                        case A_GBR:
-                               printk("gbr");
+                               pr_cont("gbr");
                                break;
                        case A_VBR:
-                               printk("vbr");
+                               pr_cont("vbr");
                                break;
                        case A_SSR:
-                               printk("ssr");
+                               pr_cont("ssr");
                                break;
                        case A_SPC:
-                               printk("spc");
+                               pr_cont("spc");
                                break;
                        case A_MACH:
-                               printk("mach");
+                               pr_cont("mach");
                                break;
                        case A_MACL:
-                               printk("macl");
+                               pr_cont("macl");
                                break;
                        case A_PR:
-                               printk("pr");
+                               pr_cont("pr");
                                break;
                        case A_SGR:
-                               printk("sgr");
+                               pr_cont("sgr");
                                break;
                        case A_DBR:
-                               printk("dbr");
+                               pr_cont("dbr");
                                break;
                        case FD_REG_N:
                        case F_REG_N:
-                               printk("fr%d", rn);
+                               pr_cont("fr%d", rn);
                                break;
                        case F_REG_M:
-                               printk("fr%d", rm);
+                               pr_cont("fr%d", rm);
                                break;
                        case DX_REG_N:
                                if (rn & 1) {
-                                       printk("xd%d", rn & ~1);
+                                       pr_cont("xd%d", rn & ~1);
                                        break;
                                }
                                /* else, fall through */
                        case D_REG_N:
-                               printk("dr%d", rn);
+                               pr_cont("dr%d", rn);
                                break;
                        case DX_REG_M:
                                if (rm & 1) {
-                                       printk("xd%d", rm & ~1);
+                                       pr_cont("xd%d", rm & ~1);
                                        break;
                                }
                                /* else, fall through */
                        case D_REG_M:
-                               printk("dr%d", rm);
+                               pr_cont("dr%d", rm);
                                break;
                        case FPSCR_M:
                        case FPSCR_N:
-                               printk("fpscr");
+                               pr_cont("fpscr");
                                break;
                        case FPUL_M:
                        case FPUL_N:
-                               printk("fpul");
+                               pr_cont("fpul");
                                break;
                        case F_FR0:
-                               printk("fr0");
+                               pr_cont("fr0");
                                break;
                        case V_REG_N:
-                               printk("fv%d", rn*4);
+                               pr_cont("fv%d", rn*4);
                                break;
                        case V_REG_M:
-                               printk("fv%d", rm*4);
+                               pr_cont("fv%d", rm*4);
                                break;
                        case XMTRX_M4:
-                               printk("xmtrx");
+                               pr_cont("xmtrx");
                                break;
                        default:
                                return;
@@ -532,7 +532,7 @@ static void print_sh_insn(u32 memaddr, u16 insn)
                        else
                                __get_user(val, (u32 *)disp_pc_addr);
 
-                       printk("  ! %08x <%pS>", val, (void *)val);
+                       pr_cont("  ! %08x <%pS>", val, (void *)val);
                }
 
                return;
@@ -541,7 +541,7 @@ static void print_sh_insn(u32 memaddr, u16 insn)
 
        }
 
-       printk(".word 0x%x%x%x%x", nibs[0], nibs[1], nibs[2], nibs[3]);
+       pr_info(".word 0x%x%x%x%x", nibs[0], nibs[1], nibs[2], nibs[3]);
 }
 
 void show_code(struct pt_regs *regs)
@@ -552,20 +552,21 @@ void show_code(struct pt_regs *regs)
        if (regs->pc & 0x1)
                return;
 
-       printk("Code:\n");
+       pr_info("Code:\n");
 
        for (i = -3 ; i < 6 ; i++) {
                unsigned short insn;
 
                if (__get_user(insn, pc + i)) {
-                       printk(" (Bad address in pc)\n");
+                       pr_err(" (Bad address in pc)\n");
                        break;
                }
 
-               printk("%s%08lx:  ", (i ? "  ": "->"), (unsigned long)(pc + i));
+               pr_info("%s%08lx:  ", (i ? "  " : "->"),
+                       (unsigned long)(pc + i));
                print_sh_insn((unsigned long)(pc + i), insn);
-               printk("\n");
+               pr_cont("\n");
        }
 
-       printk("\n");
+       pr_info("\n");
 }
index d481169..cd46a98 100644 (file)
@@ -3,60 +3,13 @@
  * Copyright (C) 2004 - 2007  Paul Mundt
  */
 #include <linux/mm.h>
-#include <linux/init.h>
 #include <linux/dma-noncoherent.h>
-#include <linux/module.h>
 #include <asm/cacheflush.h>
 #include <asm/addrspace.h>
 
-void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
-               gfp_t gfp, unsigned long attrs)
+void arch_dma_prep_coherent(struct page *page, size_t size)
 {
-       void *ret, *ret_nocache;
-       int order = get_order(size);
-
-       gfp |= __GFP_ZERO;
-
-       ret = (void *)__get_free_pages(gfp, order);
-       if (!ret)
-               return NULL;
-
-       /*
-        * Pages from the page allocator may have data present in
-        * cache. So flush the cache before using uncached memory.
-        */
-       arch_sync_dma_for_device(virt_to_phys(ret), size,
-                       DMA_BIDIRECTIONAL);
-
-       ret_nocache = (void __force *)ioremap(virt_to_phys(ret), size);
-       if (!ret_nocache) {
-               free_pages((unsigned long)ret, order);
-               return NULL;
-       }
-
-       split_page(pfn_to_page(virt_to_phys(ret) >> PAGE_SHIFT), order);
-
-       *dma_handle = virt_to_phys(ret);
-       if (!WARN_ON(!dev))
-               *dma_handle -= PFN_PHYS(dev->dma_pfn_offset);
-
-       return ret_nocache;
-}
-
-void arch_dma_free(struct device *dev, size_t size, void *vaddr,
-               dma_addr_t dma_handle, unsigned long attrs)
-{
-       int order = get_order(size);
-       unsigned long pfn = (dma_handle >> PAGE_SHIFT);
-       int k;
-
-       if (!WARN_ON(!dev))
-               pfn += dev->dma_pfn_offset;
-
-       for (k = 0; k < (1 << order); k++)
-               __free_pages(pfn_to_page(pfn + k), 0);
-
-       iounmap(vaddr);
+       __flush_purge_region(page_address(page), size);
 }
 
 void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
index a13c045..758a6c8 100644 (file)
@@ -16,8 +16,8 @@
 #include <asm/unwinder.h>
 #include <asm/stacktrace.h>
 
-void dump_mem(const char *str, const char *loglvl,
-             unsigned long bottom, unsigned long top)
+void dump_mem(const char *str, const char *loglvl, unsigned long bottom,
+             unsigned long top)
 {
        unsigned long p;
        int i;
@@ -31,23 +31,23 @@ void dump_mem(const char *str, const char *loglvl,
                        unsigned int val;
 
                        if (p < bottom || p >= top)
-                               printk("%s         ", loglvl);
+                               pr_cont("         ");
                        else {
                                if (__get_user(val, (unsigned int __user *)p)) {
-                                       printk("%s\n", loglvl);
+                                       pr_cont("\n");
                                        return;
                                }
-                               printk("%s%08x ", loglvl, val);
+                               pr_cont("%08x ", val);
                        }
                }
-               printk("%s\n", loglvl);
+               pr_cont("\n");
        }
 }
 
-void printk_address(unsigned long address, int reliable, const char *loglvl)
+void printk_address(unsigned long address, int reliable)
 {
-       printk("%s [<%p>] %s%pS\n", loglvl, (void *) address,
-                       reliable ? "" : "? ", (void *) address);
+       pr_cont(" [<%px>] %s%pS\n", (void *) address,
+               reliable ? "" : "? ", (void *) address);
 }
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -107,22 +107,16 @@ stack_reader_dump(struct task_struct *task, struct pt_regs *regs,
        }
 }
 
-static int print_trace_stack(void *data, char *name)
-{
-       printk("%s <%s> ", (char *)data, name);
-       return 0;
-}
-
 /*
  * Print one address/symbol entries per line.
  */
 static void print_trace_address(void *data, unsigned long addr, int reliable)
 {
-       printk_address(addr, reliable, (char *)data);
+       printk("%s", (char *)data);
+       printk_address(addr, reliable);
 }
 
 static const struct stacktrace_ops print_trace_ops = {
-       .stack = print_trace_stack,
        .address = print_trace_address,
 };
 
@@ -136,7 +130,7 @@ void show_trace(struct task_struct *tsk, unsigned long *sp,
 
        unwind_stack(tsk, regs, sp, &print_trace_ops, (void *)loglvl);
 
-       printk("%s\n", loglvl);
+       pr_cont("\n");
 
        if (!tsk)
                tsk = current;
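
[editor's note] The printk("%s...", loglvl) calls above become pr_cont() wherever the output continues an already-started line: pr_cont() appends to the previous record instead of opening a new one with its own log level. A throwaway test-module sketch of the pattern, not part of the patch:

#include <linux/module.h>
#include <linux/printk.h>

static int __init contdemo_init(void)
{
        int i;

        pr_info("values:");             /* open one record at KERN_INFO */
        for (i = 0; i < 4; i++)
                pr_cont(" %d", i);      /* keep appending to the same line */
        pr_cont("\n");                  /* terminate it */
        return 0;
}

static void __exit contdemo_exit(void)
{
}

module_init(contdemo_init);
module_exit(contdemo_exit);
MODULE_LICENSE("GPL");
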
index 9bac5bb..ad96310 100644 (file)
@@ -178,34 +178,6 @@ syscall_exit_work:
        bra     resume_userspace
         nop
 
-       .align  2
-syscall_trace_entry:
-       !                       Yes it is traced.
-       mov     r15, r4
-       mov.l   7f, r11         ! Call do_syscall_trace_enter which notifies
-       jsr     @r11            ! superior (will chomp R[0-7])
-        nop
-       mov.l   r0, @(OFF_R0,r15)       ! Save return value
-       !                       Reload R0-R4 from kernel stack, where the
-       !                       parent may have modified them using
-       !                       ptrace(POKEUSR).  (Note that R0-R2 are
-       !                       reloaded from the kernel stack by syscall_call
-       !                       below, so don't need to be reloaded here.)
-       !                       This allows the parent to rewrite system calls
-       !                       and args on the fly.
-       mov.l   @(OFF_R4,r15), r4   ! arg0
-       mov.l   @(OFF_R5,r15), r5
-       mov.l   @(OFF_R6,r15), r6
-       mov.l   @(OFF_R7,r15), r7   ! arg3
-       mov.l   @(OFF_R3,r15), r3   ! syscall_nr
-       !
-       mov.l   6f, r10                 ! Number of syscalls
-       cmp/hs  r10, r3
-       bf      syscall_call
-       mov     #-ENOSYS, r0
-       bra     syscall_exit
-        mov.l  r0, @(OFF_R0,r15)       ! Return value
-
 __restore_all:
        mov     #OFF_SR, r0
        mov.l   @(r0,r15), r0   ! get status register
@@ -388,6 +360,37 @@ syscall_exit:
        bf      syscall_exit_work
        bra     __restore_all
         nop
+
+       .align  2
+syscall_trace_entry:
+       !                       Yes it is traced.
+       mov     r15, r4
+       mov.l   7f, r11         ! Call do_syscall_trace_enter which notifies
+       jsr     @r11            ! superior (will chomp R[0-7])
+        nop
+       cmp/eq  #-1, r0
+       bt      syscall_exit
+       mov.l   r0, @(OFF_R0,r15)       ! Save return value
+       !                       Reload R0-R4 from kernel stack, where the
+       !                       parent may have modified them using
+       !                       ptrace(POKEUSR).  (Note that R0-R2 are
+       !                       reloaded from the kernel stack by syscall_call
+       !                       below, so don't need to be reloaded here.)
+       !                       This allows the parent to rewrite system calls
+       !                       and args on the fly.
+       mov.l   @(OFF_R4,r15), r4   ! arg0
+       mov.l   @(OFF_R5,r15), r5
+       mov.l   @(OFF_R6,r15), r6
+       mov.l   @(OFF_R7,r15), r7   ! arg3
+       mov.l   @(OFF_R3,r15), r3   ! syscall_nr
+       !
+       mov.l   6f, r10                 ! Number of syscalls
+       cmp/hs  r10, r3
+       bf      syscall_call
+       mov     #-ENOSYS, r0
+       bra     syscall_exit
+        mov.l  r0, @(OFF_R0,r15)       ! Return value
+
        .align  2
 #if !defined(CONFIG_CPU_SH2)
 1:     .long   TRA
index 037aab2..004ad01 100644 (file)
@@ -102,7 +102,6 @@ int register_trapped_io(struct trapped_io *tiop)
        pr_warn("unable to install trapped io filter\n");
        return -1;
 }
-EXPORT_SYMBOL_GPL(register_trapped_io);
 
 void __iomem *match_trapped_io_handler(struct list_head *list,
                                       unsigned long offset,
@@ -131,7 +130,6 @@ void __iomem *match_trapped_io_handler(struct list_head *list,
        spin_unlock_irqrestore(&trapped_lock, flags);
        return NULL;
 }
-EXPORT_SYMBOL_GPL(match_trapped_io_handler);
 
 static struct trapped_io *lookup_tiop(unsigned long address)
 {
index ef9e2c9..0a0dff4 100644 (file)
@@ -8,31 +8,31 @@
 #include <linux/module.h>
 #include <linux/io.h>
 
-unsigned int ioread8(void __iomem *addr)
+unsigned int ioread8(const void __iomem *addr)
 {
        return readb(addr);
 }
 EXPORT_SYMBOL(ioread8);
 
-unsigned int ioread16(void __iomem *addr)
+unsigned int ioread16(const void __iomem *addr)
 {
        return readw(addr);
 }
 EXPORT_SYMBOL(ioread16);
 
-unsigned int ioread16be(void __iomem *addr)
+unsigned int ioread16be(const void __iomem *addr)
 {
        return be16_to_cpu(__raw_readw(addr));
 }
 EXPORT_SYMBOL(ioread16be);
 
-unsigned int ioread32(void __iomem *addr)
+unsigned int ioread32(const void __iomem *addr)
 {
        return readl(addr);
 }
 EXPORT_SYMBOL(ioread32);
 
-unsigned int ioread32be(void __iomem *addr)
+unsigned int ioread32be(const void __iomem *addr)
 {
        return be32_to_cpu(__raw_readl(addr));
 }
@@ -74,7 +74,7 @@ EXPORT_SYMBOL(iowrite32be);
  * convert to CPU byte order. We write in "IO byte
  * order" (we also don't have IO barriers).
  */
-static inline void mmio_insb(void __iomem *addr, u8 *dst, int count)
+static inline void mmio_insb(const void __iomem *addr, u8 *dst, int count)
 {
        while (--count >= 0) {
                u8 data = __raw_readb(addr);
@@ -83,7 +83,7 @@ static inline void mmio_insb(void __iomem *addr, u8 *dst, int count)
        }
 }
 
-static inline void mmio_insw(void __iomem *addr, u16 *dst, int count)
+static inline void mmio_insw(const void __iomem *addr, u16 *dst, int count)
 {
        while (--count >= 0) {
                u16 data = __raw_readw(addr);
@@ -92,7 +92,7 @@ static inline void mmio_insw(void __iomem *addr, u16 *dst, int count)
        }
 }
 
-static inline void mmio_insl(void __iomem *addr, u32 *dst, int count)
+static inline void mmio_insl(const void __iomem *addr, u32 *dst, int count)
 {
        while (--count >= 0) {
                u32 data = __raw_readl(addr);
@@ -125,19 +125,19 @@ static inline void mmio_outsl(void __iomem *addr, const u32 *src, int count)
        }
 }
 
-void ioread8_rep(void __iomem *addr, void *dst, unsigned long count)
+void ioread8_rep(const void __iomem *addr, void *dst, unsigned long count)
 {
        mmio_insb(addr, dst, count);
 }
 EXPORT_SYMBOL(ioread8_rep);
 
-void ioread16_rep(void __iomem *addr, void *dst, unsigned long count)
+void ioread16_rep(const void __iomem *addr, void *dst, unsigned long count)
 {
        mmio_insw(addr, dst, count);
 }
 EXPORT_SYMBOL(ioread16_rep);
 
-void ioread32_rep(void __iomem *addr, void *dst, unsigned long count)
+void ioread32_rep(const void __iomem *addr, void *dst, unsigned long count)
 {
        mmio_insl(addr, dst, count);
 }
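
[editor's note] The ioreadN()/ioreadN_rep() conversions only add const to the source pointer: read-side accessors never store through addr, so constifying the parameter documents the direction of the transfer and lets callers hand in pointers to const regions without a qualifier warning. A minimal illustration of the same constification on an ordinary read helper, with no MMIO involved:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Read-only accessor: taking 'const' promises the source is not modified. */
static uint32_t read32(const void *addr)
{
        uint32_t v;

        memcpy(&v, addr, sizeof(v));
        return v;
}

int main(void)
{
        static const uint32_t reg = 0xdeadbeef;         /* caller-side const "register" */

        /* without the const parameter this call would discard a qualifier */
        printf("%08x\n", read32(&reg));
        return 0;
}
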
index 34f8cdb..f39446a 100644 (file)
@@ -7,6 +7,7 @@
  */
 #include <linux/module.h>
 #include <linux/io.h>
+#include <asm/io_trapped.h>
 
 unsigned long sh_io_port_base __read_mostly = -1;
 EXPORT_SYMBOL(sh_io_port_base);
index beadbbd..d606679 100644 (file)
@@ -15,6 +15,7 @@
 #include <asm/setup.h>
 #include <asm/io.h>
 #include <asm/irq.h>
+#include <asm/processor.h>
 
 #define MV_NAME_SIZE 32
 
@@ -64,10 +65,10 @@ static int __init early_parse_mv(char *from)
 
        mvp = get_mv_byname(mv_name);
        if (unlikely(!mvp)) {
-               printk("Available vectors:\n\n\t'%s', ", sh_mv.mv_name);
+               pr_info("Available vectors:\n\n\t'%s', ", sh_mv.mv_name);
                for_each_mv(mvp)
-                       printk("'%s', ", mvp->mv_name);
-               printk("\n\n");
+                       pr_cont("'%s', ", mvp->mv_name);
+               pr_cont("\n\n");
                panic("Failed to select machvec '%s' -- halting.\n",
                      mv_name);
        } else
@@ -104,7 +105,7 @@ void __init sh_mv_setup(void)
                        sh_mv = *(struct sh_machine_vector *)&__machvec_start;
        }
 
-       printk(KERN_NOTICE "Booting machvec: %s\n", get_system_type());
+       pr_notice("Booting machvec: %s\n", get_system_type());
 
        /*
         * Manually walk the vec, fill in anything that the board hasn't yet
index 6281f2f..c9d3aa1 100644 (file)
 #include <asm/unwinder.h>
 #include <asm/ptrace.h>
 
-static int callchain_stack(void *data, char *name)
-{
-       return 0;
-}
-
 static void callchain_address(void *data, unsigned long addr, int reliable)
 {
        struct perf_callchain_entry_ctx *entry = data;
@@ -25,7 +20,6 @@ static void callchain_address(void *data, unsigned long addr, int reliable)
 }
 
 static const struct stacktrace_ops callchain_ops = {
-       .stack          = callchain_stack,
        .address        = callchain_address,
 };
 
index cde0a66..80a5d1c 100644 (file)
 
 void show_regs(struct pt_regs * regs)
 {
-       printk("\n");
+       pr_info("\n");
        show_regs_print_info(KERN_DEFAULT);
 
-       printk("PC is at %pS\n", (void *)instruction_pointer(regs));
-       printk("PR is at %pS\n", (void *)regs->pr);
+       pr_info("PC is at %pS\n", (void *)instruction_pointer(regs));
+       pr_info("PR is at %pS\n", (void *)regs->pr);
 
-       printk("PC  : %08lx SP  : %08lx SR  : %08lx ",
-              regs->pc, regs->regs[15], regs->sr);
+       pr_info("PC  : %08lx SP  : %08lx SR  : %08lx ", regs->pc,
+               regs->regs[15], regs->sr);
 #ifdef CONFIG_MMU
-       printk("TEA : %08x\n", __raw_readl(MMU_TEA));
+       pr_cont("TEA : %08x\n", __raw_readl(MMU_TEA));
 #else
-       printk("\n");
+       pr_cont("\n");
 #endif
 
-       printk("R0  : %08lx R1  : %08lx R2  : %08lx R3  : %08lx\n",
-              regs->regs[0],regs->regs[1],
-              regs->regs[2],regs->regs[3]);
-       printk("R4  : %08lx R5  : %08lx R6  : %08lx R7  : %08lx\n",
-              regs->regs[4],regs->regs[5],
-              regs->regs[6],regs->regs[7]);
-       printk("R8  : %08lx R9  : %08lx R10 : %08lx R11 : %08lx\n",
-              regs->regs[8],regs->regs[9],
-              regs->regs[10],regs->regs[11]);
-       printk("R12 : %08lx R13 : %08lx R14 : %08lx\n",
-              regs->regs[12],regs->regs[13],
-              regs->regs[14]);
-       printk("MACH: %08lx MACL: %08lx GBR : %08lx PR  : %08lx\n",
-              regs->mach, regs->macl, regs->gbr, regs->pr);
+       pr_info("R0  : %08lx R1  : %08lx R2  : %08lx R3  : %08lx\n",
+               regs->regs[0], regs->regs[1], regs->regs[2], regs->regs[3]);
+       pr_info("R4  : %08lx R5  : %08lx R6  : %08lx R7  : %08lx\n",
+               regs->regs[4], regs->regs[5], regs->regs[6], regs->regs[7]);
+       pr_info("R8  : %08lx R9  : %08lx R10 : %08lx R11 : %08lx\n",
+               regs->regs[8], regs->regs[9], regs->regs[10], regs->regs[11]);
+       pr_info("R12 : %08lx R13 : %08lx R14 : %08lx\n",
+               regs->regs[12], regs->regs[13], regs->regs[14]);
+       pr_info("MACH: %08lx MACL: %08lx GBR : %08lx PR  : %08lx\n",
+               regs->mach, regs->macl, regs->gbr, regs->pr);
 
        show_trace(NULL, (unsigned long *)regs->regs[15], regs, KERN_DEFAULT);
        show_code(regs);
@@ -93,24 +89,6 @@ void release_thread(struct task_struct *dead_task)
        /* do nothing */
 }
 
-/* Fill in the fpu structure for a core dump.. */
-int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
-{
-       int fpvalid = 0;
-
-#if defined(CONFIG_SH_FPU)
-       struct task_struct *tsk = current;
-
-       fpvalid = !!tsk_used_math(tsk);
-       if (fpvalid)
-               fpvalid = !fpregs_get(tsk, NULL,
-                                     (struct membuf){fpu, sizeof(*fpu)});
-#endif
-
-       return fpvalid;
-}
-EXPORT_SYMBOL(dump_fpu);
-
 asmlinkage void ret_from_fork(void);
 asmlinkage void ret_from_kernel_thread(void);
 
index 5c93bdb..b05bf92 100644 (file)
@@ -165,7 +165,7 @@ static int genregs_set(struct task_struct *target,
 }
 
 #ifdef CONFIG_SH_FPU
-int fpregs_get(struct task_struct *target,
+static int fpregs_get(struct task_struct *target,
               const struct user_regset *regset,
               struct membuf to)
 {
@@ -457,8 +457,6 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
 {
        long ret = 0;
 
-       secure_computing_strict(regs->regs[0]);
-
        if (test_thread_flag(TIF_SYSCALL_TRACE) &&
            tracehook_report_syscall_entry(regs))
                /*
@@ -468,6 +466,9 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
                 */
                ret = -1L;
 
+       if (secure_computing() == -1)
+               return -1;
+
        if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
                trace_sys_enter(regs, regs->regs[0]);
 
index 2950b19..daf0b53 100644 (file)
 #include <asm/ptrace.h>
 #include <asm/stacktrace.h>
 
-static int save_stack_stack(void *data, char *name)
-{
-       return 0;
-}
-
 /*
  * Save stack-backtrace addresses into a stack_trace buffer.
  */
@@ -40,7 +35,6 @@ static void save_stack_address(void *data, unsigned long addr, int reliable)
 }
 
 static const struct stacktrace_ops save_stack_ops = {
-       .stack = save_stack_stack,
        .address = save_stack_address,
 };
 
@@ -73,7 +67,6 @@ save_stack_address_nosched(void *data, unsigned long addr, int reliable)
 }
 
 static const struct stacktrace_ops save_stack_ops_nosched = {
-       .stack = save_stack_stack,
        .address = save_stack_address_nosched,
 };
 
index 96848db..ae0a00b 100644 (file)
 146    common  writev                          sys_writev
 147    common  getsid                          sys_getsid
 148    common  fdatasync                       sys_fdatasync
-149    common  _sysctl                         sys_sysctl
+149    common  _sysctl                         sys_ni_syscall
 150    common  mlock                           sys_mlock
 151    common  munlock                         sys_munlock
 152    common  mlockall                        sys_mlockall
index 058c618..b62ad0b 100644 (file)
@@ -482,8 +482,6 @@ asmlinkage void do_address_error(struct pt_regs *regs,
        error_code = lookup_exception_vector();
 #endif
 
-       oldfs = get_fs();
-
        if (user_mode(regs)) {
                int si_code = BUS_ADRERR;
                unsigned int user_action;
@@ -491,13 +489,13 @@ asmlinkage void do_address_error(struct pt_regs *regs,
                local_irq_enable();
                inc_unaligned_user_access();
 
-               set_fs(USER_DS);
+               oldfs = force_uaccess_begin();
                if (copy_from_user(&instruction, (insn_size_t *)(regs->pc & ~1),
                                   sizeof(instruction))) {
-                       set_fs(oldfs);
+                       force_uaccess_end(oldfs);
                        goto uspace_segv;
                }
-               set_fs(oldfs);
+               force_uaccess_end(oldfs);
 
                /* shout about userspace fixups */
                unaligned_fixups_notify(current, instruction, regs);
@@ -520,11 +518,11 @@ fixup:
                        goto uspace_segv;
                }
 
-               set_fs(USER_DS);
+               oldfs = force_uaccess_begin();
                tmp = handle_unaligned_access(instruction, regs,
                                              &user_mem_access, 0,
                                              address);
-               set_fs(oldfs);
+               force_uaccess_end(oldfs);
 
                if (tmp == 0)
                        return; /* sorted */
index d0abbe5..eb473d3 100644 (file)
@@ -30,5 +30,3 @@ memset-$(CONFIG_CPU_SH4)      := memset-sh4.o
 lib-$(CONFIG_MMU)              += copy_page.o __clear_user.o
 lib-$(CONFIG_MCOUNT)           += mcount.o
 lib-y                          += $(memcpy-y) $(memset-y) $(udivsi3-y)
-
-ccflags-y := -Werror
index 540e670..dad8e6a 100644 (file)
@@ -29,7 +29,6 @@ void __delay(unsigned long loops)
                : "0" (loops)
                : "t");
 }
-EXPORT_SYMBOL(__delay);
 
 inline void __const_udelay(unsigned long xloops)
 {
index 487da0f..f69ddc7 100644 (file)
@@ -43,5 +43,3 @@ obj-$(CONFIG_UNCACHED_MAPPING)        += uncached.o
 obj-$(CONFIG_HAVE_SRAM_POOL)   += sram.o
 
 GCOV_PROFILE_pmb.o := n
-
-ccflags-y := -Werror
index 3169a34..0de206c 100644 (file)
@@ -57,8 +57,6 @@ int __init platform_resource_setup_memory(struct platform_device *pdev,
                return -ENOMEM;
        }
 
-       memset(buf, 0, memsize);
-
        r->flags = IORESOURCE_MEM;
        r->start = dma_handle;
        r->end = r->start + memsize - 1;
index fbe1f2f..88a1f45 100644 (file)
@@ -208,13 +208,12 @@ show_fault_oops(struct pt_regs *regs, unsigned long address)
        if (!oops_may_print())
                return;
 
-       printk(KERN_ALERT "PC:");
        pr_alert("BUG: unable to handle kernel %s at %08lx\n",
                 address < PAGE_SIZE ? "NULL pointer dereference"
                                     : "paging request",
                 address);
        pr_alert("PC:");
-       printk_address(regs->pc, 1, KERN_ALERT);
+       printk_address(regs->pc, 1);
 
        show_pte(NULL, address);
 }
@@ -482,22 +481,13 @@ good_area:
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
-       fault = handle_mm_fault(vma, address, flags);
+       fault = handle_mm_fault(vma, address, flags, regs);
 
        if (unlikely(fault & (VM_FAULT_RETRY | VM_FAULT_ERROR)))
                if (mm_fault_error(regs, error_code, address, fault))
                        return;
 
        if (flags & FAULT_FLAG_ALLOW_RETRY) {
-               if (fault & VM_FAULT_MAJOR) {
-                       tsk->maj_flt++;
-                       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
-                                     regs, address);
-               } else {
-                       tsk->min_flt++;
-                       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
-                                     regs, address);
-               }
                if (fault & VM_FAULT_RETRY) {
                        flags |= FAULT_FLAG_TRIED;
 
index 613de80..4735176 100644 (file)
@@ -29,6 +29,7 @@
 #include <asm/cache.h>
 #include <asm/pgalloc.h>
 #include <linux/sizes.h>
+#include "ioremap.h"
 
 pgd_t swapper_pg_dir[PTRS_PER_PGD];
 
@@ -425,15 +426,6 @@ int arch_add_memory(int nid, u64 start, u64 size,
        return ret;
 }
 
-#ifdef CONFIG_NUMA
-int memory_add_physaddr_to_nid(u64 addr)
-{
-       /* Node 0 for now.. */
-       return 0;
-}
-EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
-#endif
-
 void arch_remove_memory(int nid, u64 start, u64 size,
                        struct vmem_altmap *altmap)
 {
index f6d0224..2134258 100644 (file)
 #include <linux/mm.h>
 #include <linux/pci.h>
 #include <linux/io.h>
+#include <asm/io_trapped.h>
 #include <asm/page.h>
 #include <asm/pgalloc.h>
 #include <asm/addrspace.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 #include <asm/mmu.h>
+#include "ioremap.h"
+
+/*
+ * On 32-bit SH, we traditionally have the whole physical address space mapped
+ * at all times (as MIPS does), so "ioremap()" and "iounmap()" do not need to do
+ * anything but place the address in the proper segment.  This is true for P1
+ * and P2 addresses, as well as some P3 ones.  However, most of the P3 addresses
+ * and newer cores using extended addressing need to map through page tables, so
+ * the ioremap() implementation becomes a bit more complicated.
+ */
+#ifdef CONFIG_29BIT
+static void __iomem *
+__ioremap_29bit(phys_addr_t offset, unsigned long size, pgprot_t prot)
+{
+       phys_addr_t last_addr = offset + size - 1;
+
+       /*
+        * For P1 and P2 space this is trivial, as everything is already
+        * mapped. Uncached access for P1 addresses are done through P2.
+        * In the P3 case or for addresses outside of the 29-bit space,
+        * mapping must be done by the PMB or by using page tables.
+        */
+       if (likely(PXSEG(offset) < P3SEG && PXSEG(last_addr) < P3SEG)) {
+               u64 flags = pgprot_val(prot);
+
+               /*
+                * Anything using the legacy PTEA space attributes needs
+                * to be kicked down to page table mappings.
+                */
+               if (unlikely(flags & _PAGE_PCC_MASK))
+                       return NULL;
+               if (unlikely(flags & _PAGE_CACHABLE))
+                       return (void __iomem *)P1SEGADDR(offset);
+
+               return (void __iomem *)P2SEGADDR(offset);
+       }
+
+       /* P4 above the store queues are always mapped. */
+       if (unlikely(offset >= P3_ADDR_MAX))
+               return (void __iomem *)P4SEGADDR(offset);
+
+       return NULL;
+}
+#else
+#define __ioremap_29bit(offset, size, prot)            NULL
+#endif /* CONFIG_29BIT */
 
 /*
  * Remap an arbitrary physical address space into the kernel virtual
@@ -42,6 +89,14 @@ __ioremap_caller(phys_addr_t phys_addr, unsigned long size,
        unsigned long offset, last_addr, addr, orig_addr;
        void __iomem *mapped;
 
+       mapped = __ioremap_trapped(phys_addr, size);
+       if (mapped)
+               return mapped;
+
+       mapped = __ioremap_29bit(phys_addr, size, pgprot);
+       if (mapped)
+               return mapped;
+
        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
diff --git a/arch/sh/mm/ioremap.h b/arch/sh/mm/ioremap.h
new file mode 100644 (file)
index 0000000..f2544e7
--- /dev/null
@@ -0,0 +1,23 @@
+#ifndef _SH_MM_IORMEMAP_H
+#define _SH_MM_IORMEMAP_H 1
+
+#ifdef CONFIG_IOREMAP_FIXED
+void __iomem *ioremap_fixed(phys_addr_t, unsigned long, pgprot_t);
+int iounmap_fixed(void __iomem *);
+void ioremap_fixed_init(void);
+#else
+static inline void __iomem *
+ioremap_fixed(phys_addr_t phys_addr, unsigned long size, pgprot_t prot)
+{
+       BUG();
+       return NULL;
+}
+static inline void ioremap_fixed_init(void)
+{
+}
+static inline int iounmap_fixed(void __iomem *addr)
+{
+       return -EINVAL;
+}
+#endif /* CONFIG_IOREMAP_FIXED */
+#endif /* _SH_MM_IORMEMAP_H */
index aab3f82..136113b 100644 (file)
@@ -23,6 +23,7 @@
 #include <asm/tlbflush.h>
 #include <asm/mmu.h>
 #include <asm/mmu_context.h>
+#include "ioremap.h"
 
 struct ioremap_map {
        void __iomem *addr;
index 5c8f924..cf7ce4b 100644 (file)
@@ -2,8 +2,6 @@
 #include <linux/mm.h>
 #include <linux/slab.h>
 
-#define PGALLOC_GFP GFP_KERNEL | __GFP_ZERO
-
 static struct kmem_cache *pgd_cachep;
 #if PAGETABLE_LEVELS > 2
 static struct kmem_cache *pmd_cachep;
@@ -13,6 +11,7 @@ void pgd_ctor(void *x)
 {
        pgd_t *pgd = x;
 
+       memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
        memcpy(pgd + USER_PTRS_PER_PGD,
               swapper_pg_dir + USER_PTRS_PER_PGD,
               (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
@@ -32,7 +31,7 @@ void pgtable_cache_init(void)
 
 pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-       return kmem_cache_alloc(pgd_cachep, PGALLOC_GFP);
+       return kmem_cache_alloc(pgd_cachep, GFP_KERNEL);
 }
 
 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
@@ -48,7 +47,7 @@ void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
 
 pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
 {
-       return kmem_cache_alloc(pmd_cachep, PGALLOC_GFP);
+       return kmem_cache_alloc(pmd_cachep, GFP_KERNEL | __GFP_ZERO);
 }
 
 void pmd_free(struct mm_struct *mm, pmd_t *pmd)
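
[editor's note] The pgd allocation drops __GFP_ZERO in favour of doing all initialisation in the slab constructor: pgd_ctor() zeroes the user part and copies the kernel part, so objects leave the cache already valid and the constructor runs only when a new slab is populated, not on every allocation. A condensed kernel-style sketch of the constructor-backed cache pattern, assembled from the hunks above; it assumes the arch pgtable headers for pgd_t, swapper_pg_dir and the PTRS_PER_PGD constants, and is not buildable on its own.

#include <linux/mm.h>
#include <linux/slab.h>

static struct kmem_cache *pgd_cachep;

static void pgd_ctor(void *x)
{
        /* runs once per object when a slab is created, not per allocation */
        memset(x, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
        memcpy((pgd_t *)x + USER_PTRS_PER_PGD,
               swapper_pg_dir + USER_PTRS_PER_PGD,
               (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
}

void pgtable_cache_init(void)
{
        pgd_cachep = kmem_cache_create("pgd_cache",
                                       PTRS_PER_PGD * sizeof(pgd_t), 0,
                                       SLAB_PANIC, pgd_ctor);
}

pgd_t *pgd_alloc(struct mm_struct *mm)
{
        /* plain GFP_KERNEL: the constructor already prepared the object */
        return kmem_cache_alloc(pgd_cachep, GFP_KERNEL);
}
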
index f1205f9..cc16cf8 100644 (file)
 #include <asm/sections.h>
 #include <asm/stacktrace.h>
 
-static int backtrace_stack(void *data, char *name)
-{
-       /* Yes, we want all stacks */
-       return 0;
-}
-
 static void backtrace_address(void *data, unsigned long addr, int reliable)
 {
        unsigned int *depth = data;
@@ -34,7 +28,6 @@ static void backtrace_address(void *data, unsigned long addr, int reliable)
 }
 
 static struct stacktrace_ops backtrace_ops = {
-       .stack = backtrace_stack,
        .address = backtrace_address,
 };
 
index 569977e..29e6488 100644 (file)
@@ -46,7 +46,6 @@ X3PROTO                       SH_X3PROTO
 MAGICPANELR2           SH_MAGIC_PANEL_R2
 R2D_PLUS               RTS7751R2D_PLUS
 R2D_1                  RTS7751R2D_1
-CAYMAN                 SH_CAYMAN
 SDK7780                        SH_SDK7780
 MIGOR                  SH_MIGOR
 RSK7201                        SH_RSK7201
index 1dd1b61..aa9a676 100644 (file)
@@ -7,7 +7,6 @@
 #include <asm/page.h>
 
 #define SECTION_SIZE_BITS       30
-#define MAX_PHYSADDR_BITS       MAX_PHYS_ADDRESS_BITS
 #define MAX_PHYSMEM_BITS        MAX_PHYS_ADDRESS_BITS
 
 #endif /* !(__KERNEL__) */
index c7e4fb6..dcfad46 100644 (file)
@@ -7,6 +7,7 @@
 #ifndef _SPARC64_TIMER_H
 #define _SPARC64_TIMER_H
 
+#include <uapi/asm/asi.h>
 #include <linux/types.h>
 #include <linux/init.h>
 
index d6d8413..0a2d3eb 100644 (file)
@@ -28,7 +28,7 @@
 #define get_fs()       (current->thread.current_ds)
 #define set_fs(val)    ((current->thread.current_ds) = (val))
 
-#define segment_eq(a, b) ((a).seg == (b).seg)
+#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg)
 
 /* We have there a nice not-mapped page at PAGE_OFFSET - PAGE_SIZE, so that this test
  * can be fairly lightweight.
index bf9d330..698cf69 100644 (file)
@@ -32,7 +32,7 @@
 
 #define get_fs() ((mm_segment_t){(current_thread_info()->current_ds)})
 
-#define segment_eq(a, b)  ((a).seg == (b).seg)
+#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg)
 
 #define set_fs(val)                                                            \
 do {                                                                           \
index 0289503..6eaf5cf 100644 (file)
@@ -6,7 +6,8 @@
 #define _ASM_SPARC_VVAR_DATA_H
 
 #include <asm/clocksource.h>
-#include <linux/seqlock.h>
+#include <asm/processor.h>
+#include <asm/barrier.h>
 #include <linux/time.h>
 #include <linux/types.h>
 
index 3f519e1..adfcaea 100644 (file)
@@ -380,55 +380,6 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg,
        return 0;
 }
 
-/*
- * fill in the fpu structure for a core dump.
- */
-int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs)
-{
-       if (used_math()) {
-               memset(fpregs, 0, sizeof(*fpregs));
-               fpregs->pr_q_entrysize = 8;
-               return 1;
-       }
-#ifdef CONFIG_SMP
-       if (test_thread_flag(TIF_USEDFPU)) {
-               put_psr(get_psr() | PSR_EF);
-               fpsave(&current->thread.float_regs[0], &current->thread.fsr,
-                      &current->thread.fpqueue[0], &current->thread.fpqdepth);
-               if (regs != NULL) {
-                       regs->psr &= ~(PSR_EF);
-                       clear_thread_flag(TIF_USEDFPU);
-               }
-       }
-#else
-       if (current == last_task_used_math) {
-               put_psr(get_psr() | PSR_EF);
-               fpsave(&current->thread.float_regs[0], &current->thread.fsr,
-                      &current->thread.fpqueue[0], &current->thread.fpqdepth);
-               if (regs != NULL) {
-                       regs->psr &= ~(PSR_EF);
-                       last_task_used_math = NULL;
-               }
-       }
-#endif
-       memcpy(&fpregs->pr_fr.pr_regs[0],
-              &current->thread.float_regs[0],
-              (sizeof(unsigned long) * 32));
-       fpregs->pr_fsr = current->thread.fsr;
-       fpregs->pr_qcnt = current->thread.fpqdepth;
-       fpregs->pr_q_entrysize = 8;
-       fpregs->pr_en = 1;
-       if(fpregs->pr_qcnt != 0) {
-               memcpy(&fpregs->pr_q[0],
-                      &current->thread.fpqueue[0],
-                      sizeof(struct fpq) * fpregs->pr_qcnt);
-       }
-       /* Zero out the rest. */
-       memset(&fpregs->pr_q[fpregs->pr_qcnt], 0,
-              sizeof(struct fpq) * (32 - fpregs->pr_qcnt));
-       return 1;
-}
-
 unsigned long get_wchan(struct task_struct *task)
 {
        unsigned long pc, fp, bias = 0;
index 04ef19b..a75093b 100644 (file)
@@ -666,72 +666,6 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
        return 0;
 }
 
-typedef struct {
-       union {
-               unsigned int    pr_regs[32];
-               unsigned long   pr_dregs[16];
-       } pr_fr;
-       unsigned int __unused;
-       unsigned int    pr_fsr;
-       unsigned char   pr_qcnt;
-       unsigned char   pr_q_entrysize;
-       unsigned char   pr_en;
-       unsigned int    pr_q[64];
-} elf_fpregset_t32;
-
-/*
- * fill in the fpu structure for a core dump.
- */
-int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs)
-{
-       unsigned long *kfpregs = current_thread_info()->fpregs;
-       unsigned long fprs = current_thread_info()->fpsaved[0];
-
-       if (test_thread_flag(TIF_32BIT)) {
-               elf_fpregset_t32 *fpregs32 = (elf_fpregset_t32 *)fpregs;
-
-               if (fprs & FPRS_DL)
-                       memcpy(&fpregs32->pr_fr.pr_regs[0], kfpregs,
-                              sizeof(unsigned int) * 32);
-               else
-                       memset(&fpregs32->pr_fr.pr_regs[0], 0,
-                              sizeof(unsigned int) * 32);
-               fpregs32->pr_qcnt = 0;
-               fpregs32->pr_q_entrysize = 8;
-               memset(&fpregs32->pr_q[0], 0,
-                      (sizeof(unsigned int) * 64));
-               if (fprs & FPRS_FEF) {
-                       fpregs32->pr_fsr = (unsigned int) current_thread_info()->xfsr[0];
-                       fpregs32->pr_en = 1;
-               } else {
-                       fpregs32->pr_fsr = 0;
-                       fpregs32->pr_en = 0;
-               }
-       } else {
-               if(fprs & FPRS_DL)
-                       memcpy(&fpregs->pr_regs[0], kfpregs,
-                              sizeof(unsigned int) * 32);
-               else
-                       memset(&fpregs->pr_regs[0], 0,
-                              sizeof(unsigned int) * 32);
-               if(fprs & FPRS_DU)
-                       memcpy(&fpregs->pr_regs[16], kfpregs+16,
-                              sizeof(unsigned int) * 32);
-               else
-                       memset(&fpregs->pr_regs[16], 0,
-                              sizeof(unsigned int) * 32);
-               if(fprs & FPRS_FEF) {
-                       fpregs->pr_fsr = current_thread_info()->xfsr[0];
-                       fpregs->pr_gsr = current_thread_info()->gsr[0];
-               } else {
-                       fpregs->pr_fsr = fpregs->pr_gsr = 0;
-               }
-               fpregs->pr_fprs = fprs;
-       }
-       return 1;
-}
-EXPORT_SYMBOL(dump_fpu);
-
 unsigned long get_wchan(struct task_struct *task)
 {
        unsigned long pc, fp, bias = 0;
index 46024e8..4af114e 100644 (file)
 249    64      nanosleep               sys_nanosleep
 250    32      mremap                  sys_mremap
 250    64      mremap                  sys_64_mremap
-251    common  _sysctl                 sys_sysctl                      compat_sys_sysctl
+251    common  _sysctl                 sys_ni_syscall
 252    common  getsid                  sys_getsid
 253    common  fdatasync               sys_fdatasync
 254    32      nfsservctl              sys_ni_syscall                  sys_nis_syscall
index 5888066..0e27437 100644 (file)
@@ -7,7 +7,6 @@
  *  a different vsyscall implementation for Linux/IA32 and for the name.
  */
 
-#include <linux/seqlock.h>
 #include <linux/time.h>
 #include <linux/timekeeper_internal.h>
 
index cfef656..8071bfd 100644 (file)
@@ -234,7 +234,7 @@ good_area:
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
-       fault = handle_mm_fault(vma, address, flags);
+       fault = handle_mm_fault(vma, address, flags, regs);
 
        if (fault_signal_pending(fault, regs))
                return;
@@ -250,15 +250,6 @@ good_area:
        }
 
        if (flags & FAULT_FLAG_ALLOW_RETRY) {
-               if (fault & VM_FAULT_MAJOR) {
-                       current->maj_flt++;
-                       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
-                                     1, regs, address);
-               } else {
-                       current->min_flt++;
-                       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
-                                     1, regs, address);
-               }
                if (fault & VM_FAULT_RETRY) {
                        flags |= FAULT_FLAG_TRIED;
 
@@ -410,7 +401,7 @@ good_area:
                if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
                        goto bad_area;
        }
-       switch (handle_mm_fault(vma, address, flags)) {
+       switch (handle_mm_fault(vma, address, flags, NULL)) {
        case VM_FAULT_SIGBUS:
        case VM_FAULT_OOM:
                goto do_sigbus;
index a380661..0a6bcc8 100644 (file)
@@ -422,7 +422,7 @@ good_area:
                        goto bad_area;
        }
 
-       fault = handle_mm_fault(vma, address, flags);
+       fault = handle_mm_fault(vma, address, flags, regs);
 
        if (fault_signal_pending(fault, regs))
                goto exit_exception;
@@ -438,15 +438,6 @@ good_area:
        }
 
        if (flags & FAULT_FLAG_ALLOW_RETRY) {
-               if (fault & VM_FAULT_MAJOR) {
-                       current->maj_flt++;
-                       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
-                                     1, regs, address);
-               } else {
-                       current->min_flt++;
-                       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
-                                     1, regs, address);
-               }
                if (fault & VM_FAULT_RETRY) {
                        flags |= FAULT_FLAG_TRIED;
 
index 708cb63..f44355e 100644 (file)
@@ -54,7 +54,7 @@ $(obj)/vdso-image-%.c: $(obj)/vdso%.so.dbg $(obj)/vdso%.so $(obj)/vdso2c FORCE
 # optimize sibling calls.
 #
 CFL := $(PROFILING) -mcmodel=medlow -fPIC -O2 -fasynchronous-unwind-tables -m64 \
-       $(filter -g%,$(KBUILD_CFLAGS)) $(call cc-option, -fno-stack-protector) \
+       $(filter -g%,$(KBUILD_CFLAGS)) -fno-stack-protector \
        -fno-omit-frame-pointer -foptimize-sibling-calls \
        -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO
 
@@ -93,7 +93,7 @@ KBUILD_CFLAGS_32 := $(filter-out -fno-pic,$(KBUILD_CFLAGS_32))
 KBUILD_CFLAGS_32 := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS_32))
 KBUILD_CFLAGS_32 := $(filter-out $(SPARC_REG_CFLAGS),$(KBUILD_CFLAGS_32))
 KBUILD_CFLAGS_32 += -m32 -msoft-float -fpic
-KBUILD_CFLAGS_32 += $(call cc-option, -fno-stack-protector)
+KBUILD_CFLAGS_32 += -fno-stack-protector
 KBUILD_CFLAGS_32 += $(call cc-option, -foptimize-sibling-calls)
 KBUILD_CFLAGS_32 += -fno-omit-frame-pointer
 KBUILD_CFLAGS_32 += -DDISABLE_BRANCH_PROFILING
index ef69be1..eb51fec 100644 (file)
@@ -14,6 +14,7 @@ config UML
        select HAVE_FUTEX_CMPXCHG if FUTEX
        select HAVE_DEBUG_KMEMLEAK
        select HAVE_DEBUG_BUGVERBOSE
+       select NO_DMA
        select GENERIC_IRQ_SHOW
        select GENERIC_CPU_DEVICES
        select GENERIC_CLOCKEVENTS
@@ -167,9 +168,6 @@ config MMAPPER
          This driver allows a host file to be used as emulated IO memory inside
          UML.
 
-config NO_DMA
-       def_bool y
-
 config PGTABLE_LEVELS
        int
        default 3 if 3_LEVEL_PGTABLES
index 3f27aa3..1cea46f 100644 (file)
@@ -121,8 +121,7 @@ LINK-$(CONFIG_LD_SCRIPT_STATIC) += -static
 LINK-$(CONFIG_LD_SCRIPT_DYN) += -Wl,-rpath,/lib $(call cc-option, -no-pie)
 
 CFLAGS_NO_HARDENING := $(call cc-option, -fno-PIC,) $(call cc-option, -fno-pic,) \
-       $(call cc-option, -fno-stack-protector,) \
-       $(call cc-option, -fno-stack-protector-all,)
+       -fno-stack-protector $(call cc-option, -fno-stack-protector-all)
 
 # Options used by linker script
 export LDS_START      := $(START)
index 351aee5..a6c4bb6 100644 (file)
@@ -385,7 +385,7 @@ static irqreturn_t vu_req_interrupt(int irq, void *data)
                }
                break;
        case VHOST_USER_SLAVE_IOTLB_MSG:
-               /* not supported - VIRTIO_F_IOMMU_PLATFORM */
+               /* not supported - VIRTIO_F_ACCESS_PLATFORM */
        case VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG:
                /* not supported - VHOST_USER_PROTOCOL_F_HOST_NOTIFIER */
        default:
index 2b3afa3..ad12f78 100644 (file)
@@ -71,7 +71,7 @@ good_area:
        do {
                vm_fault_t fault;
 
-               fault = handle_mm_fault(vma, address, flags);
+               fault = handle_mm_fault(vma, address, flags, NULL);
 
                if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
                        goto out_nosemaphore;
@@ -88,10 +88,6 @@ good_area:
                        BUG();
                }
                if (flags & FAULT_FLAG_ALLOW_RETRY) {
-                       if (fault & VM_FAULT_MAJOR)
-                               current->maj_flt++;
-                       else
-                               current->min_flt++;
                        if (fault & VM_FAULT_RETRY) {
                                flags |= FAULT_FLAG_TRIED;
 
index 9a28495..7101ac6 100644 (file)
@@ -209,6 +209,7 @@ config X86
        select HAVE_PERF_REGS
        select HAVE_PERF_USER_STACK_DUMP
        select MMU_GATHER_RCU_TABLE_FREE                if PARAVIRT
+       select HAVE_POSIX_CPU_TIMERS_TASK_WORK
        select HAVE_REGS_AND_STACK_ACCESS_API
        select HAVE_RELIABLE_STACKTRACE         if X86_64 && (UNWINDER_FRAME_POINTER || UNWINDER_ORC) && STACK_VALIDATION
        select HAVE_FUNCTION_ARG_ACCESS_API
index 1e634d7..4346ffb 100644 (file)
@@ -36,8 +36,8 @@ REALMODE_CFLAGS       := $(M16_CFLAGS) -g -Os -DDISABLE_BRANCH_PROFILING \
                   -fno-strict-aliasing -fomit-frame-pointer -fno-pic \
                   -mno-mmx -mno-sse
 
-REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -ffreestanding)
-REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -fno-stack-protector)
+REALMODE_CFLAGS += -ffreestanding
+REALMODE_CFLAGS += -fno-stack-protector
 REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -Wno-address-of-packed-member)
 REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), $(cc_stack_align4))
 export REALMODE_CFLAGS
index c08714a..3962f59 100644 (file)
@@ -35,8 +35,8 @@ cflags-$(CONFIG_X86_32) := -march=i386
 cflags-$(CONFIG_X86_64) := -mcmodel=small
 KBUILD_CFLAGS += $(cflags-y)
 KBUILD_CFLAGS += -mno-mmx -mno-sse
-KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
-KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
+KBUILD_CFLAGS += -ffreestanding
+KBUILD_CFLAGS += -fno-stack-protector
 KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
 KBUILD_CFLAGS += $(call cc-disable-warning, gnu)
 KBUILD_CFLAGS += -Wno-pointer-sign
index 39e592d..e478e40 100644 (file)
 #define STATIC         static
 
 /*
- * Use normal definitions of mem*() from string.c. There are already
- * included header files which expect a definition of memset() and by
- * the time we define memset macro, it is too late.
+ * Provide definitions of memzero and memmove as some of the decompressors will
+ * try to define their own functions if these are not defined as macros.
  */
-#undef memcpy
-#undef memset
 #define memzero(s, n)  memset((s), 0, (n))
 #define memmove                memmove
 
index 995f7b7..a232da4 100644 (file)
@@ -11,10 +11,7 @@ void *memcpy(void *dst, const void *src, size_t len);
 void *memset(void *dst, int c, size_t len);
 int memcmp(const void *s1, const void *s2, size_t len);
 
-/*
- * Access builtin version by default. If one needs to use optimized version,
- * do "undef memcpy" in .c file and link against right string.c
- */
+/* Access builtin version by default. */
 #define memcpy(d,s,l) __builtin_memcpy(d,s,l)
 #define memset(d,c,l) __builtin_memset(d,c,l)
 #define memcmp __builtin_memcmp
index 29b7d52..df8c017 100644 (file)
 
 .macro SWITCH_TO_KERNEL_STACK
 
-       ALTERNATIVE     "", "jmp .Lend_\@", X86_FEATURE_XENPV
-
        BUG_IF_WRONG_CR3
 
        SWITCH_TO_KERNEL_CR3 scratch_reg=%eax
  */
 .macro SWITCH_TO_ENTRY_STACK
 
-       ALTERNATIVE     "", "jmp .Lend_\@", X86_FEATURE_XENPV
-
        /* Bytes to copy */
        movl    $PTREGS_SIZE, %ecx
 
@@ -872,17 +868,6 @@ SYM_ENTRY(__begin_SYSENTER_singlestep_region, SYM_L_GLOBAL, SYM_A_NONE)
  * will ignore all of the single-step traps generated in this range.
  */
 
-#ifdef CONFIG_XEN_PV
-/*
- * Xen doesn't set %esp to be precisely what the normal SYSENTER
- * entry point expects, so fix it up before using the normal path.
- */
-SYM_CODE_START(xen_sysenter_target)
-       addl    $5*4, %esp                      /* remove xen-provided frame */
-       jmp     .Lsysenter_past_esp
-SYM_CODE_END(xen_sysenter_target)
-#endif
-
 /*
  * 32-bit SYSENTER entry.
  *
@@ -965,9 +950,8 @@ SYM_FUNC_START(entry_SYSENTER_32)
 
        movl    %esp, %eax
        call    do_SYSENTER_32
-       /* XEN PV guests always use IRET path */
-       ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \
-                   "jmp .Lsyscall_32_done", X86_FEATURE_XENPV
+       testl   %eax, %eax
+       jz      .Lsyscall_32_done
 
        STACKLEAK_ERASE
 
@@ -1165,95 +1149,6 @@ SYM_FUNC_END(entry_INT80_32)
 #endif
 .endm
 
-#ifdef CONFIG_PARAVIRT
-SYM_CODE_START(native_iret)
-       iret
-       _ASM_EXTABLE(native_iret, asm_iret_error)
-SYM_CODE_END(native_iret)
-#endif
-
-#ifdef CONFIG_XEN_PV
-/*
- * See comment in entry_64.S for further explanation
- *
- * Note: This is not an actual IDT entry point. It's a XEN specific entry
- * point and therefore named to match the 64-bit trampoline counterpart.
- */
-SYM_FUNC_START(xen_asm_exc_xen_hypervisor_callback)
-       /*
-        * Check to see if we got the event in the critical
-        * region in xen_iret_direct, after we've reenabled
-        * events and checked for pending events.  This simulates
-        * iret instruction's behaviour where it delivers a
-        * pending interrupt when enabling interrupts:
-        */
-       cmpl    $xen_iret_start_crit, (%esp)
-       jb      1f
-       cmpl    $xen_iret_end_crit, (%esp)
-       jae     1f
-       call    xen_iret_crit_fixup
-1:
-       pushl   $-1                             /* orig_ax = -1 => not a system call */
-       SAVE_ALL
-       ENCODE_FRAME_POINTER
-
-       mov     %esp, %eax
-       call    xen_pv_evtchn_do_upcall
-       jmp     handle_exception_return
-SYM_FUNC_END(xen_asm_exc_xen_hypervisor_callback)
-
-/*
- * Hypervisor uses this for application faults while it executes.
- * We get here for two reasons:
- *  1. Fault while reloading DS, ES, FS or GS
- *  2. Fault while executing IRET
- * Category 1 we fix up by reattempting the load, and zeroing the segment
- * register if the load fails.
- * Category 2 we fix up by jumping to do_iret_error. We cannot use the
- * normal Linux return path in this case because if we use the IRET hypercall
- * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
- * We distinguish between categories by maintaining a status value in EAX.
- */
-SYM_FUNC_START(xen_failsafe_callback)
-       pushl   %eax
-       movl    $1, %eax
-1:     mov     4(%esp), %ds
-2:     mov     8(%esp), %es
-3:     mov     12(%esp), %fs
-4:     mov     16(%esp), %gs
-       /* EAX == 0 => Category 1 (Bad segment)
-          EAX != 0 => Category 2 (Bad IRET) */
-       testl   %eax, %eax
-       popl    %eax
-       lea     16(%esp), %esp
-       jz      5f
-       jmp     asm_iret_error
-5:     pushl   $-1                             /* orig_ax = -1 => not a system call */
-       SAVE_ALL
-       ENCODE_FRAME_POINTER
-       jmp     handle_exception_return
-
-.section .fixup, "ax"
-6:     xorl    %eax, %eax
-       movl    %eax, 4(%esp)
-       jmp     1b
-7:     xorl    %eax, %eax
-       movl    %eax, 8(%esp)
-       jmp     2b
-8:     xorl    %eax, %eax
-       movl    %eax, 12(%esp)
-       jmp     3b
-9:     xorl    %eax, %eax
-       movl    %eax, 16(%esp)
-       jmp     4b
-.previous
-       _ASM_EXTABLE(1b, 6b)
-       _ASM_EXTABLE(2b, 7b)
-       _ASM_EXTABLE(3b, 8b)
-       _ASM_EXTABLE(4b, 9b)
-SYM_FUNC_END(xen_failsafe_callback)
-#endif /* CONFIG_XEN_PV */
-
 SYM_CODE_START_LOCAL_NOALIGN(handle_exception)
        /* the function address is in %gs's slot on the stack */
        SAVE_ALL switch_stacks=1 skip_gs=1 unwind_espfix=1
index e31a752..9d11028 100644 (file)
 146    i386    writev                  sys_writev                      compat_sys_writev
 147    i386    getsid                  sys_getsid
 148    i386    fdatasync               sys_fdatasync
-149    i386    _sysctl                 sys_sysctl                      compat_sys_sysctl
+149    i386    _sysctl                 sys_ni_syscall
 150    i386    mlock                   sys_mlock
 151    i386    munlock                 sys_munlock
 152    i386    mlockall                sys_mlockall
index 9d82078..f30d6ae 100644 (file)
 153    common  vhangup                 sys_vhangup
 154    common  modify_ldt              sys_modify_ldt
 155    common  pivot_root              sys_pivot_root
-156    64      _sysctl                 sys_sysctl
+156    64      _sysctl                 sys_ni_syscall
 157    common  prctl                   sys_prctl
 158    common  arch_prctl              sys_arch_prctl
 159    common  adjtimex                sys_adjtimex
index 04e65f0..215376d 100644 (file)
@@ -82,7 +82,7 @@ $(obj)/vdso-image-%.c: $(obj)/vdso%.so.dbg $(obj)/vdso%.so $(obj)/vdso2c FORCE
 # optimize sibling calls.
 #
 CFL := $(PROFILING) -mcmodel=small -fPIC -O2 -fasynchronous-unwind-tables -m64 \
-       $(filter -g%,$(KBUILD_CFLAGS)) $(call cc-option, -fno-stack-protector) \
+       $(filter -g%,$(KBUILD_CFLAGS)) -fno-stack-protector \
        -fno-omit-frame-pointer -foptimize-sibling-calls \
        -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO
 
@@ -151,7 +151,7 @@ KBUILD_CFLAGS_32 := $(filter-out -mfentry,$(KBUILD_CFLAGS_32))
 KBUILD_CFLAGS_32 := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS_32))
 KBUILD_CFLAGS_32 := $(filter-out $(RETPOLINE_CFLAGS),$(KBUILD_CFLAGS_32))
 KBUILD_CFLAGS_32 += -m32 -msoft-float -mregparm=0 -fpic
-KBUILD_CFLAGS_32 += $(call cc-option, -fno-stack-protector)
+KBUILD_CFLAGS_32 += -fno-stack-protector
 KBUILD_CFLAGS_32 += $(call cc-option, -foptimize-sibling-calls)
 KBUILD_CFLAGS_32 += -fno-omit-frame-pointer
 KBUILD_CFLAGS_32 += -DDISABLE_BRANCH_PROFILING
index e78047d..2cbd399 100644 (file)
@@ -16,33 +16,3 @@ ELFNOTE_START(Linux, 0, "a")
 ELFNOTE_END
 
 BUILD_SALT
-
-#ifdef CONFIG_XEN
-/*
- * Add a special note telling glibc's dynamic linker a fake hardware
- * flavor that it will use to choose the search path for libraries in the
- * same way it uses real hardware capabilities like "mmx".
- * We supply "nosegneg" as the fake capability, to indicate that we
- * do not like negative offsets in instructions using segment overrides,
- * since we implement those inefficiently.  This makes it possible to
- * install libraries optimized to avoid those access patterns in someplace
- * like /lib/i686/tls/nosegneg.  Note that an /etc/ld.so.conf.d/file
- * corresponding to the bits here is needed to make ldconfig work right.
- * It should contain:
- *     hwcap 1 nosegneg
- * to match the mapping of bit to name that we give here.
- *
- * At runtime, the fake hardware feature will be considered to be present
- * if its bit is set in the mask word.  So, we start with the mask 0, and
- * at boot time we set VDSO_NOTE_NONEGSEG_BIT if running under Xen.
- */
-
-#include "../../xen/vdso.h"    /* Defines VDSO_NOTE_NONEGSEG_BIT.  */
-
-ELFNOTE_START(GNU, 2, "a")
-       .long 1                 /* ncaps */
-VDSO32_NOTE_MASK:              /* Symbol used by arch/x86/xen/setup.c */
-       .long 0                 /* mask */
-       .byte VDSO_NOTE_NONEGSEG_BIT; .asciz "nosegneg" /* bit, name */
-ELFNOTE_END
-#endif
index 68b3882..67b411f 100644 (file)
@@ -130,11 +130,17 @@ struct rapl_pmus {
        struct rapl_pmu         *pmus[];
 };
 
+enum rapl_unit_quirk {
+       RAPL_UNIT_QUIRK_NONE,
+       RAPL_UNIT_QUIRK_INTEL_HSW,
+       RAPL_UNIT_QUIRK_INTEL_SPR,
+};
+
 struct rapl_model {
        struct perf_msr *rapl_msrs;
        unsigned long   events;
        unsigned int    msr_power_unit;
-       bool            apply_quirk;
+       enum rapl_unit_quirk    unit_quirk;
 };
 
  /* 1/2^hw_unit Joule */
@@ -612,14 +618,28 @@ static int rapl_check_hw_unit(struct rapl_model *rm)
        for (i = 0; i < NR_RAPL_DOMAINS; i++)
                rapl_hw_unit[i] = (msr_rapl_power_unit_bits >> 8) & 0x1FULL;
 
+       switch (rm->unit_quirk) {
        /*
         * DRAM domain on HSW server and KNL has fixed energy unit which can be
         * different than the unit from power unit MSR. See
         * "Intel Xeon Processor E5-1600 and E5-2600 v3 Product Families, V2
         * of 2. Datasheet, September 2014, Reference Number: 330784-001 "
         */
-       if (rm->apply_quirk)
+       case RAPL_UNIT_QUIRK_INTEL_HSW:
+               rapl_hw_unit[PERF_RAPL_RAM] = 16;
+               break;
+       /*
+        * SPR shares the same DRAM domain energy unit as HSW, plus it
+        * also has a fixed energy unit for Psys domain.
+        */
+       case RAPL_UNIT_QUIRK_INTEL_SPR:
                rapl_hw_unit[PERF_RAPL_RAM] = 16;
+               rapl_hw_unit[PERF_RAPL_PSYS] = 0;
+               break;
+       default:
+               break;
+       }
+
 
        /*
         * Calculate the timer rate:
@@ -665,7 +685,7 @@ static const struct attribute_group *rapl_attr_update[] = {
        &rapl_events_pkg_group,
        &rapl_events_ram_group,
        &rapl_events_gpu_group,
-       &rapl_events_gpu_group,
+       &rapl_events_psys_group,
        NULL,
 };
 
@@ -698,7 +718,6 @@ static struct rapl_model model_snb = {
        .events         = BIT(PERF_RAPL_PP0) |
                          BIT(PERF_RAPL_PKG) |
                          BIT(PERF_RAPL_PP1),
-       .apply_quirk    = false,
        .msr_power_unit = MSR_RAPL_POWER_UNIT,
        .rapl_msrs      = intel_rapl_msrs,
 };
@@ -707,7 +726,6 @@ static struct rapl_model model_snbep = {
        .events         = BIT(PERF_RAPL_PP0) |
                          BIT(PERF_RAPL_PKG) |
                          BIT(PERF_RAPL_RAM),
-       .apply_quirk    = false,
        .msr_power_unit = MSR_RAPL_POWER_UNIT,
        .rapl_msrs      = intel_rapl_msrs,
 };
@@ -717,7 +735,6 @@ static struct rapl_model model_hsw = {
                          BIT(PERF_RAPL_PKG) |
                          BIT(PERF_RAPL_RAM) |
                          BIT(PERF_RAPL_PP1),
-       .apply_quirk    = false,
        .msr_power_unit = MSR_RAPL_POWER_UNIT,
        .rapl_msrs      = intel_rapl_msrs,
 };
@@ -726,7 +743,7 @@ static struct rapl_model model_hsx = {
        .events         = BIT(PERF_RAPL_PP0) |
                          BIT(PERF_RAPL_PKG) |
                          BIT(PERF_RAPL_RAM),
-       .apply_quirk    = true,
+       .unit_quirk     = RAPL_UNIT_QUIRK_INTEL_HSW,
        .msr_power_unit = MSR_RAPL_POWER_UNIT,
        .rapl_msrs      = intel_rapl_msrs,
 };
@@ -734,7 +751,7 @@ static struct rapl_model model_hsx = {
 static struct rapl_model model_knl = {
        .events         = BIT(PERF_RAPL_PKG) |
                          BIT(PERF_RAPL_RAM),
-       .apply_quirk    = true,
+       .unit_quirk     = RAPL_UNIT_QUIRK_INTEL_HSW,
        .msr_power_unit = MSR_RAPL_POWER_UNIT,
        .rapl_msrs      = intel_rapl_msrs,
 };
@@ -745,14 +762,22 @@ static struct rapl_model model_skl = {
                          BIT(PERF_RAPL_RAM) |
                          BIT(PERF_RAPL_PP1) |
                          BIT(PERF_RAPL_PSYS),
-       .apply_quirk    = false,
+       .msr_power_unit = MSR_RAPL_POWER_UNIT,
+       .rapl_msrs      = intel_rapl_msrs,
+};
+
+static struct rapl_model model_spr = {
+       .events         = BIT(PERF_RAPL_PP0) |
+                         BIT(PERF_RAPL_PKG) |
+                         BIT(PERF_RAPL_RAM) |
+                         BIT(PERF_RAPL_PSYS),
+       .unit_quirk     = RAPL_UNIT_QUIRK_INTEL_SPR,
        .msr_power_unit = MSR_RAPL_POWER_UNIT,
        .rapl_msrs      = intel_rapl_msrs,
 };
 
 static struct rapl_model model_amd_fam17h = {
        .events         = BIT(PERF_RAPL_PKG),
-       .apply_quirk    = false,
        .msr_power_unit = MSR_AMD_RAPL_POWER_UNIT,
        .rapl_msrs      = amd_rapl_msrs,
 };
@@ -787,6 +812,7 @@ static const struct x86_cpu_id rapl_model_match[] __initconst = {
        X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X,           &model_hsx),
        X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L,         &model_skl),
        X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE,           &model_skl),
+       X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X,    &model_spr),
        X86_MATCH_VENDOR_FAM(AMD,       0x17,           &model_amd_fam17h),
        X86_MATCH_VENDOR_FAM(HYGON,     0x18,           &model_amd_fam17h),
        {},
index 49bd6cf..7c0a52c 100644 (file)
@@ -3,9 +3,6 @@
 #define _ASM_X86_DEVICE_H
 
 struct dev_archdata {
-#ifdef CONFIG_IOMMU_API
-       void *iommu; /* hook for IOMMU specific extension */
-#endif
 };
 
 struct pdev_archdata {
index b9527a5..0f0dd64 100644 (file)
@@ -26,9 +26,9 @@
 
 #ifndef __ASSEMBLY__
 #include <linux/kernel.h>
-#include <asm/acpi.h>
 #include <asm/apicdef.h>
 #include <asm/page.h>
+#include <asm/pgtable_types.h>
 #ifdef CONFIG_X86_32
 #include <linux/threads.h>
 #include <asm/kmap_types.h>
index 60b944d..4f77b8f 100644 (file)
@@ -8,6 +8,7 @@
 #include <asm/io.h>
 #include <asm/hyperv-tlfs.h>
 #include <asm/nospec-branch.h>
+#include <asm/paravirt.h>
 
 typedef int (*hyperv_fill_flush_list_func)(
                struct hv_guest_mapping_flush_list *flush,
@@ -54,6 +55,17 @@ typedef int (*hyperv_fill_flush_list_func)(
        vclocks_set_used(VDSO_CLOCKMODE_HVCLOCK);
 #define hv_get_raw_timer() rdtsc_ordered()
 
+/*
+ * The reference to pv_ops must be inlined so that objtool's
+ * detection of noinstr violations can work correctly.
+ */
+static __always_inline void hv_setup_sched_clock(void *sched_clock)
+{
+#ifdef CONFIG_PARAVIRT
+       pv_ops.time.sched_clock = sched_clock;
+#endif
+}
+
 void hyperv_vector_handler(struct pt_regs *regs);
 
 static inline void hv_enable_stimer0_percpu_irq(int irq) {}
index 6e81788..28996fe 100644 (file)
@@ -25,7 +25,7 @@ void entry_SYSENTER_compat(void);
 void __end_entry_SYSENTER_compat(void);
 void entry_SYSCALL_compat(void);
 void entry_INT80_compat(void);
-#if defined(CONFIG_X86_64) && defined(CONFIG_XEN_PV)
+#ifdef CONFIG_XEN_PV
 void xen_entry_INT80_compat(void);
 #endif
 #endif
index 6669164..9646c30 100644 (file)
@@ -301,7 +301,7 @@ static inline void vdso_read_cpunode(unsigned *cpu, unsigned *node)
 extern const char early_idt_handler_array[NUM_EXCEPTION_VECTORS][EARLY_IDT_HANDLER_SIZE];
 extern void early_ignore_irq(void);
 
-#if defined(CONFIG_X86_64) && defined(CONFIG_XEN_PV)
+#ifdef CONFIG_XEN_PV
 extern const char xen_early_idt_handler_array[NUM_EXCEPTION_VECTORS][XEN_EARLY_IDT_HANDLER_SIZE];
 #endif
 
index e15f364..c0538f8 100644 (file)
@@ -5,16 +5,6 @@
 #include <linux/cpumask.h>
 #include <asm/percpu.h>
 
-/*
- * We need the APIC definitions automatically as part of 'smp.h'
- */
-#ifdef CONFIG_X86_LOCAL_APIC
-# include <asm/mpspec.h>
-# include <asm/apic.h>
-# ifdef CONFIG_X86_IO_APIC
-#  include <asm/io_apic.h>
-# endif
-#endif
 #include <asm/thread_info.h>
 #include <asm/cpumask.h>
 
index b7b2624..01a300a 100644 (file)
@@ -6,6 +6,7 @@
 #define _ASM_X86_TSC_H
 
 #include <asm/processor.h>
+#include <asm/cpufeature.h>
 
 /*
  * Standard way to access the cycle counter.
index 2f3e8f2..ecefaff 100644 (file)
@@ -33,7 +33,7 @@ static inline void set_fs(mm_segment_t fs)
        set_thread_flag(TIF_FSCHECK);
 }
 
-#define segment_eq(a, b)       ((a).seg == (b).seg)
+#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg)
 #define user_addr_max() (current->thread.addr_limit.seg)
 
 /*
index fb81fea..df01d73 100644 (file)
@@ -241,7 +241,8 @@ static u64 vread_hvclock(void)
 }
 #endif
 
-static inline u64 __arch_get_hw_counter(s32 clock_mode)
+static inline u64 __arch_get_hw_counter(s32 clock_mode,
+                                       const struct vdso_data *vd)
 {
        if (likely(clock_mode == VDSO_CLOCKMODE_TSC))
                return (u64)rdtsc_ordered();
index d117553..c3daf0a 100644 (file)
@@ -875,8 +875,6 @@ static void *__text_poke(void *addr, const void *opcode, size_t len)
         */
        BUG_ON(!pages[0] || (cross_page_boundary && !pages[1]));
 
-       local_irq_save(flags);
-
        /*
         * Map the page without the global bit, as TLB flushing is done with
         * flush_tlb_mm_range(), which is intended for non-global PTEs.
@@ -893,6 +891,8 @@ static void *__text_poke(void *addr, const void *opcode, size_t len)
         */
        VM_BUG_ON(!ptep);
 
+       local_irq_save(flags);
+
        pte = mk_pte(pages[0], pgprot);
        set_pte_at(poking_mm, poking_addr, ptep, pte);
 
@@ -942,8 +942,8 @@ static void *__text_poke(void *addr, const void *opcode, size_t len)
         */
        BUG_ON(memcmp(addr, opcode, len));
 
-       pte_unmap_unlock(ptep, ptl);
        local_irq_restore(flags);
+       pte_unmap_unlock(ptep, ptl);
        return addr;
 }
 
index ccf726c..5f943b9 100644 (file)
@@ -46,6 +46,7 @@
 #include <asm/proto.h>
 #include <asm/traps.h>
 #include <asm/apic.h>
+#include <asm/acpi.h>
 #include <asm/io_apic.h>
 #include <asm/desc.h>
 #include <asm/hpet.h>
index 98c9bb7..780c702 100644 (file)
@@ -10,6 +10,7 @@
  * like self-ipi, etc...
  */
 #include <linux/cpumask.h>
+#include <linux/thread_info.h>
 
 #include <asm/apic.h>
 
index 38b5b51..98d015a 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/smp.h>
 
 #include <asm/apic.h>
+#include <asm/io_apic.h>
 
 #include "local.h"
 
index d1fc62a..34a992e 100644 (file)
@@ -9,6 +9,7 @@
  *  Bits copied from original nmi.c file
  *
  */
+#include <linux/thread_info.h>
 #include <asm/apic.h>
 #include <asm/nmi.h>
 
index 6ca0f91..387154e 100644 (file)
@@ -2,6 +2,7 @@
 
 #include <linux/cpumask.h>
 #include <linux/smp.h>
+#include <asm/io_apic.h>
 
 #include "local.h"
 
index 04797f0..a997d84 100644 (file)
@@ -10,6 +10,7 @@
 
 #include <linux/jump_label.h>
 
+#include <asm/irq_vectors.h>
 #include <asm/apic.h>
 
 /* APIC flat 64 */
index 67b33d6..7bda71d 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/errno.h>
 #include <linux/smp.h>
 
+#include <asm/io_apic.h>
 #include <asm/apic.h>
 #include <asm/acpi.h>
 
index 29f0e09..bd3835d 100644 (file)
@@ -8,6 +8,7 @@
  * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
  * James Cleverdon.
  */
+#include <linux/thread_info.h>
 #include <asm/apic.h>
 
 #include "local.h"
index dba6a83..93792b4 100644 (file)
@@ -17,8 +17,7 @@ KCOV_INSTRUMENT_perf_event.o := n
 KCSAN_SANITIZE_common.o := n
 
 # Make sure load_percpu_segment has no stackprotector
-nostackp := $(call cc-option, -fno-stack-protector)
-CFLAGS_common.o                := $(nostackp)
+CFLAGS_common.o                := -fno-stack-protector
 
 obj-y                  := cacheinfo.o scattered.o topology.o
 obj-y                  += common.o
index 1da9b1c..0b2c039 100644 (file)
 
 #include <linux/interrupt.h>
 #include <asm/apic.h>
+#include <asm/cpufeatures.h>
 #include <asm/desc.h>
 #include <asm/hypervisor.h>
 #include <asm/idtentry.h>
 #include <asm/irq_regs.h>
 
-static uint32_t __init acrn_detect(void)
+static u32 __init acrn_detect(void)
 {
-       return hypervisor_cpuid_base("ACRNACRNACRN\0\0", 0);
+       return hypervisor_cpuid_base("ACRNACRNACRN", 0);
 }
 
 static void __init acrn_init_platform(void)
@@ -29,12 +30,7 @@ static void __init acrn_init_platform(void)
 
 static bool acrn_x2apic_available(void)
 {
-       /*
-        * x2apic is not supported for now. Future enablement will have to check
-        * X86_FEATURE_X2APIC to determine whether x2apic is supported in the
-        * guest.
-        */
-       return false;
+       return boot_cpu_has(X86_FEATURE_X2APIC);
 }
 
 static void (*acrn_intr_handler)(void);
index d4806ea..dcc3d94 100644 (file)
@@ -15,6 +15,7 @@
 #include <asm/cpu.h>
 #include <asm/spec-ctrl.h>
 #include <asm/smp.h>
+#include <asm/numa.h>
 #include <asm/pci-direct.h>
 #include <asm/delay.h>
 #include <asm/debugreg.h>
index f0b743a..d3f0db4 100644 (file)
@@ -31,6 +31,7 @@
 #include <asm/intel-family.h>
 #include <asm/e820/api.h>
 #include <asm/hypervisor.h>
+#include <asm/tlbflush.h>
 
 #include "cpu.h"
 
@@ -1549,7 +1550,12 @@ static ssize_t l1tf_show_state(char *buf)
 
 static ssize_t itlb_multihit_show_state(char *buf)
 {
-       if (itlb_multihit_kvm_mitigation)
+       if (!boot_cpu_has(X86_FEATURE_MSR_IA32_FEAT_CTL) ||
+           !boot_cpu_has(X86_FEATURE_VMX))
+               return sprintf(buf, "KVM: Mitigation: VMX unsupported\n");
+       else if (!(cr4_read_shadow() & X86_CR4_VMXE))
+               return sprintf(buf, "KVM: Mitigation: VMX disabled\n");
+       else if (itlb_multihit_kvm_mitigation)
                return sprintf(buf, "KVM: Mitigation: Split huge pages\n");
        else
                return sprintf(buf, "KVM: Vulnerable\n");
index 965474d..c5d6f17 100644 (file)
@@ -45,6 +45,7 @@
 #include <asm/mtrr.h>
 #include <asm/hwcap2.h>
 #include <linux/numa.h>
+#include <asm/numa.h>
 #include <asm/asm.h>
 #include <asm/bugs.h>
 #include <asm/cpu.h>
index 4e28c1f..ac6c30e 100644 (file)
@@ -10,6 +10,7 @@
 
 #include <asm/cpu.h>
 #include <asm/smp.h>
+#include <asm/numa.h>
 #include <asm/cacheinfo.h>
 #include <asm/spec-ctrl.h>
 #include <asm/delay.h>
index b6b7b38..59a1e3c 100644 (file)
@@ -23,6 +23,7 @@
 #include <asm/cmdline.h>
 #include <asm/traps.h>
 #include <asm/resctrl.h>
+#include <asm/numa.h>
 
 #ifdef CONFIG_X86_64
 #include <linux/topology.h>
index af94f05..3112544 100644 (file)
@@ -361,13 +361,6 @@ static void __init ms_hyperv_init_platform(void)
 #endif
 }
 
-void hv_setup_sched_clock(void *sched_clock)
-{
-#ifdef CONFIG_PARAVIRT
-       pv_ops.time.sched_clock = sched_clock;
-#endif
-}
-
 const __initconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
        .name                   = "Microsoft Hyper-V",
        .detect                 = ms_hyperv_platform,
index fd87b59..a8f3af2 100644 (file)
@@ -230,7 +230,7 @@ static int elf_header_exclude_ranges(struct crash_mem *cmem)
        int ret = 0;
 
        /* Exclude the low 1M because it is always reserved */
-       ret = crash_exclude_mem_range(cmem, 0, 1<<20);
+       ret = crash_exclude_mem_range(cmem, 0, (1<<20)-1);
        if (ret)
                return ret;
 
index 8d85e00..a0e8fc7 100644 (file)
@@ -20,6 +20,7 @@
 #include <asm/irqdomain.h>
 #include <asm/hpet.h>
 #include <asm/apic.h>
+#include <asm/io_apic.h>
 #include <asm/pci_x86.h>
 #include <asm/setup.h>
 #include <asm/i8259.h>
index 7a2bf88..038e19c 100644 (file)
@@ -611,6 +611,10 @@ static void check_xstate_against_struct(int nr)
  * This essentially double-checks what the cpu told us about
  * how large the XSAVE buffer needs to be.  We are recalculating
  * it to be safe.
+ *
+ * Dynamic XSAVE features allocate their own buffers and are not
+ * covered by these checks. Only the size of the buffer for task->fpu
+ * is checked here.
  */
 static void do_extra_xstate_size_checks(void)
 {
@@ -673,6 +677,33 @@ static unsigned int __init get_xsaves_size(void)
        return ebx;
 }
 
+/*
+ * Get the total size of the enabled xstates without the dynamic supervisor
+ * features.
+ */
+static unsigned int __init get_xsaves_size_no_dynamic(void)
+{
+       u64 mask = xfeatures_mask_dynamic();
+       unsigned int size;
+
+       if (!mask)
+               return get_xsaves_size();
+
+       /* Disable dynamic features. */
+       wrmsrl(MSR_IA32_XSS, xfeatures_mask_supervisor());
+
+       /*
+        * Ask the hardware what size is required of the buffer.
+        * This is the size required for the task->fpu buffer.
+        */
+       size = get_xsaves_size();
+
+       /* Re-enable dynamic features so XSAVES will work on them again. */
+       wrmsrl(MSR_IA32_XSS, xfeatures_mask_supervisor() | mask);
+
+       return size;
+}
+
 static unsigned int __init get_xsave_size(void)
 {
        unsigned int eax, ebx, ecx, edx;
@@ -710,7 +741,7 @@ static int __init init_xstate_size(void)
        xsave_size = get_xsave_size();
 
        if (boot_cpu_has(X86_FEATURE_XSAVES))
-               possible_xstate_size = get_xsaves_size();
+               possible_xstate_size = get_xsaves_size_no_dynamic();
        else
                possible_xstate_size = xsave_size;
 
index f66a6b9..7ed84c2 100644 (file)
@@ -134,38 +134,7 @@ SYM_CODE_START(startup_32)
        movl %eax,pa(initial_page_table+0xffc)
 #endif
 
-#ifdef CONFIG_PARAVIRT
-       /* This is can only trip for a broken bootloader... */
-       cmpw $0x207, pa(boot_params + BP_version)
-       jb .Ldefault_entry
-
-       /* Paravirt-compatible boot parameters.  Look to see what architecture
-               we're booting under. */
-       movl pa(boot_params + BP_hardware_subarch), %eax
-       cmpl $num_subarch_entries, %eax
-       jae .Lbad_subarch
-
-       movl pa(subarch_entries)(,%eax,4), %eax
-       subl $__PAGE_OFFSET, %eax
-       jmp *%eax
-
-.Lbad_subarch:
-SYM_INNER_LABEL_ALIGN(xen_entry, SYM_L_WEAK)
-       /* Unknown implementation; there's really
-          nothing we can do at this point. */
-       ud2a
-
-       __INITDATA
-
-subarch_entries:
-       .long .Ldefault_entry           /* normal x86/PC */
-       .long xen_entry                 /* Xen hypervisor */
-       .long .Ldefault_entry           /* Moorestown MID */
-num_subarch_entries = (. - subarch_entries) / 4
-.previous
-#else
        jmp .Ldefault_entry
-#endif /* CONFIG_PARAVIRT */
 SYM_CODE_END(startup_32)
 
 #ifdef CONFIG_HOTPLUG_CPU
index dd73135..beb1bad 100644 (file)
@@ -22,6 +22,8 @@
 #include <asm/timer.h>
 #include <asm/hw_irq.h>
 #include <asm/desc.h>
+#include <asm/io_apic.h>
+#include <asm/acpi.h>
 #include <asm/apic.h>
 #include <asm/setup.h>
 #include <asm/i8259.h>
index 6eb8b50..4eb8f2d 100644 (file)
@@ -13,6 +13,8 @@
 #include <linux/reboot.h>
 #include <linux/serial_8250.h>
 #include <asm/apic.h>
+#include <asm/io_apic.h>
+#include <asm/acpi.h>
 #include <asm/cpu.h>
 #include <asm/hypervisor.h>
 #include <asm/i8259.h>
index c27b82b..411af4a 100644 (file)
@@ -19,6 +19,8 @@
 #include <linux/smp.h>
 #include <linux/pci.h>
 
+#include <asm/io_apic.h>
+#include <asm/acpi.h>
 #include <asm/irqdomain.h>
 #include <asm/mtrr.h>
 #include <asm/mpspec.h>
index d6f9467..9afefe3 100644 (file)
@@ -390,7 +390,7 @@ unsigned long x86_fsgsbase_read_task(struct task_struct *task,
                 */
                mutex_lock(&task->mm->context.lock);
                ldt = task->mm->context.ldt;
-               if (unlikely(idx >= ldt->nr_entries))
+               if (unlikely(!ldt || idx >= ldt->nr_entries))
                        base = 0;
                else
                        base = get_desc_base(ldt->entries + idx);
index b9a68d8..3511736 100644 (file)
@@ -25,6 +25,7 @@
 #include <xen/xen.h>
 
 #include <asm/apic.h>
+#include <asm/numa.h>
 #include <asm/bios_ebda.h>
 #include <asm/bugs.h>
 #include <asm/cpu.h>
index b8810eb..0a2ec80 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/init.h>
 #include <linux/smp.h>
 #include <linux/irq.h>
+#include <asm/io_apic.h>
 #include <asm/cpu.h>
 
 static DEFINE_PER_CPU(struct x86_cpu, cpu_devices);
index 4fec6f3..6555a85 100644 (file)
@@ -7,6 +7,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/thread_info.h>
 
 #include <asm/apic.h>
 #include <asm/cpu_device_id.h>
@@ -133,10 +134,15 @@ static const struct freq_desc freq_desc_ann = {
        .mask = 0x0f,
 };
 
-/* 24 MHz crystal? : 24 * 13 / 4 = 78 MHz */
+/*
+ * 24 MHz crystal? : 24 * 13 / 4 = 78 MHz
+ * Frequency step for Lightning Mountain SoC is fixed to 78 MHz,
+ * so all the frequency entries are 78000.
+ */
 static const struct freq_desc freq_desc_lgm = {
        .use_msr_plat = true,
-       .freqs = { 78000, 78000, 78000, 78000, 78000, 78000, 78000, 78000 },
+       .freqs = { 78000, 78000, 78000, 78000, 78000, 78000, 78000, 78000,
+                  78000, 78000, 78000, 78000, 78000, 78000, 78000, 78000 },
        .mask = 0x0f,
 };
 
index 539ea1c..3394528 100644 (file)
@@ -10667,11 +10667,17 @@ int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
 {
        struct kvm_kernel_irqfd *irqfd =
                container_of(cons, struct kvm_kernel_irqfd, consumer);
+       int ret;
 
        irqfd->producer = prod;
+       kvm_arch_start_assignment(irqfd->kvm);
+       ret = kvm_x86_ops.update_pi_irte(irqfd->kvm,
+                                        prod->irq, irqfd->gsi, 1);
+
+       if (ret)
+               kvm_arch_end_assignment(irqfd->kvm);
 
-       return kvm_x86_ops.update_pi_irte(irqfd->kvm,
-                                          prod->irq, irqfd->gsi, 1);
+       return ret;
 }
 
 void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
@@ -10694,6 +10700,8 @@ void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
        if (ret)
                printk(KERN_INFO "irq bypass consumer (token %p) unregistration"
                       " fails: %d\n", irqfd->consumer.token, ret);
+
+       kvm_arch_end_assignment(irqfd->kvm);
 }
 
 int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
index 6110bce..d46fff1 100644 (file)
@@ -24,7 +24,7 @@ ifdef CONFIG_FUNCTION_TRACER
 CFLAGS_REMOVE_cmdline.o = -pg
 endif
 
-CFLAGS_cmdline.o := $(call cc-option, -fno-stack-protector)
+CFLAGS_cmdline.o := -fno-stack-protector
 endif
 
 inat_tables_script = $(srctree)/arch/x86/tools/gen-insn-attr-x86.awk
index f7fd0e8..5864219 100644 (file)
@@ -22,10 +22,9 @@ obj-y                                :=  init.o init_$(BITS).o fault.o ioremap.o extable.o mmap.o \
 obj-y                          += pat/
 
 # Make sure __phys_addr has no stackprotector
-nostackp := $(call cc-option, -fno-stack-protector)
-CFLAGS_physaddr.o              := $(nostackp)
-CFLAGS_setup_nx.o              := $(nostackp)
-CFLAGS_mem_encrypt_identity.o  := $(nostackp)
+CFLAGS_physaddr.o              := -fno-stack-protector
+CFLAGS_setup_nx.o              := -fno-stack-protector
+CFLAGS_mem_encrypt_identity.o  := -fno-stack-protector
 
 CFLAGS_fault.o := -I $(srctree)/$(src)/../include/asm/trace
 
index 0c7643d..35f1498 100644 (file)
@@ -1139,7 +1139,7 @@ void do_user_addr_fault(struct pt_regs *regs,
        struct vm_area_struct *vma;
        struct task_struct *tsk;
        struct mm_struct *mm;
-       vm_fault_t fault, major = 0;
+       vm_fault_t fault;
        unsigned int flags = FAULT_FLAG_DEFAULT;
 
        tsk = current;
@@ -1291,8 +1291,7 @@ good_area:
         * userland). The return to userland is identified whenever
         * FAULT_FLAG_USER|FAULT_FLAG_KILLABLE are both set in flags.
         */
-       fault = handle_mm_fault(vma, address, flags);
-       major |= fault & VM_FAULT_MAJOR;
+       fault = handle_mm_fault(vma, address, flags, regs);
 
        /* Quick path to respond to signals */
        if (fault_signal_pending(fault, regs)) {
@@ -1319,18 +1318,6 @@ good_area:
                return;
        }
 
-       /*
-        * Major/minor page fault accounting. If any of the events
-        * returned VM_FAULT_MAJOR, we account it as a major fault.
-        */
-       if (major) {
-               tsk->maj_flt++;
-               perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
-       } else {
-               tsk->min_flt++;
-               perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
-       }
-
        check_v8086_mode(regs, address, tsk);
 }
 NOKPROBE_SYMBOL(do_user_addr_fault);
index 4cb9584..7c05525 100644 (file)
@@ -52,6 +52,7 @@
 #include <asm/cpu_entry_area.h>
 #include <asm/init.h>
 #include <asm/pgtable_areas.h>
+#include <asm/numa.h>
 
 #include "mm_internal.h"
 
index 3b246ae..a4ac13c 100644 (file)
@@ -1452,6 +1452,15 @@ static unsigned long probe_memory_block_size(void)
                goto done;
        }
 
+       /*
+        * Use max block size to minimize overhead on bare metal, where
+        * alignment for memory hotplug isn't a concern.
+        */
+       if (!boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
+               bz = MAX_BLOCK_SIZE;
+               goto done;
+       }
+
        /* Find the largest allowed block size that aligns to memory end */
        for (bz = MAX_BLOCK_SIZE; bz > MIN_MEMORY_BLOCK_SIZE; bz >>= 1) {
                if (IS_ALIGNED(boot_mem_end, bz))
index b05f45e..aa76ec2 100644 (file)
@@ -929,5 +929,4 @@ int memory_add_physaddr_to_nid(u64 start)
                nid = numa_meminfo.blk[0].nid;
        return nid;
 }
-EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
 #endif
index 9f9aad4..89395a5 100644 (file)
@@ -26,6 +26,7 @@
 #include <asm/xen/pci.h>
 #include <asm/xen/cpuid.h>
 #include <asm/apic.h>
+#include <asm/acpi.h>
 #include <asm/i8259.h>
 
 static int xen_pcifront_enable_irq(struct pci_dev *dev)
index 37923d7..6907b52 100644 (file)
@@ -3,8 +3,7 @@ OBJECT_FILES_NON_STANDARD_hibernate_asm_$(BITS).o := y
 
 # __restore_processor_state() restores %gs after S3 resume and so should not
 # itself be stack-protected
-nostackp := $(call cc-option, -fno-stack-protector)
-CFLAGS_cpu.o   := $(nostackp)
+CFLAGS_cpu.o   := -fno-stack-protector
 
 obj-$(CONFIG_PM_SLEEP)         += cpu.o
 obj-$(CONFIG_HIBERNATION)      += hibernate_$(BITS).o hibernate_asm_$(BITS).o hibernate.o
index 088bd76..95ea17a 100644 (file)
@@ -32,9 +32,9 @@ KCOV_INSTRUMENT := n
 # make up the standalone purgatory.ro
 
 PURGATORY_CFLAGS_REMOVE := -mcmodel=kernel
-PURGATORY_CFLAGS := -mcmodel=large -ffreestanding -fno-zero-initialized-in-bss
+PURGATORY_CFLAGS := -mcmodel=large -ffreestanding -fno-zero-initialized-in-bss -g0
 PURGATORY_CFLAGS += $(DISABLE_STACKLEAK_PLUGIN) -DDISABLE_BRANCH_PROFILING
-PURGATORY_CFLAGS += $(call cc-option,-fno-stack-protector)
+PURGATORY_CFLAGS += -fno-stack-protector
 
 # Default KBUILD_CFLAGS can have -pg option set when FTRACE is enabled. That
 # in turn leaves some undefined symbols like __fentry__ in purgatory and not
@@ -64,6 +64,9 @@ CFLAGS_sha256.o                       += $(PURGATORY_CFLAGS)
 CFLAGS_REMOVE_string.o         += $(PURGATORY_CFLAGS_REMOVE)
 CFLAGS_string.o                        += $(PURGATORY_CFLAGS)
 
+AFLAGS_REMOVE_setup-x86_$(BITS).o      += -Wa,-gdwarf-2
+AFLAGS_REMOVE_entry64.o                        += -Wa,-gdwarf-2
+
 $(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
                $(call if_changed,ld)
 
index 0caddd6..5943387 100644 (file)
@@ -42,7 +42,7 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE
 # optimize sibling calls.
 #
 CFL := $(PROFILING) -mcmodel=small -fPIC -O2 -fasynchronous-unwind-tables -m64 \
-       $(filter -g%,$(KBUILD_CFLAGS)) $(call cc-option, -fno-stack-protector) \
+       $(filter -g%,$(KBUILD_CFLAGS)) -fno-stack-protector \
        -fno-omit-frame-pointer -foptimize-sibling-calls
 
 $(vobjs): KBUILD_CFLAGS += $(CFL)
index 1aded63..218acbd 100644 (file)
@@ -19,6 +19,7 @@ config XEN_PV
        bool "Xen PV guest support"
        default y
        depends on XEN
+       depends on X86_64
        select PARAVIRT_XXL
        select XEN_HAVE_PVMMU
        select XEN_HAVE_VPMU
@@ -50,7 +51,7 @@ config XEN_PVHVM_SMP
 
 config XEN_512GB
        bool "Limit Xen pv-domain memory to 512GB"
-       depends on XEN_PV && X86_64
+       depends on XEN_PV
        default y
        help
          Limit paravirtualized user domains to 512GB of RAM.
index 084de77..fc5c5ba 100644 (file)
@@ -1,5 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0
-OBJECT_FILES_NON_STANDARD_xen-asm_$(BITS).o := y
+OBJECT_FILES_NON_STANDARD_xen-asm.o := y
 
 ifdef CONFIG_FUNCTION_TRACER
 # Do not profile debug and lowlevel utilities
@@ -9,9 +9,8 @@ CFLAGS_REMOVE_irq.o = -pg
 endif
 
 # Make sure early boot has no stackprotector
-nostackp := $(call cc-option, -fno-stack-protector)
-CFLAGS_enlighten_pv.o          := $(nostackp)
-CFLAGS_mmu_pv.o                        := $(nostackp)
+CFLAGS_enlighten_pv.o          := -fno-stack-protector
+CFLAGS_mmu_pv.o                        := -fno-stack-protector
 
 obj-y                          += enlighten.o
 obj-y                          += mmu.o
@@ -34,7 +33,6 @@ obj-$(CONFIG_XEN_PV)          += mmu_pv.o
 obj-$(CONFIG_XEN_PV)           += irq.o
 obj-$(CONFIG_XEN_PV)           += multicalls.o
 obj-$(CONFIG_XEN_PV)           += xen-asm.o
-obj-$(CONFIG_XEN_PV)           += xen-asm_$(BITS).o
 
 obj-$(CONFIG_XEN_PVH)          += enlighten_pvh.o
 
index 5e53bfb..e82fd19 100644 (file)
@@ -1,8 +1,10 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <linux/init.h>
+#include <linux/thread_info.h>
 
 #include <asm/x86_init.h>
 #include <asm/apic.h>
+#include <asm/io_apic.h>
 #include <asm/xen/hypercall.h>
 
 #include <xen/xen.h>
@@ -58,10 +60,6 @@ static u32 xen_apic_read(u32 reg)
 
        if (reg == APIC_LVR)
                return 0x14;
-#ifdef CONFIG_X86_32
-       if (reg == APIC_LDR)
-               return SET_APIC_LOGICAL_ID(1UL << smp_processor_id());
-#endif
        if (reg != APIC_ID)
                return 0;
 
@@ -127,14 +125,6 @@ static int xen_phys_pkg_id(int initial_apic_id, int index_msb)
        return initial_apic_id >> index_msb;
 }
 
-#ifdef CONFIG_X86_32
-static int xen_x86_32_early_logical_apicid(int cpu)
-{
-       /* Match with APIC_LDR read. Otherwise setup_local_APIC complains. */
-       return 1 << cpu;
-}
-#endif
-
 static void xen_noop(void)
 {
 }
@@ -197,11 +187,6 @@ static struct apic xen_pv_apic = {
        .icr_write                      = xen_apic_icr_write,
        .wait_icr_idle                  = xen_noop,
        .safe_wait_icr_idle             = xen_safe_apic_wait_icr_idle,
-
-#ifdef CONFIG_X86_32
-       /* generic_processor_info and setup_local_APIC. */
-       .x86_32_early_logical_apicid    = xen_x86_32_early_logical_apicid,
-#endif
 };
 
 static void __init xen_apic_check(void)
index 3e89b00..9e87ab0 100644 (file)
@@ -11,6 +11,7 @@
 
 #include <asm/cpu.h>
 #include <asm/smp.h>
+#include <asm/io_apic.h>
 #include <asm/reboot.h>
 #include <asm/setup.h>
 #include <asm/idtentry.h>
index 2aab43a..22e741e 100644 (file)
@@ -119,14 +119,6 @@ static void __init xen_banner(void)
        printk(KERN_INFO "Xen version: %d.%d%s%s\n",
               version >> 16, version & 0xffff, extra.extraversion,
               xen_feature(XENFEAT_mmu_pt_update_preserve_ad) ? " (preserve-AD)" : "");
-
-#ifdef CONFIG_X86_32
-       pr_warn("WARNING! WARNING! WARNING! WARNING! WARNING! WARNING! WARNING!\n"
-               "Support for running as 32-bit PV-guest under Xen will soon be removed\n"
-               "from the Linux kernel!\n"
-               "Please use either a 64-bit kernel or switch to HVM or PVH mode!\n"
-               "WARNING! WARNING! WARNING! WARNING! WARNING! WARNING! WARNING!\n");
-#endif
 }
 
 static void __init xen_pv_init_platform(void)
@@ -353,15 +345,13 @@ static void set_aliased_prot(void *v, pgprot_t prot)
        pte_t *ptep;
        pte_t pte;
        unsigned long pfn;
-       struct page *page;
        unsigned char dummy;
+       void *va;
 
        ptep = lookup_address((unsigned long)v, &level);
        BUG_ON(ptep == NULL);
 
        pfn = pte_pfn(*ptep);
-       page = pfn_to_page(pfn);
-
        pte = pfn_pte(pfn, prot);
 
        /*
@@ -391,14 +381,10 @@ static void set_aliased_prot(void *v, pgprot_t prot)
        if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
                BUG();
 
-       if (!PageHighMem(page)) {
-               void *av = __va(PFN_PHYS(pfn));
+       va = __va(PFN_PHYS(pfn));
 
-               if (av != v)
-                       if (HYPERVISOR_update_va_mapping((unsigned long)av, pte, 0))
-                               BUG();
-       } else
-               kmap_flush_unused();
+       if (va != v && HYPERVISOR_update_va_mapping((unsigned long)va, pte, 0))
+               BUG();
 
        preempt_enable();
 }
@@ -538,30 +524,12 @@ static void load_TLS_descriptor(struct thread_struct *t,
 static void xen_load_tls(struct thread_struct *t, unsigned int cpu)
 {
        /*
-        * XXX sleazy hack: If we're being called in a lazy-cpu zone
-        * and lazy gs handling is enabled, it means we're in a
-        * context switch, and %gs has just been saved.  This means we
-        * can zero it out to prevent faults on exit from the
-        * hypervisor if the next process has no %gs.  Either way, it
-        * has been saved, and the new value will get loaded properly.
-        * This will go away as soon as Xen has been modified to not
-        * save/restore %gs for normal hypercalls.
-        *
-        * On x86_64, this hack is not used for %gs, because gs points
-        * to KERNEL_GS_BASE (and uses it for PDA references), so we
-        * must not zero %gs on x86_64
-        *
-        * For x86_64, we need to zero %fs, otherwise we may get an
+        * In lazy mode we need to zero %fs, otherwise we may get an
         * exception between the new %fs descriptor being loaded and
         * %fs being effectively cleared at __switch_to().
         */
-       if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU) {
-#ifdef CONFIG_X86_32
-               lazy_load_gs(0);
-#else
+       if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU)
                loadsegment(fs, 0);
-#endif
-       }
 
        xen_mc_batch();
 
@@ -572,13 +540,11 @@ static void xen_load_tls(struct thread_struct *t, unsigned int cpu)
        xen_mc_issue(PARAVIRT_LAZY_CPU);
 }
 
-#ifdef CONFIG_X86_64
 static void xen_load_gs_index(unsigned int idx)
 {
        if (HYPERVISOR_set_segment_base(SEGBASE_GS_USER_SEL, idx))
                BUG();
 }
-#endif
 
 static void xen_write_ldt_entry(struct desc_struct *dt, int entrynum,
                                const void *ptr)
@@ -597,7 +563,6 @@ static void xen_write_ldt_entry(struct desc_struct *dt, int entrynum,
        preempt_enable();
 }
 
-#ifdef CONFIG_X86_64
 void noist_exc_debug(struct pt_regs *regs);
 
 DEFINE_IDTENTRY_RAW(xenpv_exc_nmi)
@@ -697,7 +662,6 @@ static bool __ref get_trap_addr(void **addr, unsigned int ist)
 
        return true;
 }
-#endif
 
 static int cvt_gate_to_trap(int vector, const gate_desc *val,
                            struct trap_info *info)
@@ -710,10 +674,8 @@ static int cvt_gate_to_trap(int vector, const gate_desc *val,
        info->vector = vector;
 
        addr = gate_offset(val);
-#ifdef CONFIG_X86_64
        if (!get_trap_addr((void **)&addr, val->bits.ist))
                return 0;
-#endif /* CONFIG_X86_64 */
        info->address = addr;
 
        info->cs = gate_segment(val);
@@ -958,15 +920,12 @@ static u64 xen_read_msr_safe(unsigned int msr, int *err)
 static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
 {
        int ret;
-#ifdef CONFIG_X86_64
        unsigned int which;
        u64 base;
-#endif
 
        ret = 0;
 
        switch (msr) {
-#ifdef CONFIG_X86_64
        case MSR_FS_BASE:               which = SEGBASE_FS; goto set;
        case MSR_KERNEL_GS_BASE:        which = SEGBASE_GS_USER; goto set;
        case MSR_GS_BASE:               which = SEGBASE_GS_KERNEL; goto set;
@@ -976,7 +935,6 @@ static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
                if (HYPERVISOR_set_segment_base(which, base) != 0)
                        ret = -EIO;
                break;
-#endif
 
        case MSR_STAR:
        case MSR_CSTAR:
@@ -1058,9 +1016,7 @@ void __init xen_setup_vcpu_info_placement(void)
 static const struct pv_info xen_info __initconst = {
        .shared_kernel_pmd = 0,
 
-#ifdef CONFIG_X86_64
        .extra_user_64bit_cs = FLAT_USER_CS64,
-#endif
        .name = "Xen",
 };
 
@@ -1086,18 +1042,14 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
        .read_pmc = xen_read_pmc,
 
        .iret = xen_iret,
-#ifdef CONFIG_X86_64
        .usergs_sysret64 = xen_sysret64,
-#endif
 
        .load_tr_desc = paravirt_nop,
        .set_ldt = xen_set_ldt,
        .load_gdt = xen_load_gdt,
        .load_idt = xen_load_idt,
        .load_tls = xen_load_tls,
-#ifdef CONFIG_X86_64
        .load_gs_index = xen_load_gs_index,
-#endif
 
        .alloc_ldt = xen_alloc_ldt,
        .free_ldt = xen_free_ldt,
@@ -1364,15 +1316,7 @@ asmlinkage __visible void __init xen_start_kernel(void)
 
        /* keep using Xen gdt for now; no urgent need to change it */
 
-#ifdef CONFIG_X86_32
-       pv_info.kernel_rpl = 1;
-       if (xen_feature(XENFEAT_supervisor_mode_kernel))
-               pv_info.kernel_rpl = 0;
-#else
        pv_info.kernel_rpl = 0;
-#endif
-       /* set the limit of our address space */
-       xen_reserve_top();
 
        /*
         * We used to do this in xen_arch_setup, but that is too late
@@ -1384,12 +1328,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
        if (rc != 0)
                xen_raw_printk("physdev_op failed %d\n", rc);
 
-#ifdef CONFIG_X86_32
-       /* set up basic CPUID stuff */
-       cpu_detect(&new_cpu_data);
-       set_cpu_cap(&new_cpu_data, X86_FEATURE_FPU);
-       new_cpu_data.x86_capability[CPUID_1_EDX] = cpuid_edx(1);
-#endif
 
        if (xen_start_info->mod_start) {
            if (xen_start_info->flags & SIF_MOD_START_PFN)
@@ -1458,12 +1396,8 @@ asmlinkage __visible void __init xen_start_kernel(void)
        xen_efi_init(&boot_params);
 
        /* Start the world */
-#ifdef CONFIG_X86_32
-       i386_start_kernel();
-#else
        cr4_init_shadow(); /* 32b kernel does this in i386_start_kernel() */
        x86_64_start_reservations((char *)__pa_symbol(&boot_params));
-#endif
 }
 
 static int xen_cpu_up_prepare_pv(unsigned int cpu)
index a58d9c6..3273c98 100644 (file)
 #include "mmu.h"
 #include "debugfs.h"
 
-#ifdef CONFIG_X86_32
-/*
- * Identity map, in addition to plain kernel map.  This needs to be
- * large enough to allocate page table pages to allocate the rest.
- * Each page can map 2MB.
- */
-#define LEVEL1_IDENT_ENTRIES   (PTRS_PER_PTE * 4)
-static RESERVE_BRK_ARRAY(pte_t, level1_ident_pgt, LEVEL1_IDENT_ENTRIES);
-#endif
-#ifdef CONFIG_X86_64
 /* l3 pud for userspace vsyscall mapping */
 static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
-#endif /* CONFIG_X86_64 */
 
 /*
  * Protects atomic reservation decrease/increase against concurrent increases.
@@ -280,10 +269,7 @@ static inline void __xen_set_pte(pte_t *ptep, pte_t pteval)
        if (!xen_batched_set_pte(ptep, pteval)) {
                /*
                 * Could call native_set_pte() here and trap and
-                * emulate the PTE write but with 32-bit guests this
-                * needs two traps (one for each of the two 32-bit
-                * words in the PTE) so do one hypercall directly
-                * instead.
+                * emulate the PTE write, but a hypercall is much cheaper.
                 */
                struct mmu_update u;
 
@@ -439,26 +425,6 @@ static void xen_set_pud(pud_t *ptr, pud_t val)
        xen_set_pud_hyper(ptr, val);
 }
 
-#ifdef CONFIG_X86_PAE
-static void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
-{
-       trace_xen_mmu_set_pte_atomic(ptep, pte);
-       __xen_set_pte(ptep, pte);
-}
-
-static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
-{
-       trace_xen_mmu_pte_clear(mm, addr, ptep);
-       __xen_set_pte(ptep, native_make_pte(0));
-}
-
-static void xen_pmd_clear(pmd_t *pmdp)
-{
-       trace_xen_mmu_pmd_clear(pmdp);
-       set_pmd(pmdp, __pmd(0));
-}
-#endif /* CONFIG_X86_PAE */
-
 __visible pmd_t xen_make_pmd(pmdval_t pmd)
 {
        pmd = pte_pfn_to_mfn(pmd);
@@ -466,7 +432,6 @@ __visible pmd_t xen_make_pmd(pmdval_t pmd)
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);
 
-#ifdef CONFIG_X86_64
 __visible pudval_t xen_pud_val(pud_t pud)
 {
        return pte_mfn_to_pfn(pud.pud);
@@ -571,27 +536,27 @@ __visible p4d_t xen_make_p4d(p4dval_t p4d)
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_make_p4d);
 #endif  /* CONFIG_PGTABLE_LEVELS >= 5 */
-#endif /* CONFIG_X86_64 */
 
-static int xen_pmd_walk(struct mm_struct *mm, pmd_t *pmd,
-               int (*func)(struct mm_struct *mm, struct page *, enum pt_level),
-               bool last, unsigned long limit)
+static void xen_pmd_walk(struct mm_struct *mm, pmd_t *pmd,
+                        void (*func)(struct mm_struct *mm, struct page *,
+                                     enum pt_level),
+                        bool last, unsigned long limit)
 {
-       int i, nr, flush = 0;
+       int i, nr;
 
        nr = last ? pmd_index(limit) + 1 : PTRS_PER_PMD;
        for (i = 0; i < nr; i++) {
                if (!pmd_none(pmd[i]))
-                       flush |= (*func)(mm, pmd_page(pmd[i]), PT_PTE);
+                       (*func)(mm, pmd_page(pmd[i]), PT_PTE);
        }
-       return flush;
 }
 
-static int xen_pud_walk(struct mm_struct *mm, pud_t *pud,
-               int (*func)(struct mm_struct *mm, struct page *, enum pt_level),
-               bool last, unsigned long limit)
+static void xen_pud_walk(struct mm_struct *mm, pud_t *pud,
+                        void (*func)(struct mm_struct *mm, struct page *,
+                                     enum pt_level),
+                        bool last, unsigned long limit)
 {
-       int i, nr, flush = 0;
+       int i, nr;
 
        nr = last ? pud_index(limit) + 1 : PTRS_PER_PUD;
        for (i = 0; i < nr; i++) {
@@ -602,29 +567,26 @@ static int xen_pud_walk(struct mm_struct *mm, pud_t *pud,
 
                pmd = pmd_offset(&pud[i], 0);
                if (PTRS_PER_PMD > 1)
-                       flush |= (*func)(mm, virt_to_page(pmd), PT_PMD);
-               flush |= xen_pmd_walk(mm, pmd, func,
-                               last && i == nr - 1, limit);
+                       (*func)(mm, virt_to_page(pmd), PT_PMD);
+               xen_pmd_walk(mm, pmd, func, last && i == nr - 1, limit);
        }
-       return flush;
 }
 
-static int xen_p4d_walk(struct mm_struct *mm, p4d_t *p4d,
-               int (*func)(struct mm_struct *mm, struct page *, enum pt_level),
-               bool last, unsigned long limit)
+static void xen_p4d_walk(struct mm_struct *mm, p4d_t *p4d,
+                        void (*func)(struct mm_struct *mm, struct page *,
+                                     enum pt_level),
+                        bool last, unsigned long limit)
 {
-       int flush = 0;
        pud_t *pud;
 
 
        if (p4d_none(*p4d))
-               return flush;
+               return;
 
        pud = pud_offset(p4d, 0);
        if (PTRS_PER_PUD > 1)
-               flush |= (*func)(mm, virt_to_page(pud), PT_PUD);
-       flush |= xen_pud_walk(mm, pud, func, last, limit);
-       return flush;
+               (*func)(mm, virt_to_page(pud), PT_PUD);
+       xen_pud_walk(mm, pud, func, last, limit);
 }
 
 /*
@@ -636,32 +598,27 @@ static int xen_p4d_walk(struct mm_struct *mm, p4d_t *p4d,
  * will be STACK_TOP_MAX, but at boot we need to pin up to
  * FIXADDR_TOP.
  *
- * For 32-bit the important bit is that we don't pin beyond there,
- * because then we start getting into Xen's ptes.
- *
- * For 64-bit, we must skip the Xen hole in the middle of the address
- * space, just after the big x86-64 virtual hole.
+ * We must skip the Xen hole in the middle of the address space, just after
+ * the big x86-64 virtual hole.
  */
-static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
-                         int (*func)(struct mm_struct *mm, struct page *,
-                                     enum pt_level),
-                         unsigned long limit)
+static void __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
+                          void (*func)(struct mm_struct *mm, struct page *,
+                                       enum pt_level),
+                          unsigned long limit)
 {
-       int i, nr, flush = 0;
+       int i, nr;
        unsigned hole_low = 0, hole_high = 0;
 
        /* The limit is the last byte to be touched */
        limit--;
        BUG_ON(limit >= FIXADDR_TOP);
 
-#ifdef CONFIG_X86_64
        /*
         * 64-bit has a great big hole in the middle of the address
         * space, which contains the Xen mappings.
         */
        hole_low = pgd_index(GUARD_HOLE_BASE_ADDR);
        hole_high = pgd_index(GUARD_HOLE_END_ADDR);
-#endif
 
        nr = pgd_index(limit) + 1;
        for (i = 0; i < nr; i++) {
@@ -674,22 +631,20 @@ static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
                        continue;
 
                p4d = p4d_offset(&pgd[i], 0);
-               flush |= xen_p4d_walk(mm, p4d, func, i == nr - 1, limit);
+               xen_p4d_walk(mm, p4d, func, i == nr - 1, limit);
        }
 
        /* Do the top level last, so that the callbacks can use it as
           a cue to do final things like tlb flushes. */
-       flush |= (*func)(mm, virt_to_page(pgd), PT_PGD);
-
-       return flush;
+       (*func)(mm, virt_to_page(pgd), PT_PGD);
 }
 
-static int xen_pgd_walk(struct mm_struct *mm,
-                       int (*func)(struct mm_struct *mm, struct page *,
-                                   enum pt_level),
-                       unsigned long limit)
+static void xen_pgd_walk(struct mm_struct *mm,
+                        void (*func)(struct mm_struct *mm, struct page *,
+                                     enum pt_level),
+                        unsigned long limit)
 {
-       return __xen_pgd_walk(mm, mm->pgd, func, limit);
+       __xen_pgd_walk(mm, mm->pgd, func, limit);
 }
 
 /* If we're using split pte locks, then take the page's lock and
@@ -722,26 +677,17 @@ static void xen_do_pin(unsigned level, unsigned long pfn)
        xen_extend_mmuext_op(&op);
 }
 
-static int xen_pin_page(struct mm_struct *mm, struct page *page,
-                       enum pt_level level)
+static void xen_pin_page(struct mm_struct *mm, struct page *page,
+                        enum pt_level level)
 {
        unsigned pgfl = TestSetPagePinned(page);
-       int flush;
-
-       if (pgfl)
-               flush = 0;              /* already pinned */
-       else if (PageHighMem(page))
-               /* kmaps need flushing if we found an unpinned
-                  highpage */
-               flush = 1;
-       else {
+
+       if (!pgfl) {
                void *pt = lowmem_page_address(page);
                unsigned long pfn = page_to_pfn(page);
                struct multicall_space mcs = __xen_mc_entry(0);
                spinlock_t *ptl;
 
-               flush = 0;
-
                /*
                 * We need to hold the pagetable lock between the time
                 * we make the pagetable RO and when we actually pin
@@ -778,8 +724,6 @@ static int xen_pin_page(struct mm_struct *mm, struct page *page,
                        xen_mc_callback(xen_pte_unlock, ptl);
                }
        }
-
-       return flush;
 }
 
 /* This is called just after a mm has been created, but it has not
@@ -787,39 +731,22 @@ static int xen_pin_page(struct mm_struct *mm, struct page *page,
    read-only, and can be pinned. */
 static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
 {
+       pgd_t *user_pgd = xen_get_user_pgd(pgd);
+
        trace_xen_mmu_pgd_pin(mm, pgd);
 
        xen_mc_batch();
 
-       if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) {
-               /* re-enable interrupts for flushing */
-               xen_mc_issue(0);
+       __xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT);
 
-               kmap_flush_unused();
+       xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));
 
-               xen_mc_batch();
+       if (user_pgd) {
+               xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD);
+               xen_do_pin(MMUEXT_PIN_L4_TABLE,
+                          PFN_DOWN(__pa(user_pgd)));
        }
 
-#ifdef CONFIG_X86_64
-       {
-               pgd_t *user_pgd = xen_get_user_pgd(pgd);
-
-               xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));
-
-               if (user_pgd) {
-                       xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD);
-                       xen_do_pin(MMUEXT_PIN_L4_TABLE,
-                                  PFN_DOWN(__pa(user_pgd)));
-               }
-       }
-#else /* CONFIG_X86_32 */
-#ifdef CONFIG_X86_PAE
-       /* Need to make sure unshared kernel PMD is pinnable */
-       xen_pin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
-                    PT_PMD);
-#endif
-       xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
-#endif /* CONFIG_X86_64 */
        xen_mc_issue(0);
 }
 
@@ -854,11 +781,10 @@ void xen_mm_pin_all(void)
        spin_unlock(&pgd_lock);
 }
 
-static int __init xen_mark_pinned(struct mm_struct *mm, struct page *page,
-                                 enum pt_level level)
+static void __init xen_mark_pinned(struct mm_struct *mm, struct page *page,
+                                  enum pt_level level)
 {
        SetPagePinned(page);
-       return 0;
 }
 
 /*
@@ -870,18 +796,16 @@ static int __init xen_mark_pinned(struct mm_struct *mm, struct page *page,
 static void __init xen_after_bootmem(void)
 {
        static_branch_enable(&xen_struct_pages_ready);
-#ifdef CONFIG_X86_64
        SetPagePinned(virt_to_page(level3_user_vsyscall));
-#endif
        xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
 }
 
-static int xen_unpin_page(struct mm_struct *mm, struct page *page,
-                         enum pt_level level)
+static void xen_unpin_page(struct mm_struct *mm, struct page *page,
+                          enum pt_level level)
 {
        unsigned pgfl = TestClearPagePinned(page);
 
-       if (pgfl && !PageHighMem(page)) {
+       if (pgfl) {
                void *pt = lowmem_page_address(page);
                unsigned long pfn = page_to_pfn(page);
                spinlock_t *ptl = NULL;
@@ -912,36 +836,24 @@ static int xen_unpin_page(struct mm_struct *mm, struct page *page,
                        xen_mc_callback(xen_pte_unlock, ptl);
                }
        }
-
-       return 0;               /* never need to flush on unpin */
 }
 
 /* Release a pagetable's pages back as normal RW */
 static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
 {
+       pgd_t *user_pgd = xen_get_user_pgd(pgd);
+
        trace_xen_mmu_pgd_unpin(mm, pgd);
 
        xen_mc_batch();
 
        xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
 
-#ifdef CONFIG_X86_64
-       {
-               pgd_t *user_pgd = xen_get_user_pgd(pgd);
-
-               if (user_pgd) {
-                       xen_do_pin(MMUEXT_UNPIN_TABLE,
-                                  PFN_DOWN(__pa(user_pgd)));
-                       xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD);
-               }
+       if (user_pgd) {
+               xen_do_pin(MMUEXT_UNPIN_TABLE,
+                          PFN_DOWN(__pa(user_pgd)));
+               xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD);
        }
-#endif
-
-#ifdef CONFIG_X86_PAE
-       /* Need to make sure unshared kernel PMD is unpinned */
-       xen_unpin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
-                      PT_PMD);
-#endif
 
        __xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT);
 
@@ -1089,7 +1001,6 @@ static void __init pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
                BUG();
 }
 
-#ifdef CONFIG_X86_64
 static void __init xen_cleanhighmap(unsigned long vaddr,
                                    unsigned long vaddr_end)
 {
@@ -1273,17 +1184,15 @@ static void __init xen_pagetable_cleanhighmap(void)
        xen_cleanhighmap(addr, roundup(addr + size, PMD_SIZE * 2));
        xen_start_info->pt_base = (unsigned long)__va(__pa(xen_start_info->pt_base));
 }
-#endif
 
 static void __init xen_pagetable_p2m_setup(void)
 {
        xen_vmalloc_p2m_tree();
 
-#ifdef CONFIG_X86_64
        xen_pagetable_p2m_free();
 
        xen_pagetable_cleanhighmap();
-#endif
+
        /* And revector! Bye bye old array */
        xen_start_info->mfn_list = (unsigned long)xen_p2m_addr;
 }
@@ -1420,6 +1329,8 @@ static void __xen_write_cr3(bool kernel, unsigned long cr3)
 }
 static void xen_write_cr3(unsigned long cr3)
 {
+       pgd_t *user_pgd = xen_get_user_pgd(__va(cr3));
+
        BUG_ON(preemptible());
 
        xen_mc_batch();  /* disables interrupts */
@@ -1430,20 +1341,14 @@ static void xen_write_cr3(unsigned long cr3)
 
        __xen_write_cr3(true, cr3);
 
-#ifdef CONFIG_X86_64
-       {
-               pgd_t *user_pgd = xen_get_user_pgd(__va(cr3));
-               if (user_pgd)
-                       __xen_write_cr3(false, __pa(user_pgd));
-               else
-                       __xen_write_cr3(false, 0);
-       }
-#endif
+       if (user_pgd)
+               __xen_write_cr3(false, __pa(user_pgd));
+       else
+               __xen_write_cr3(false, 0);
 
        xen_mc_issue(PARAVIRT_LAZY_CPU);  /* interrupts restored */
 }
 
-#ifdef CONFIG_X86_64
 /*
  * At the start of the day - when Xen launches a guest, it has already
  * built pagetables for the guest. We diligently look over them
@@ -1478,49 +1383,39 @@ static void __init xen_write_cr3_init(unsigned long cr3)
 
        xen_mc_issue(PARAVIRT_LAZY_CPU);  /* interrupts restored */
 }
-#endif
 
 static int xen_pgd_alloc(struct mm_struct *mm)
 {
        pgd_t *pgd = mm->pgd;
-       int ret = 0;
+       struct page *page = virt_to_page(pgd);
+       pgd_t *user_pgd;
+       int ret = -ENOMEM;
 
        BUG_ON(PagePinned(virt_to_page(pgd)));
+       BUG_ON(page->private != 0);
 
-#ifdef CONFIG_X86_64
-       {
-               struct page *page = virt_to_page(pgd);
-               pgd_t *user_pgd;
+       user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+       page->private = (unsigned long)user_pgd;
 
-               BUG_ON(page->private != 0);
-
-               ret = -ENOMEM;
-
-               user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
-               page->private = (unsigned long)user_pgd;
-
-               if (user_pgd != NULL) {
+       if (user_pgd != NULL) {
 #ifdef CONFIG_X86_VSYSCALL_EMULATION
-                       user_pgd[pgd_index(VSYSCALL_ADDR)] =
-                               __pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);
+               user_pgd[pgd_index(VSYSCALL_ADDR)] =
+                       __pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);
 #endif
-                       ret = 0;
-               }
-
-               BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd))));
+               ret = 0;
        }
-#endif
+
+       BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd))));
+
        return ret;
 }
 
 static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
 {
-#ifdef CONFIG_X86_64
        pgd_t *user_pgd = xen_get_user_pgd(pgd);
 
        if (user_pgd)
                free_page((unsigned long)user_pgd);
-#endif
 }
 
 /*
@@ -1539,7 +1434,6 @@ static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
  */
 __visible pte_t xen_make_pte_init(pteval_t pte)
 {
-#ifdef CONFIG_X86_64
        unsigned long pfn;
 
        /*
@@ -1553,7 +1447,7 @@ __visible pte_t xen_make_pte_init(pteval_t pte)
            pfn >= xen_start_info->first_p2m_pfn &&
            pfn < xen_start_info->first_p2m_pfn + xen_start_info->nr_p2m_frames)
                pte &= ~_PAGE_RW;
-#endif
+
        pte = pte_pfn_to_mfn(pte);
        return native_make_pte(pte);
 }
@@ -1561,13 +1455,6 @@ PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_init);
 
 static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
 {
-#ifdef CONFIG_X86_32
-       /* If there's an existing pte, then don't allow _PAGE_RW to be set */
-       if (pte_mfn(pte) != INVALID_P2M_ENTRY
-           && pte_val_ma(*ptep) & _PAGE_PRESENT)
-               pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
-                              pte_val_ma(pte));
-#endif
        __xen_set_pte(ptep, pte);
 }
 
@@ -1642,20 +1529,14 @@ static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
                if (static_branch_likely(&xen_struct_pages_ready))
                        SetPagePinned(page);
 
-               if (!PageHighMem(page)) {
-                       xen_mc_batch();
+               xen_mc_batch();
 
-                       __set_pfn_prot(pfn, PAGE_KERNEL_RO);
+               __set_pfn_prot(pfn, PAGE_KERNEL_RO);
 
-                       if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
-                               __pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
+               if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
+                       __pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
 
-                       xen_mc_issue(PARAVIRT_LAZY_MMU);
-               } else {
-                       /* make sure there are no stray mappings of
-                          this page */
-                       kmap_flush_unused();
-               }
+               xen_mc_issue(PARAVIRT_LAZY_MMU);
        }
 }
 
@@ -1678,16 +1559,15 @@ static inline void xen_release_ptpage(unsigned long pfn, unsigned level)
        trace_xen_mmu_release_ptpage(pfn, level, pinned);
 
        if (pinned) {
-               if (!PageHighMem(page)) {
-                       xen_mc_batch();
+               xen_mc_batch();
 
-                       if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
-                               __pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
+               if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
+                       __pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
 
-                       __set_pfn_prot(pfn, PAGE_KERNEL);
+               __set_pfn_prot(pfn, PAGE_KERNEL);
+
+               xen_mc_issue(PARAVIRT_LAZY_MMU);
 
-                       xen_mc_issue(PARAVIRT_LAZY_MMU);
-               }
                ClearPagePinned(page);
        }
 }
@@ -1702,7 +1582,6 @@ static void xen_release_pmd(unsigned long pfn)
        xen_release_ptpage(pfn, PT_PMD);
 }
 
-#ifdef CONFIG_X86_64
 static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
 {
        xen_alloc_ptpage(mm, pfn, PT_PUD);
@@ -1712,20 +1591,6 @@ static void xen_release_pud(unsigned long pfn)
 {
        xen_release_ptpage(pfn, PT_PUD);
 }
-#endif
-
-void __init xen_reserve_top(void)
-{
-#ifdef CONFIG_X86_32
-       unsigned long top = HYPERVISOR_VIRT_START;
-       struct xen_platform_parameters pp;
-
-       if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0)
-               top = pp.virt_start;
-
-       reserve_top_address(-top);
-#endif /* CONFIG_X86_32 */
-}
 
 /*
  * Like __va(), but returns address in the kernel mapping (which is
@@ -1733,11 +1598,7 @@ void __init xen_reserve_top(void)
  */
 static void * __init __ka(phys_addr_t paddr)
 {
-#ifdef CONFIG_X86_64
        return (void *)(paddr + __START_KERNEL_map);
-#else
-       return __va(paddr);
-#endif
 }
 
 /* Convert a machine address to physical address */
@@ -1771,56 +1632,7 @@ static void __init set_page_prot(void *addr, pgprot_t prot)
 {
        return set_page_prot_flags(addr, prot, UVMF_NONE);
 }
-#ifdef CONFIG_X86_32
-static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
-{
-       unsigned pmdidx, pteidx;
-       unsigned ident_pte;
-       unsigned long pfn;
-
-       level1_ident_pgt = extend_brk(sizeof(pte_t) * LEVEL1_IDENT_ENTRIES,
-                                     PAGE_SIZE);
-
-       ident_pte = 0;
-       pfn = 0;
-       for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
-               pte_t *pte_page;
-
-               /* Reuse or allocate a page of ptes */
-               if (pmd_present(pmd[pmdidx]))
-                       pte_page = m2v(pmd[pmdidx].pmd);
-               else {
-                       /* Check for free pte pages */
-                       if (ident_pte == LEVEL1_IDENT_ENTRIES)
-                               break;
-
-                       pte_page = &level1_ident_pgt[ident_pte];
-                       ident_pte += PTRS_PER_PTE;
-
-                       pmd[pmdidx] = __pmd(__pa(pte_page) | _PAGE_TABLE);
-               }
-
-               /* Install mappings */
-               for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
-                       pte_t pte;
 
-                       if (pfn > max_pfn_mapped)
-                               max_pfn_mapped = pfn;
-
-                       if (!pte_none(pte_page[pteidx]))
-                               continue;
-
-                       pte = pfn_pte(pfn, PAGE_KERNEL_EXEC);
-                       pte_page[pteidx] = pte;
-               }
-       }
-
-       for (pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE)
-               set_page_prot(&level1_ident_pgt[pteidx], PAGE_KERNEL_RO);
-
-       set_page_prot(pmd, PAGE_KERNEL_RO);
-}
-#endif
 void __init xen_setup_machphys_mapping(void)
 {
        struct xen_machphys_mapping mapping;
@@ -1831,13 +1643,8 @@ void __init xen_setup_machphys_mapping(void)
        } else {
                machine_to_phys_nr = MACH2PHYS_NR_ENTRIES;
        }
-#ifdef CONFIG_X86_32
-       WARN_ON((machine_to_phys_mapping + (machine_to_phys_nr - 1))
-               < machine_to_phys_mapping);
-#endif
 }
 
-#ifdef CONFIG_X86_64
 static void __init convert_pfn_mfn(void *v)
 {
        pte_t *pte = v;
@@ -2168,105 +1975,6 @@ void __init xen_relocate_p2m(void)
        xen_start_info->nr_p2m_frames = n_frames;
 }
 
-#else  /* !CONFIG_X86_64 */
-static RESERVE_BRK_ARRAY(pmd_t, initial_kernel_pmd, PTRS_PER_PMD);
-static RESERVE_BRK_ARRAY(pmd_t, swapper_kernel_pmd, PTRS_PER_PMD);
-RESERVE_BRK(fixup_kernel_pmd, PAGE_SIZE);
-RESERVE_BRK(fixup_kernel_pte, PAGE_SIZE);
-
-static void __init xen_write_cr3_init(unsigned long cr3)
-{
-       unsigned long pfn = PFN_DOWN(__pa(swapper_pg_dir));
-
-       BUG_ON(read_cr3_pa() != __pa(initial_page_table));
-       BUG_ON(cr3 != __pa(swapper_pg_dir));
-
-       /*
-        * We are switching to swapper_pg_dir for the first time (from
-        * initial_page_table) and therefore need to mark that page
-        * read-only and then pin it.
-        *
-        * Xen disallows sharing of kernel PMDs for PAE
-        * guests. Therefore we must copy the kernel PMD from
-        * initial_page_table into a new kernel PMD to be used in
-        * swapper_pg_dir.
-        */
-       swapper_kernel_pmd =
-               extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
-       copy_page(swapper_kernel_pmd, initial_kernel_pmd);
-       swapper_pg_dir[KERNEL_PGD_BOUNDARY] =
-               __pgd(__pa(swapper_kernel_pmd) | _PAGE_PRESENT);
-       set_page_prot(swapper_kernel_pmd, PAGE_KERNEL_RO);
-
-       set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO);
-       xen_write_cr3(cr3);
-       pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, pfn);
-
-       pin_pagetable_pfn(MMUEXT_UNPIN_TABLE,
-                         PFN_DOWN(__pa(initial_page_table)));
-       set_page_prot(initial_page_table, PAGE_KERNEL);
-       set_page_prot(initial_kernel_pmd, PAGE_KERNEL);
-
-       pv_ops.mmu.write_cr3 = &xen_write_cr3;
-}
-
-/*
- * For 32 bit domains xen_start_info->pt_base is the pgd address which might be
- * not the first page table in the page table pool.
- * Iterate through the initial page tables to find the real page table base.
- */
-static phys_addr_t __init xen_find_pt_base(pmd_t *pmd)
-{
-       phys_addr_t pt_base, paddr;
-       unsigned pmdidx;
-
-       pt_base = min(__pa(xen_start_info->pt_base), __pa(pmd));
-
-       for (pmdidx = 0; pmdidx < PTRS_PER_PMD; pmdidx++)
-               if (pmd_present(pmd[pmdidx]) && !pmd_large(pmd[pmdidx])) {
-                       paddr = m2p(pmd[pmdidx].pmd);
-                       pt_base = min(pt_base, paddr);
-               }
-
-       return pt_base;
-}
-
-void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
-{
-       pmd_t *kernel_pmd;
-
-       kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
-
-       xen_pt_base = xen_find_pt_base(kernel_pmd);
-       xen_pt_size = xen_start_info->nr_pt_frames * PAGE_SIZE;
-
-       initial_kernel_pmd =
-               extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
-
-       max_pfn_mapped = PFN_DOWN(xen_pt_base + xen_pt_size + 512 * 1024);
-
-       copy_page(initial_kernel_pmd, kernel_pmd);
-
-       xen_map_identity_early(initial_kernel_pmd, max_pfn);
-
-       copy_page(initial_page_table, pgd);
-       initial_page_table[KERNEL_PGD_BOUNDARY] =
-               __pgd(__pa(initial_kernel_pmd) | _PAGE_PRESENT);
-
-       set_page_prot(initial_kernel_pmd, PAGE_KERNEL_RO);
-       set_page_prot(initial_page_table, PAGE_KERNEL_RO);
-       set_page_prot(empty_zero_page, PAGE_KERNEL_RO);
-
-       pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
-
-       pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE,
-                         PFN_DOWN(__pa(initial_page_table)));
-       xen_write_cr3(__pa(initial_page_table));
-
-       memblock_reserve(xen_pt_base, xen_pt_size);
-}
-#endif /* CONFIG_X86_64 */
-
 void __init xen_reserve_special_pages(void)
 {
        phys_addr_t paddr;
@@ -2300,12 +2008,7 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
 
        switch (idx) {
        case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
-#ifdef CONFIG_X86_32
-       case FIX_WP_TEST:
-# ifdef CONFIG_HIGHMEM
-       case FIX_KMAP_BEGIN ... FIX_KMAP_END:
-# endif
-#elif defined(CONFIG_X86_VSYSCALL_EMULATION)
+#ifdef CONFIG_X86_VSYSCALL_EMULATION
        case VSYSCALL_PAGE:
 #endif
                /* All local page mappings */
@@ -2357,9 +2060,7 @@ static void __init xen_post_allocator_init(void)
        pv_ops.mmu.set_pte = xen_set_pte;
        pv_ops.mmu.set_pmd = xen_set_pmd;
        pv_ops.mmu.set_pud = xen_set_pud;
-#ifdef CONFIG_X86_64
        pv_ops.mmu.set_p4d = xen_set_p4d;
-#endif
 
        /* This will work as long as patching hasn't happened yet
           (which it hasn't) */
@@ -2367,15 +2068,11 @@ static void __init xen_post_allocator_init(void)
        pv_ops.mmu.alloc_pmd = xen_alloc_pmd;
        pv_ops.mmu.release_pte = xen_release_pte;
        pv_ops.mmu.release_pmd = xen_release_pmd;
-#ifdef CONFIG_X86_64
        pv_ops.mmu.alloc_pud = xen_alloc_pud;
        pv_ops.mmu.release_pud = xen_release_pud;
-#endif
        pv_ops.mmu.make_pte = PV_CALLEE_SAVE(xen_make_pte);
 
-#ifdef CONFIG_X86_64
        pv_ops.mmu.write_cr3 = &xen_write_cr3;
-#endif
 }
 
 static void xen_leave_lazy_mmu(void)
@@ -2420,17 +2117,11 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
        .make_pte = PV_CALLEE_SAVE(xen_make_pte_init),
        .make_pgd = PV_CALLEE_SAVE(xen_make_pgd),
 
-#ifdef CONFIG_X86_PAE
-       .set_pte_atomic = xen_set_pte_atomic,
-       .pte_clear = xen_pte_clear,
-       .pmd_clear = xen_pmd_clear,
-#endif /* CONFIG_X86_PAE */
        .set_pud = xen_set_pud_hyper,
 
        .make_pmd = PV_CALLEE_SAVE(xen_make_pmd),
        .pmd_val = PV_CALLEE_SAVE(xen_pmd_val),
 
-#ifdef CONFIG_X86_64
        .pud_val = PV_CALLEE_SAVE(xen_pud_val),
        .make_pud = PV_CALLEE_SAVE(xen_make_pud),
        .set_p4d = xen_set_p4d_hyper,
@@ -2442,7 +2133,6 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
        .p4d_val = PV_CALLEE_SAVE(xen_p4d_val),
        .make_p4d = PV_CALLEE_SAVE(xen_make_p4d),
 #endif
-#endif /* CONFIG_X86_64 */
 
        .activate_mm = xen_activate_mm,
        .dup_mmap = xen_dup_mmap,
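
Editor's note: the xen_pgd_walk/xen_pin_page refactor earlier in this file drops the int "flush" return once the highmem case is gone, since no caller needs the result any more. A minimal standalone sketch of that shape, with hypothetical toy types rather than the kernel's pgd/p4d/pud/pmd levels (illustration only, not kernel code):

#include <stdio.h>

#define ENTRIES 4

struct pt_page {
	int pinned;
	int entry[ENTRIES];
};

/* Callbacks return void now; previously each one returned an int "flush"
 * flag that every walk level had to accumulate and propagate upward. */
typedef void (*walk_fn)(struct pt_page *pt, int level);

static void walk(struct pt_page *top, struct pt_page *leaf[], walk_fn func)
{
	int i;

	for (i = 0; i < ENTRIES; i++)
		if (leaf[i])
			func(leaf[i], 1);	/* lower levels first */

	func(top, 0);				/* top level last, like PT_PGD */
}

static void mark_pinned(struct pt_page *pt, int level)
{
	pt->pinned = 1;				/* cf. SetPagePinned() */
	printf("pinned a level-%d table\n", level);
}

int main(void)
{
	struct pt_page top = { 0 }, a = { 0 };
	struct pt_page *leaf[ENTRIES] = { &a, NULL, NULL, NULL };

	walk(&top, leaf, mark_pinned);
	return 0;
}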
index 0acba2c..be4151f 100644 (file)
@@ -379,12 +379,8 @@ static void __init xen_rebuild_p2m_list(unsigned long *p2m)
 
                if (type == P2M_TYPE_PFN || i < chunk) {
                        /* Use initial p2m page contents. */
-#ifdef CONFIG_X86_64
                        mfns = alloc_p2m_page();
                        copy_page(mfns, xen_p2m_addr + pfn);
-#else
-                       mfns = xen_p2m_addr + pfn;
-#endif
                        ptep = populate_extra_pte((unsigned long)(p2m + pfn));
                        set_pte(ptep,
                                pfn_pte(PFN_DOWN(__pa(mfns)), PAGE_KERNEL));
@@ -467,7 +463,7 @@ EXPORT_SYMBOL_GPL(get_phys_to_machine);
  * Allocate new pmd(s). It is checked whether the old pmd is still in place.
  * If not, nothing is changed. This is okay as the only reason for allocating
  * a new pmd is to replace p2m_missing_pte or p2m_identity_pte by an individual
- * pmd. In case of PAE/x86-32 there are multiple pmds to allocate!
+ * pmd.
  */
 static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *pte_pg)
 {
index 3566e37..7eab14d 100644 (file)
@@ -32,7 +32,6 @@
 #include <xen/features.h>
 #include <xen/hvc-console.h>
 #include "xen-ops.h"
-#include "vdso.h"
 #include "mmu.h"
 
 #define GB(x) ((uint64_t)(x) * 1024 * 1024 * 1024)
@@ -545,13 +544,10 @@ static unsigned long __init xen_get_pages_limit(void)
 {
        unsigned long limit;
 
-#ifdef CONFIG_X86_32
-       limit = GB(64) / PAGE_SIZE;
-#else
        limit = MAXMEM / PAGE_SIZE;
        if (!xen_initial_domain() && xen_512gb_limit)
                limit = GB(512) / PAGE_SIZE;
-#endif
+
        return limit;
 }
 
@@ -722,17 +718,8 @@ static void __init xen_reserve_xen_mfnlist(void)
        if (!xen_is_e820_reserved(start, size))
                return;
 
-#ifdef CONFIG_X86_32
-       /*
-        * Relocating the p2m on 32 bit system to an arbitrary virtual address
-        * is not supported, so just give up.
-        */
-       xen_raw_console_write("Xen hypervisor allocated p2m list conflicts with E820 map\n");
-       BUG();
-#else
        xen_relocate_p2m();
        memblock_free(start, size);
-#endif
 }
 
 /**
@@ -921,20 +908,6 @@ char * __init xen_memory_setup(void)
        return "Xen";
 }
 
-/*
- * Set the bit indicating "nosegneg" library variants should be used.
- * We only need to bother in pure 32-bit mode; compat 32-bit processes
- * can have un-truncated segments, so wrapping around is allowed.
- */
-static void __init fiddle_vdso(void)
-{
-#ifdef CONFIG_X86_32
-       u32 *mask = vdso_image_32.data +
-               vdso_image_32.sym_VDSO32_NOTE_MASK;
-       *mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
-#endif
-}
-
 static int register_callback(unsigned type, const void *func)
 {
        struct callback_register callback = {
@@ -951,11 +924,7 @@ void xen_enable_sysenter(void)
        int ret;
        unsigned sysenter_feature;
 
-#ifdef CONFIG_X86_32
-       sysenter_feature = X86_FEATURE_SEP;
-#else
        sysenter_feature = X86_FEATURE_SYSENTER32;
-#endif
 
        if (!boot_cpu_has(sysenter_feature))
                return;
@@ -967,7 +936,6 @@ void xen_enable_sysenter(void)
 
 void xen_enable_syscall(void)
 {
-#ifdef CONFIG_X86_64
        int ret;
 
        ret = register_callback(CALLBACKTYPE_syscall, xen_syscall_target);
@@ -983,7 +951,6 @@ void xen_enable_syscall(void)
                if (ret != 0)
                        setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
        }
-#endif /* CONFIG_X86_64 */
 }
 
 static void __init xen_pvmmu_arch_setup(void)
@@ -1024,7 +991,6 @@ void __init xen_arch_setup(void)
        disable_cpuidle();
        disable_cpufreq();
        WARN_ON(xen_set_default_idle());
-       fiddle_vdso();
 #ifdef CONFIG_NUMA
        numa_off = 1;
 #endif
index f8d3944..f5e7db4 100644 (file)
@@ -1,4 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
+#include <linux/thread_info.h>
 #include <asm/smp.h>
 
 #include <xen/events.h>
index 9ea598d..c2ac319 100644 (file)
@@ -29,6 +29,7 @@
 #include <asm/idtentry.h>
 #include <asm/desc.h>
 #include <asm/cpu.h>
+#include <asm/io_apic.h>
 
 #include <xen/interface/xen.h>
 #include <xen/interface/vcpu.h>
@@ -210,15 +211,6 @@ static void __init xen_pv_smp_prepare_boot_cpu(void)
                 * sure the old memory can be recycled. */
                make_lowmem_page_readwrite(xen_initial_gdt);
 
-#ifdef CONFIG_X86_32
-       /*
-        * Xen starts us with XEN_FLAT_RING1_DS, but linux code
-        * expects __USER_DS
-        */
-       loadsegment(ds, __USER_DS);
-       loadsegment(es, __USER_DS);
-#endif
-
        xen_filter_cpu_maps();
        xen_setup_vcpu_info_placement();
 
@@ -299,10 +291,6 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
 
        gdt = get_cpu_gdt_rw(cpu);
 
-#ifdef CONFIG_X86_32
-       ctxt->user_regs.fs = __KERNEL_PERCPU;
-       ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
-#endif
        memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));
 
        /*
@@ -340,12 +328,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
        ctxt->kernel_ss = __KERNEL_DS;
        ctxt->kernel_sp = task_top_of_stack(idle);
 
-#ifdef CONFIG_X86_32
-       ctxt->event_callback_cs     = __KERNEL_CS;
-       ctxt->failsafe_callback_cs  = __KERNEL_CS;
-#else
        ctxt->gs_base_kernel = per_cpu_offset(cpu);
-#endif
        ctxt->event_callback_eip    =
                (unsigned long)xen_asm_exc_xen_hypervisor_callback;
        ctxt->failsafe_callback_eip =
index 8303b58..cae9660 100644 (file)
@@ -1,11 +1,11 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <linux/types.h>
 
-#include <asm/fixmap.h>
-
 #include <asm/xen/hypercall.h>
 #include <asm/xen/page.h>
 
+#include <asm/fixmap.h>
+
 #include "xen-ops.h"
 
 void xen_pv_pre_suspend(void)
diff --git a/arch/x86/xen/vdso.h b/arch/x86/xen/vdso.h
deleted file mode 100644 (file)
index 873c54c..0000000
--- a/arch/x86/xen/vdso.h
+++ /dev/null
@@ -1,6 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-/* Bit used for the pseudo-hwcap for non-negative segments.  We use
-   bit 1 to avoid bugs in some versions of glibc when bit 0 is
-   used; the choice is otherwise arbitrary. */
-#define VDSO_NOTE_NONEGSEG_BIT 1
index 508fe20..1cb0e84 100644 (file)
@@ -6,12 +6,18 @@
  * operations here; the indirect forms are better handled in C.
  */
 
+#include <asm/errno.h>
 #include <asm/asm-offsets.h>
 #include <asm/percpu.h>
 #include <asm/processor-flags.h>
-#include <asm/frame.h>
+#include <asm/segment.h>
+#include <asm/thread_info.h>
 #include <asm/asm.h>
+#include <asm/frame.h>
 
+#include <xen/interface/xen.h>
+
+#include <linux/init.h>
 #include <linux/linkage.h>
 
 /*
@@ -76,11 +82,7 @@ SYM_FUNC_END(xen_save_fl_direct)
  */
 SYM_FUNC_START(xen_restore_fl_direct)
        FRAME_BEGIN
-#ifdef CONFIG_X86_64
        testw $X86_EFLAGS_IF, %di
-#else
-       testb $X86_EFLAGS_IF>>8, %ah
-#endif
        setz PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
        /*
         * Preempt here doesn't matter because that will deal with any
@@ -104,15 +106,6 @@ SYM_FUNC_END(xen_restore_fl_direct)
  */
 SYM_FUNC_START(check_events)
        FRAME_BEGIN
-#ifdef CONFIG_X86_32
-       push %eax
-       push %ecx
-       push %edx
-       call xen_force_evtchn_callback
-       pop %edx
-       pop %ecx
-       pop %eax
-#else
        push %rax
        push %rcx
        push %rdx
@@ -132,7 +125,6 @@ SYM_FUNC_START(check_events)
        pop %rdx
        pop %rcx
        pop %rax
-#endif
        FRAME_END
        ret
 SYM_FUNC_END(check_events)
@@ -151,3 +143,175 @@ SYM_FUNC_START(xen_read_cr2_direct)
        FRAME_END
        ret
 SYM_FUNC_END(xen_read_cr2_direct);
+
+.macro xen_pv_trap name
+SYM_CODE_START(xen_\name)
+       pop %rcx
+       pop %r11
+       jmp  \name
+SYM_CODE_END(xen_\name)
+_ASM_NOKPROBE(xen_\name)
+.endm
+
+xen_pv_trap asm_exc_divide_error
+xen_pv_trap asm_xenpv_exc_debug
+xen_pv_trap asm_exc_int3
+xen_pv_trap asm_xenpv_exc_nmi
+xen_pv_trap asm_exc_overflow
+xen_pv_trap asm_exc_bounds
+xen_pv_trap asm_exc_invalid_op
+xen_pv_trap asm_exc_device_not_available
+xen_pv_trap asm_exc_double_fault
+xen_pv_trap asm_exc_coproc_segment_overrun
+xen_pv_trap asm_exc_invalid_tss
+xen_pv_trap asm_exc_segment_not_present
+xen_pv_trap asm_exc_stack_segment
+xen_pv_trap asm_exc_general_protection
+xen_pv_trap asm_exc_page_fault
+xen_pv_trap asm_exc_spurious_interrupt_bug
+xen_pv_trap asm_exc_coprocessor_error
+xen_pv_trap asm_exc_alignment_check
+#ifdef CONFIG_X86_MCE
+xen_pv_trap asm_exc_machine_check
+#endif /* CONFIG_X86_MCE */
+xen_pv_trap asm_exc_simd_coprocessor_error
+#ifdef CONFIG_IA32_EMULATION
+xen_pv_trap entry_INT80_compat
+#endif
+xen_pv_trap asm_exc_xen_hypervisor_callback
+
+       __INIT
+SYM_CODE_START(xen_early_idt_handler_array)
+       i = 0
+       .rept NUM_EXCEPTION_VECTORS
+       pop %rcx
+       pop %r11
+       jmp early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE
+       i = i + 1
+       .fill xen_early_idt_handler_array + i*XEN_EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
+       .endr
+SYM_CODE_END(xen_early_idt_handler_array)
+       __FINIT
+
+hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32
+/*
+ * Xen64 iret frame:
+ *
+ *     ss
+ *     rsp
+ *     rflags
+ *     cs
+ *     rip             <-- standard iret frame
+ *
+ *     flags
+ *
+ *     rcx             }
+ *     r11             }<-- pushed by hypercall page
+ * rsp->rax            }
+ */
+SYM_CODE_START(xen_iret)
+       pushq $0
+       jmp hypercall_iret
+SYM_CODE_END(xen_iret)
+
+SYM_CODE_START(xen_sysret64)
+       /*
+        * We're already on the usermode stack at this point, but
+        * still with the kernel gs, so we can easily switch back.
+        *
+        * tss.sp2 is scratch space.
+        */
+       movq %rsp, PER_CPU_VAR(cpu_tss_rw + TSS_sp2)
+       movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
+
+       pushq $__USER_DS
+       pushq PER_CPU_VAR(cpu_tss_rw + TSS_sp2)
+       pushq %r11
+       pushq $__USER_CS
+       pushq %rcx
+
+       pushq $VGCF_in_syscall
+       jmp hypercall_iret
+SYM_CODE_END(xen_sysret64)
+
+/*
+ * Xen handles syscall callbacks much like ordinary exceptions, which
+ * means we have:
+ * - kernel gs
+ * - kernel rsp
+ * - an iret-like stack frame on the stack (including rcx and r11):
+ *     ss
+ *     rsp
+ *     rflags
+ *     cs
+ *     rip
+ *     r11
+ * rsp->rcx
+ */
+
+/* Normal 64-bit system call target */
+SYM_FUNC_START(xen_syscall_target)
+       popq %rcx
+       popq %r11
+
+       /*
+        * Neither Xen nor the kernel really knows what the old SS and
+        * CS were.  The kernel expects __USER_DS and __USER_CS, so
+        * report those values even though Xen will guess its own values.
+        */
+       movq $__USER_DS, 4*8(%rsp)
+       movq $__USER_CS, 1*8(%rsp)
+
+       jmp entry_SYSCALL_64_after_hwframe
+SYM_FUNC_END(xen_syscall_target)
+
+#ifdef CONFIG_IA32_EMULATION
+
+/* 32-bit compat syscall target */
+SYM_FUNC_START(xen_syscall32_target)
+       popq %rcx
+       popq %r11
+
+       /*
+        * Neither Xen nor the kernel really knows what the old SS and
+        * CS were.  The kernel expects __USER32_DS and __USER32_CS, so
+        * report those values even though Xen will guess its own values.
+        */
+       movq $__USER32_DS, 4*8(%rsp)
+       movq $__USER32_CS, 1*8(%rsp)
+
+       jmp entry_SYSCALL_compat_after_hwframe
+SYM_FUNC_END(xen_syscall32_target)
+
+/* 32-bit compat sysenter target */
+SYM_FUNC_START(xen_sysenter_target)
+       /*
+        * NB: Xen is polite and clears TF from EFLAGS for us.  This means
+        * that we don't need to guard against single step exceptions here.
+        */
+       popq %rcx
+       popq %r11
+
+       /*
+        * Neither Xen nor the kernel really knows what the old SS and
+        * CS were.  The kernel expects __USER32_DS and __USER32_CS, so
+        * report those values even though Xen will guess its own values.
+        */
+       movq $__USER32_DS, 4*8(%rsp)
+       movq $__USER32_CS, 1*8(%rsp)
+
+       jmp entry_SYSENTER_compat_after_hwframe
+SYM_FUNC_END(xen_sysenter_target)
+
+#else /* !CONFIG_IA32_EMULATION */
+
+SYM_FUNC_START_ALIAS(xen_syscall32_target)
+SYM_FUNC_START(xen_sysenter_target)
+       lea 16(%rsp), %rsp      /* strip %rcx, %r11 */
+       mov $-ENOSYS, %rax
+       pushq $0
+       jmp hypercall_iret
+SYM_FUNC_END(xen_sysenter_target)
+SYM_FUNC_END_ALIAS(xen_syscall32_target)
+
+#endif /* CONFIG_IA32_EMULATION */
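
Editor's note: a back-of-the-envelope check of the 1*8(%rsp) and 4*8(%rsp) stores in the syscall targets above. After the two pops of %rcx and %r11, the iret-style frame left on the stack is rip, cs, rflags, rsp, ss at 8-byte strides (standalone arithmetic only, not kernel code):

#include <stdio.h>

int main(void)
{
	static const char *slot[] = { "rip", "cs", "rflags", "rsp", "ss" };
	int i;

	for (i = 0; i < 5; i++)
		printf("%-6s = %d*8(%%rsp) -> offset %2d\n", slot[i], i, i * 8);

	/* cs sits at 1*8(%rsp) and ss at 4*8(%rsp), which is where the
	 * $__USER_CS / $__USER_DS (or __USER32_*) constants are written. */
	return 0;
}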
diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
deleted file mode 100644 (file)
index 4757cec..0000000
--- a/arch/x86/xen/xen-asm_32.S
+++ /dev/null
@@ -1,185 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Asm versions of Xen pv-ops, suitable for direct use.
- *
- * We only bother with direct forms (ie, vcpu in pda) of the
- * operations here; the indirect forms are better handled in C.
- */
-
-#include <asm/thread_info.h>
-#include <asm/processor-flags.h>
-#include <asm/segment.h>
-#include <asm/asm.h>
-
-#include <xen/interface/xen.h>
-
-#include <linux/linkage.h>
-
-/* Pseudo-flag used for virtual NMI, which we don't implement yet */
-#define XEN_EFLAGS_NMI  0x80000000
-
-/*
- * This is run where a normal iret would be run, with the same stack setup:
- *     8: eflags
- *     4: cs
- *     esp-> 0: eip
- *
- * This attempts to make sure that any pending events are dealt with
- * on return to usermode, but there is a small window in which an
- * event can happen just before entering usermode.  If the nested
- * interrupt ends up setting one of the TIF_WORK_MASK pending work
- * flags, they will not be tested again before returning to
- * usermode. This means that a process can end up with pending work,
- * which will be unprocessed until the process enters and leaves the
- * kernel again, which could be an unbounded amount of time.  This
- * means that a pending signal or reschedule event could be
- * indefinitely delayed.
- *
- * The fix is to notice a nested interrupt in the critical window, and
- * if one occurs, then fold the nested interrupt into the current
- * interrupt stack frame, and re-process it iteratively rather than
- * recursively.  This means that it will exit via the normal path, and
- * all pending work will be dealt with appropriately.
- *
- * Because the nested interrupt handler needs to deal with the current
- * stack state in whatever form its in, we keep things simple by only
- * using a single register which is pushed/popped on the stack.
- */
-
-.macro POP_FS
-1:
-       popw %fs
-.pushsection .fixup, "ax"
-2:     movw $0, (%esp)
-       jmp 1b
-.popsection
-       _ASM_EXTABLE(1b,2b)
-.endm
-
-SYM_CODE_START(xen_iret)
-       /* test eflags for special cases */
-       testl $(X86_EFLAGS_VM | XEN_EFLAGS_NMI), 8(%esp)
-       jnz hyper_iret
-
-       push %eax
-       ESP_OFFSET=4    # bytes pushed onto stack
-
-       /* Store vcpu_info pointer for easy access */
-#ifdef CONFIG_SMP
-       pushw %fs
-       movl $(__KERNEL_PERCPU), %eax
-       movl %eax, %fs
-       movl %fs:xen_vcpu, %eax
-       POP_FS
-#else
-       movl %ss:xen_vcpu, %eax
-#endif
-
-       /* check IF state we're restoring */
-       testb $X86_EFLAGS_IF>>8, 8+1+ESP_OFFSET(%esp)
-
-       /*
-        * Maybe enable events.  Once this happens we could get a
-        * recursive event, so the critical region starts immediately
-        * afterwards.  However, if that happens we don't end up
-        * resuming the code, so we don't have to be worried about
-        * being preempted to another CPU.
-        */
-       setz %ss:XEN_vcpu_info_mask(%eax)
-xen_iret_start_crit:
-
-       /* check for unmasked and pending */
-       cmpw $0x0001, %ss:XEN_vcpu_info_pending(%eax)
-
-       /*
-        * If there's something pending, mask events again so we can
-        * jump back into exc_xen_hypervisor_callback. Otherwise do not
-        * touch XEN_vcpu_info_mask.
-        */
-       jne 1f
-       movb $1, %ss:XEN_vcpu_info_mask(%eax)
-
-1:     popl %eax
-
-       /*
-        * From this point on the registers are restored and the stack
-        * updated, so we don't need to worry about it if we're
-        * preempted
-        */
-iret_restore_end:
-
-       /*
-        * Jump to hypervisor_callback after fixing up the stack.
-        * Events are masked, so jumping out of the critical region is
-        * OK.
-        */
-       je xen_asm_exc_xen_hypervisor_callback
-
-1:     iret
-xen_iret_end_crit:
-       _ASM_EXTABLE(1b, asm_iret_error)
-
-hyper_iret:
-       /* put this out of line since its very rarely used */
-       jmp hypercall_page + __HYPERVISOR_iret * 32
-SYM_CODE_END(xen_iret)
-
-       .globl xen_iret_start_crit, xen_iret_end_crit
-
-/*
- * This is called by xen_asm_exc_xen_hypervisor_callback in entry_32.S when it sees
- * that the EIP at the time of interrupt was between
- * xen_iret_start_crit and xen_iret_end_crit.
- *
- * The stack format at this point is:
- *     ----------------
- *      ss             : (ss/esp may be present if we came from usermode)
- *      esp            :
- *      eflags         }  outer exception info
- *      cs             }
- *      eip            }
- *     ----------------
- *      eax            :  outer eax if it hasn't been restored
- *     ----------------
- *      eflags         }
- *      cs             }  nested exception info
- *      eip            }
- *      return address : (into xen_asm_exc_xen_hypervisor_callback)
- *
- * In order to deliver the nested exception properly, we need to discard the
- * nested exception frame such that when we handle the exception, we do it
- * in the context of the outer exception rather than starting a new one.
- *
- * The only caveat is that if the outer eax hasn't been restored yet (i.e.
- * it's still on stack), we need to restore its value here.
-*/
-.pushsection .noinstr.text, "ax"
-SYM_CODE_START(xen_iret_crit_fixup)
-       /*
-        * Paranoia: Make sure we're really coming from kernel space.
-        * One could imagine a case where userspace jumps into the
-        * critical range address, but just before the CPU delivers a
-        * PF, it decides to deliver an interrupt instead.  Unlikely?
-        * Definitely.  Easy to avoid?  Yes.
-        */
-       testb $2, 2*4(%esp)             /* nested CS */
-       jnz 2f
-
-       /*
-        * If eip is before iret_restore_end then stack
-        * hasn't been restored yet.
-        */
-       cmpl $iret_restore_end, 1*4(%esp)
-       jae 1f
-
-       movl 4*4(%esp), %eax            /* load outer EAX */
-       ret $4*4                        /* discard nested EIP, CS, and EFLAGS as
-                                        * well as the just restored EAX */
-
-1:
-       ret $3*4                        /* discard nested EIP, CS, and EFLAGS */
-
-2:
-       ret
-SYM_CODE_END(xen_iret_crit_fixup)
-.popsection
diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S
deleted file mode 100644 (file)
index aab1d99..0000000
--- a/arch/x86/xen/xen-asm_64.S
+++ /dev/null
@@ -1,192 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Asm versions of Xen pv-ops, suitable for direct use.
- *
- * We only bother with direct forms (ie, vcpu in pda) of the
- * operations here; the indirect forms are better handled in C.
- */
-
-#include <asm/errno.h>
-#include <asm/percpu.h>
-#include <asm/processor-flags.h>
-#include <asm/segment.h>
-#include <asm/asm-offsets.h>
-#include <asm/thread_info.h>
-#include <asm/asm.h>
-
-#include <xen/interface/xen.h>
-
-#include <linux/init.h>
-#include <linux/linkage.h>
-
-.macro xen_pv_trap name
-SYM_CODE_START(xen_\name)
-       pop %rcx
-       pop %r11
-       jmp  \name
-SYM_CODE_END(xen_\name)
-_ASM_NOKPROBE(xen_\name)
-.endm
-
-xen_pv_trap asm_exc_divide_error
-xen_pv_trap asm_xenpv_exc_debug
-xen_pv_trap asm_exc_int3
-xen_pv_trap asm_xenpv_exc_nmi
-xen_pv_trap asm_exc_overflow
-xen_pv_trap asm_exc_bounds
-xen_pv_trap asm_exc_invalid_op
-xen_pv_trap asm_exc_device_not_available
-xen_pv_trap asm_exc_double_fault
-xen_pv_trap asm_exc_coproc_segment_overrun
-xen_pv_trap asm_exc_invalid_tss
-xen_pv_trap asm_exc_segment_not_present
-xen_pv_trap asm_exc_stack_segment
-xen_pv_trap asm_exc_general_protection
-xen_pv_trap asm_exc_page_fault
-xen_pv_trap asm_exc_spurious_interrupt_bug
-xen_pv_trap asm_exc_coprocessor_error
-xen_pv_trap asm_exc_alignment_check
-#ifdef CONFIG_X86_MCE
-xen_pv_trap asm_exc_machine_check
-#endif /* CONFIG_X86_MCE */
-xen_pv_trap asm_exc_simd_coprocessor_error
-#ifdef CONFIG_IA32_EMULATION
-xen_pv_trap entry_INT80_compat
-#endif
-xen_pv_trap asm_exc_xen_hypervisor_callback
-
-       __INIT
-SYM_CODE_START(xen_early_idt_handler_array)
-       i = 0
-       .rept NUM_EXCEPTION_VECTORS
-       pop %rcx
-       pop %r11
-       jmp early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE
-       i = i + 1
-       .fill xen_early_idt_handler_array + i*XEN_EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
-       .endr
-SYM_CODE_END(xen_early_idt_handler_array)
-       __FINIT
-
-hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32
-/*
- * Xen64 iret frame:
- *
- *     ss
- *     rsp
- *     rflags
- *     cs
- *     rip             <-- standard iret frame
- *
- *     flags
- *
- *     rcx             }
- *     r11             }<-- pushed by hypercall page
- * rsp->rax            }
- */
-SYM_CODE_START(xen_iret)
-       pushq $0
-       jmp hypercall_iret
-SYM_CODE_END(xen_iret)
-
-SYM_CODE_START(xen_sysret64)
-       /*
-        * We're already on the usermode stack at this point, but
-        * still with the kernel gs, so we can easily switch back.
-        *
-        * tss.sp2 is scratch space.
-        */
-       movq %rsp, PER_CPU_VAR(cpu_tss_rw + TSS_sp2)
-       movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
-
-       pushq $__USER_DS
-       pushq PER_CPU_VAR(cpu_tss_rw + TSS_sp2)
-       pushq %r11
-       pushq $__USER_CS
-       pushq %rcx
-
-       pushq $VGCF_in_syscall
-       jmp hypercall_iret
-SYM_CODE_END(xen_sysret64)
-
-/*
- * Xen handles syscall callbacks much like ordinary exceptions, which
- * means we have:
- * - kernel gs
- * - kernel rsp
- * - an iret-like stack frame on the stack (including rcx and r11):
- *     ss
- *     rsp
- *     rflags
- *     cs
- *     rip
- *     r11
- * rsp->rcx
- */
-
-/* Normal 64-bit system call target */
-SYM_FUNC_START(xen_syscall_target)
-       popq %rcx
-       popq %r11
-
-       /*
-        * Neither Xen nor the kernel really knows what the old SS and
-        * CS were.  The kernel expects __USER_DS and __USER_CS, so
-        * report those values even though Xen will guess its own values.
-        */
-       movq $__USER_DS, 4*8(%rsp)
-       movq $__USER_CS, 1*8(%rsp)
-
-       jmp entry_SYSCALL_64_after_hwframe
-SYM_FUNC_END(xen_syscall_target)
-
-#ifdef CONFIG_IA32_EMULATION
-
-/* 32-bit compat syscall target */
-SYM_FUNC_START(xen_syscall32_target)
-       popq %rcx
-       popq %r11
-
-       /*
-        * Neither Xen nor the kernel really knows what the old SS and
-        * CS were.  The kernel expects __USER32_DS and __USER32_CS, so
-        * report those values even though Xen will guess its own values.
-        */
-       movq $__USER32_DS, 4*8(%rsp)
-       movq $__USER32_CS, 1*8(%rsp)
-
-       jmp entry_SYSCALL_compat_after_hwframe
-SYM_FUNC_END(xen_syscall32_target)
-
-/* 32-bit compat sysenter target */
-SYM_FUNC_START(xen_sysenter_target)
-       /*
-        * NB: Xen is polite and clears TF from EFLAGS for us.  This means
-        * that we don't need to guard against single step exceptions here.
-        */
-       popq %rcx
-       popq %r11
-
-       /*
-        * Neither Xen nor the kernel really knows what the old SS and
-        * CS were.  The kernel expects __USER32_DS and __USER32_CS, so
-        * report those values even though Xen will guess its own values.
-        */
-       movq $__USER32_DS, 4*8(%rsp)
-       movq $__USER32_CS, 1*8(%rsp)
-
-       jmp entry_SYSENTER_compat_after_hwframe
-SYM_FUNC_END(xen_sysenter_target)
-
-#else /* !CONFIG_IA32_EMULATION */
-
-SYM_FUNC_START_ALIAS(xen_syscall32_target)
-SYM_FUNC_START(xen_sysenter_target)
-       lea 16(%rsp), %rsp      /* strip %rcx, %r11 */
-       mov $-ENOSYS, %rax
-       pushq $0
-       jmp hypercall_iret
-SYM_FUNC_END(xen_sysenter_target)
-SYM_FUNC_END_ALIAS(xen_syscall32_target)
-
-#endif /* CONFIG_IA32_EMULATION */
index 1ba601d..2d7c8f3 100644 (file)
@@ -35,13 +35,8 @@ SYM_CODE_START(startup_xen)
        rep __ASM_SIZE(stos)
 
        mov %_ASM_SI, xen_start_info
-#ifdef CONFIG_X86_64
        mov initial_stack(%rip), %rsp
-#else
-       mov initial_stack, %esp
-#endif
 
-#ifdef CONFIG_X86_64
        /* Set up %gs.
         *
         * The base of %gs always points to fixed_percpu_data.  If the
@@ -53,7 +48,6 @@ SYM_CODE_START(startup_xen)
        movq    $INIT_PER_CPU_VAR(fixed_percpu_data),%rax
        cdq
        wrmsr
-#endif
 
        call xen_start_kernel
 SYM_CODE_END(startup_xen)
index 53b224f..45d556f 100644 (file)
@@ -33,7 +33,6 @@ void xen_setup_mfn_list_list(void);
 void xen_build_mfn_list_list(void);
 void xen_setup_machphys_mapping(void);
 void xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn);
-void xen_reserve_top(void);
 void __init xen_reserve_special_pages(void);
 void __init xen_pt_check_e820(void);
 
index 0ebc982..f7c775d 100644 (file)
@@ -26,7 +26,7 @@ $(obj)/Image.o: $(obj)/../vmlinux.bin $(OBJS)
                $(OBJS) $@
 
 $(obj)/../Image.elf: $(obj)/Image.o $(obj)/boot.lds
-       $(Q)$(LD) $(KBUILD_LDFLAGS) $(LDFLAGS_vmlinux) \
+       $(Q)$(LD) $(KBUILD_LDFLAGS) \
                -T $(obj)/boot.lds \
                --build-id=none \
                -o $@ $(obj)/Image.o
index e57f0d0..b975811 100644 (file)
@@ -35,7 +35,7 @@
 #define get_fs()       (current->thread.current_ds)
 #define set_fs(val)    (current->thread.current_ds = (val))
 
-#define segment_eq(a, b)       ((a).seg == (b).seg)
+#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg)
 
 #define __kernel_ok (uaccess_kernel())
 #define __user_ok(addr, size) \
index d216ccb..6276e3c 100644 (file)
 204    common  quotactl                        sys_quotactl
 # 205 was old nfsservctl
 205    common  nfsservctl                      sys_ni_syscall
-206    common  _sysctl                         sys_sysctl
+206    common  _sysctl                         sys_ni_syscall
 207    common  bdflush                         sys_bdflush
 208    common  uname                           sys_newuname
 209    common  sysinfo                         sys_sysinfo
index c128dcc..7666408 100644 (file)
@@ -72,6 +72,9 @@ void do_page_fault(struct pt_regs *regs)
 
        if (user_mode(regs))
                flags |= FAULT_FLAG_USER;
+
+       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
+
 retry:
        mmap_read_lock(mm);
        vma = find_vma(mm, address);
@@ -107,7 +110,7 @@ good_area:
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
-       fault = handle_mm_fault(vma, address, flags);
+       fault = handle_mm_fault(vma, address, flags, regs);
 
        if (fault_signal_pending(fault, regs))
                return;
@@ -122,10 +125,6 @@ good_area:
                BUG();
        }
        if (flags & FAULT_FLAG_ALLOW_RETRY) {
-               if (fault & VM_FAULT_MAJOR)
-                       current->maj_flt++;
-               else
-                       current->min_flt++;
                if (fault & VM_FAULT_RETRY) {
                        flags |= FAULT_FLAG_TRIED;
 
@@ -139,12 +138,6 @@ good_area:
        }
 
        mmap_read_unlock(mm);
-       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
-       if (flags & VM_FAULT_MAJOR)
-               perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
-       else
-               perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
-
        return;
 
        /* Something tried to access memory that isn't in our memory map..
index 6e1543c..53abb5c 100644 (file)
@@ -308,9 +308,16 @@ static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
        flush_rq->mq_ctx = first_rq->mq_ctx;
        flush_rq->mq_hctx = first_rq->mq_hctx;
 
-       if (!q->elevator)
+       if (!q->elevator) {
                flush_rq->tag = first_rq->tag;
-       else
+
+               /*
+                * We borrow the data request's driver tag, so we have to
+                * mark this flush request as INFLIGHT to avoid double
+                * accounting of this driver tag.
+                */
+               flush_rq->rq_flags |= RQF_MQ_INFLIGHT;
+       } else
                flush_rq->internal_tag = first_rq->internal_tag;
 
        flush_rq->cmd_flags = REQ_OP_FLUSH | REQ_PREFLUSH;
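The comment above is the crux: with no elevator the flush request reuses the data request's driver tag, so it also has to carry the in-flight mark or the accounting that walks the tag map would see inconsistent state for that tag. A rough standalone sketch of the "borrowed resource must keep its bookkeeping consistent" idea, with all names hypothetical:

#include <stdio.h>

#define RQF_INFLIGHT (1u << 0)

struct request {
    const char *name;
    unsigned int flags;
};

/* Walk a tag -> request map and count tags whose owner is marked in flight. */
static int count_inflight(struct request **tags, int nr)
{
    int i, inflight = 0;

    for (i = 0; i < nr; i++)
        if (tags[i] && (tags[i]->flags & RQF_INFLIGHT))
            inflight++;
    return inflight;
}

int main(void)
{
    struct request data  = { "data",  RQF_INFLIGHT };
    struct request flush = { "flush", 0 };
    struct request *tags[1] = { &data };   /* tag 0 owned by the data request */

    /* The flush request takes over the data request's driver tag ... */
    tags[0] = &flush;
    /* ... so it must also carry the in-flight mark, or the tag "disappears". */
    flush.flags |= RQF_INFLIGHT;

    printf("in-flight tags: %d\n", count_inflight(tags, 1));
    return 0;
}
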
index 521c29b..413e0b5 100644 (file)
@@ -406,7 +406,7 @@ struct ioc {
        enum ioc_running                running;
        atomic64_t                      vtime_rate;
 
-       seqcount_t                      period_seqcount;
+       seqcount_spinlock_t             period_seqcount;
        u32                             period_at;      /* wallclock starttime */
        u64                             period_at_vtime; /* vtime starttime */
 
@@ -873,7 +873,6 @@ static void ioc_now(struct ioc *ioc, struct ioc_now *now)
 
 static void ioc_start_period(struct ioc *ioc, struct ioc_now *now)
 {
-       lockdep_assert_held(&ioc->lock);
        WARN_ON_ONCE(ioc->running != IOC_RUNNING);
 
        write_seqcount_begin(&ioc->period_seqcount);
@@ -2001,7 +2000,7 @@ static int blk_iocost_init(struct request_queue *q)
 
        ioc->running = IOC_IDLE;
        atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC);
-       seqcount_init(&ioc->period_seqcount);
+       seqcount_spinlock_init(&ioc->period_seqcount, &ioc->lock);
        ioc->period_at = ktime_to_us(ktime_get());
        atomic64_set(&ioc->cur_period, 0);
        atomic_set(&ioc->hweight_gen, 0);
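For readers unfamiliar with the seqcount API: seqcount_spinlock_t is a sequence counter that records which spinlock serializes its writers, letting lockdep validate the write side automatically, which is presumably why the explicit lockdep_assert_held() in ioc_start_period() could be dropped in the same change. A toy, single-threaded illustration of the sequence-count read/retry protocol itself (not the kernel API):

#include <stdio.h>

/* Miniature seqlock: even seq = stable snapshot, odd = write in progress. */
struct period {
    unsigned int seq;
    unsigned long at;
    unsigned long at_vtime;
};

static void write_period(struct period *p, unsigned long at, unsigned long vt)
{
    p->seq++;           /* becomes odd: readers will retry */
    p->at = at;
    p->at_vtime = vt;
    p->seq++;           /* even again: snapshot is consistent */
}

static void read_period(const struct period *p, unsigned long *at,
                        unsigned long *vt)
{
    unsigned int start;

    do {
        start = p->seq;
        *at = p->at;
        *vt = p->at_vtime;
    } while ((start & 1) || start != p->seq);   /* retry on torn reads */
}

int main(void)
{
    struct period p = { 0 };
    unsigned long at, vt;

    write_period(&p, 100, 4096);
    read_period(&p, &at, &vt);
    printf("period_at=%lu period_at_vtime=%lu\n", at, vt);
    return 0;
}
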
index 019e09b..0d1811e 100644 (file)
@@ -47,6 +47,15 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                op = REQ_OP_DISCARD;
        }
 
+       /* Guard against buggy device drivers that leave discard_granularity unset */
+       if (WARN_ON_ONCE(!q->limits.discard_granularity)) {
+               char dev_name[BDEVNAME_SIZE];
+
+               bdevname(bdev, dev_name);
+               pr_err_ratelimited("%s: Error: discard_granularity is 0.\n", dev_name);
+               return -EOPNOTSUPP;
+       }
+
        bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
        if ((sector | nr_sects) & bs_mask)
                return -EINVAL;
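The added check fails a discard early when the driver never set discard_granularity, instead of letting later code work with a zero granularity. The defensive pattern, reduced to a hedged standalone sketch (names and the error message are illustrative, not the kernel's):

#include <errno.h>
#include <stdio.h>

static int issue_discard(unsigned int granularity)
{
    if (granularity == 0) {
        fprintf(stderr, "discard_granularity is 0, rejecting request\n");
        return -EOPNOTSUPP;   /* fail early, mirroring the kernel convention */
    }
    /* ... build and submit the discard request ... */
    return 0;
}

int main(void)
{
    printf("%d\n", issue_discard(0));     /* rejected */
    printf("%d\n", issue_discard(512));   /* accepted */
    return 0;
}
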
index c176784..21efa78 100644 (file)
@@ -554,12 +554,6 @@ static int aead_accept_parent_nokey(void *private, struct sock *sk)
 
        INIT_LIST_HEAD(&ctx->tsgl_list);
        ctx->len = len;
-       ctx->used = 0;
-       atomic_set(&ctx->rcvused, 0);
-       ctx->more = 0;
-       ctx->merge = 0;
-       ctx->enc = 0;
-       ctx->aead_assoclen = 0;
        crypto_init_wait(&ctx->wait);
 
        ask->private = ctx;
index 5c112b2..478f3b8 100644 (file)
@@ -329,6 +329,7 @@ static int skcipher_accept_parent_nokey(void *private, struct sock *sk)
        ctx = sock_kmalloc(sk, len, GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;
+       memset(ctx, 0, len);
 
        ctx->iv = sock_kmalloc(sk, crypto_skcipher_ivsize(tfm),
                               GFP_KERNEL);
@@ -336,16 +337,10 @@ static int skcipher_accept_parent_nokey(void *private, struct sock *sk)
                sock_kfree_s(sk, ctx, len);
                return -ENOMEM;
        }
-
        memset(ctx->iv, 0, crypto_skcipher_ivsize(tfm));
 
        INIT_LIST_HEAD(&ctx->tsgl_list);
        ctx->len = len;
-       ctx->used = 0;
-       atomic_set(&ctx->rcvused, 0);
-       ctx->more = 0;
-       ctx->merge = 0;
-       ctx->enc = 0;
        crypto_init_wait(&ctx->wait);
 
        ask->private = ctx;
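The skcipher hunk zeroes the whole freshly allocated context with a single memset and drops the field-by-field initialization; the aead hunk drops the same per-field assignments, presumably relying on an equivalent memset added in a part of the patch not shown here. A minimal userspace equivalent of that pattern, with hypothetical names:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical context with several fields that must start out as zero. */
struct ctx {
    size_t len;
    size_t used;
    int more;
    int merge;
    int enc;
};

static struct ctx *ctx_alloc(size_t len)
{
    struct ctx *ctx = malloc(sizeof(*ctx));

    if (!ctx)
        return NULL;
    memset(ctx, 0, sizeof(*ctx));   /* one memset instead of five assignments */
    ctx->len = len;
    return ctx;
}

int main(void)
{
    struct ctx *ctx = ctx_alloc(128);

    if (!ctx)
        return 1;
    printf("used=%zu more=%d enc=%d\n", ctx->used, ctx->more, ctx->enc);
    free(ctx);
    return 0;
}
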
index ba2612e..4c34837 100644 (file)
@@ -8,7 +8,7 @@
  */
 
 #include <linux/clk-provider.h>
-#include <linux/platform_data/clk-st.h>
+#include <linux/platform_data/clk-fch.h>
 #include <linux/platform_device.h>
 #include <linux/pm_domain.h>
 #include <linux/clkdev.h>
@@ -79,11 +79,12 @@ static int misc_check_res(struct acpi_resource *ares, void *data)
        return !acpi_dev_resource_memory(ares, &res);
 }
 
-static int st_misc_setup(struct apd_private_data *pdata)
+static int fch_misc_setup(struct apd_private_data *pdata)
 {
        struct acpi_device *adev = pdata->adev;
+       const union acpi_object *obj;
        struct platform_device *clkdev;
-       struct st_clk_data *clk_data;
+       struct fch_clk_data *clk_data;
        struct resource_entry *rentry;
        struct list_head resource_list;
        int ret;
@@ -98,6 +99,9 @@ static int st_misc_setup(struct apd_private_data *pdata)
        if (ret < 0)
                return -ENOENT;
 
+       acpi_dev_get_property(adev, "is-rv", ACPI_TYPE_INTEGER, &obj);
+       clk_data->is_rv = obj->integer.value;
+
        list_for_each_entry(rentry, &resource_list, node) {
                clk_data->base = devm_ioremap(&adev->dev, rentry->res->start,
                                              resource_size(rentry->res));
@@ -106,7 +110,7 @@ static int st_misc_setup(struct apd_private_data *pdata)
 
        acpi_dev_free_resource_list(&resource_list);
 
-       clkdev = platform_device_register_data(&adev->dev, "clk-st",
+       clkdev = platform_device_register_data(&adev->dev, "clk-fch",
                                               PLATFORM_DEVID_NONE, clk_data,
                                               sizeof(*clk_data));
        return PTR_ERR_OR_ZERO(clkdev);
@@ -135,8 +139,8 @@ static const struct apd_device_desc cz_uart_desc = {
        .properties = uart_properties,
 };
 
-static const struct apd_device_desc st_misc_desc = {
-       .setup = st_misc_setup,
+static const struct apd_device_desc fch_misc_desc = {
+       .setup = fch_misc_setup,
 };
 #endif
 
@@ -239,7 +243,8 @@ static const struct acpi_device_id acpi_apd_device_ids[] = {
        { "AMD0020", APD_ADDR(cz_uart_desc) },
        { "AMDI0020", APD_ADDR(cz_uart_desc) },
        { "AMD0030", },
-       { "AMD0040", APD_ADDR(st_misc_desc)},
+       { "AMD0040", APD_ADDR(fch_misc_desc)},
+       { "HYGO0010", APD_ADDR(wt_i2c_desc) },
 #endif
 #ifdef CONFIG_ARM64
        { "APMC0D0F", APD_ADDR(xgene_i2c_desc) },
index 7c138a4..26dd208 100644 (file)
@@ -73,6 +73,18 @@ const guid_t *to_nfit_uuid(enum nfit_uuids id)
 }
 EXPORT_SYMBOL(to_nfit_uuid);
 
+static const guid_t *to_nfit_bus_uuid(int family)
+{
+       if (WARN_ONCE(family == NVDIMM_BUS_FAMILY_NFIT,
+                       "only secondary bus families can be translated\n"))
+               return NULL;
+       /*
+        * The index of bus UUIDs starts immediately following the last
+        * NVDIMM/leaf family.
+        */
+       return to_nfit_uuid(family + NVDIMM_FAMILY_MAX);
+}
+
 static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc)
 {
        struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
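The helper above maps a secondary bus family id onto the shared UUID table by offsetting past the last leaf (per-DIMM) family, matching the NFIT_BUS_INTEL = NVDIMM_FAMILY_MAX + NVDIMM_BUS_FAMILY_INTEL aliasing added to enum nfit_uuids further down. A small standalone sketch of that index arithmetic, using made-up numeric values rather than the real NFIT constants:

#include <stdio.h>

/* Illustrative values only; the real constants live in the NFIT headers. */
enum { FAMILY_MAX = 4 };                               /* last leaf/DIMM family */
enum { BUS_FAMILY_DEFAULT = 0, BUS_FAMILY_INTEL = 1 }; /* bus families          */

/* Leaf families occupy UUID slots 0..FAMILY_MAX; bus families follow them. */
static int bus_family_to_uuid_index(int family)
{
    if (family == BUS_FAMILY_DEFAULT)
        return -1;                  /* only secondary families are translated */
    return family + FAMILY_MAX;
}

int main(void)
{
    printf("intel bus family -> uuid slot %d\n",
           bus_family_to_uuid_index(BUS_FAMILY_INTEL));   /* prints 5 */
    return 0;
}
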
@@ -362,24 +374,8 @@ static u8 nfit_dsm_revid(unsigned family, unsigned func)
 {
        static const u8 revid_table[NVDIMM_FAMILY_MAX+1][NVDIMM_CMD_MAX+1] = {
                [NVDIMM_FAMILY_INTEL] = {
-                       [NVDIMM_INTEL_GET_MODES] = 2,
-                       [NVDIMM_INTEL_GET_FWINFO] = 2,
-                       [NVDIMM_INTEL_START_FWUPDATE] = 2,
-                       [NVDIMM_INTEL_SEND_FWUPDATE] = 2,
-                       [NVDIMM_INTEL_FINISH_FWUPDATE] = 2,
-                       [NVDIMM_INTEL_QUERY_FWUPDATE] = 2,
-                       [NVDIMM_INTEL_SET_THRESHOLD] = 2,
-                       [NVDIMM_INTEL_INJECT_ERROR] = 2,
-                       [NVDIMM_INTEL_GET_SECURITY_STATE] = 2,
-                       [NVDIMM_INTEL_SET_PASSPHRASE] = 2,
-                       [NVDIMM_INTEL_DISABLE_PASSPHRASE] = 2,
-                       [NVDIMM_INTEL_UNLOCK_UNIT] = 2,
-                       [NVDIMM_INTEL_FREEZE_LOCK] = 2,
-                       [NVDIMM_INTEL_SECURE_ERASE] = 2,
-                       [NVDIMM_INTEL_OVERWRITE] = 2,
-                       [NVDIMM_INTEL_QUERY_OVERWRITE] = 2,
-                       [NVDIMM_INTEL_SET_MASTER_PASSPHRASE] = 2,
-                       [NVDIMM_INTEL_MASTER_SECURE_ERASE] = 2,
+                       [NVDIMM_INTEL_GET_MODES ...
+                               NVDIMM_INTEL_FW_ACTIVATE_ARM] = 2,
                },
        };
        u8 id;
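The table rewrite above relies on the GCC/Clang range designated initializer extension, [first ... last] = value, which the kernel uses freely; it collapses the long list of per-command revision ids into a single entry. A tiny standalone example of the syntax, with illustrative indices:

#include <stdio.h>

int main(void)
{
    /* GNU extension: initialize a contiguous range of elements at once. */
    static const unsigned char revid[32] = {
        [4 ... 30] = 2,   /* e.g. every opcode from 4 through 30 is revision 2 */
    };

    printf("revid[3]=%d revid[10]=%d revid[30]=%d\n",
           revid[3], revid[10], revid[30]);   /* 0 2 2 */
    return 0;
}
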
@@ -406,7 +402,7 @@ static bool payload_dumpable(struct nvdimm *nvdimm, unsigned int func)
 }
 
 static int cmd_to_func(struct nfit_mem *nfit_mem, unsigned int cmd,
-               struct nd_cmd_pkg *call_pkg)
+               struct nd_cmd_pkg *call_pkg, int *family)
 {
        if (call_pkg) {
                int i;
@@ -417,6 +413,7 @@ static int cmd_to_func(struct nfit_mem *nfit_mem, unsigned int cmd,
                for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++)
                        if (call_pkg->nd_reserved2[i])
                                return -EINVAL;
+               *family = call_pkg->nd_family;
                return call_pkg->nd_command;
        }
 
@@ -450,13 +447,14 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
        acpi_handle handle;
        const guid_t *guid;
        int func, rc, i;
+       int family = 0;
 
        if (cmd_rc)
                *cmd_rc = -EINVAL;
 
        if (cmd == ND_CMD_CALL)
                call_pkg = buf;
-       func = cmd_to_func(nfit_mem, cmd, call_pkg);
+       func = cmd_to_func(nfit_mem, cmd, call_pkg, &family);
        if (func < 0)
                return func;
 
@@ -478,9 +476,17 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
 
                cmd_name = nvdimm_bus_cmd_name(cmd);
                cmd_mask = nd_desc->cmd_mask;
-               dsm_mask = nd_desc->bus_dsm_mask;
+               if (cmd == ND_CMD_CALL && call_pkg->nd_family) {
+                       family = call_pkg->nd_family;
+                       if (!test_bit(family, &nd_desc->bus_family_mask))
+                               return -EINVAL;
+                       dsm_mask = acpi_desc->family_dsm_mask[family];
+                       guid = to_nfit_bus_uuid(family);
+               } else {
+                       dsm_mask = acpi_desc->bus_dsm_mask;
+                       guid = to_nfit_uuid(NFIT_DEV_BUS);
+               }
                desc = nd_cmd_bus_desc(cmd);
-               guid = to_nfit_uuid(NFIT_DEV_BUS);
                handle = adev->handle;
                dimm_name = "bus";
        }
@@ -516,8 +522,8 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
                in_buf.buffer.length = call_pkg->nd_size_in;
        }
 
-       dev_dbg(dev, "%s cmd: %d: func: %d input length: %d\n",
-               dimm_name, cmd, func, in_buf.buffer.length);
+       dev_dbg(dev, "%s cmd: %d: family: %d func: %d input length: %d\n",
+               dimm_name, cmd, family, func, in_buf.buffer.length);
        if (payload_dumpable(nvdimm, func))
                print_hex_dump_debug("nvdimm in  ", DUMP_PREFIX_OFFSET, 4, 4,
                                in_buf.buffer.pointer,
@@ -1238,8 +1244,9 @@ static ssize_t bus_dsm_mask_show(struct device *dev,
 {
        struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
        struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
+       struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
 
-       return sprintf(buf, "%#lx\n", nd_desc->bus_dsm_mask);
+       return sprintf(buf, "%#lx\n", acpi_desc->bus_dsm_mask);
 }
 static struct device_attribute dev_attr_bus_dsm_mask =
                __ATTR(dsm_mask, 0444, bus_dsm_mask_show, NULL);
@@ -1385,8 +1392,12 @@ static umode_t nfit_visible(struct kobject *kobj, struct attribute *a, int n)
        struct device *dev = container_of(kobj, struct device, kobj);
        struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
 
-       if (a == &dev_attr_scrub.attr && !ars_supported(nvdimm_bus))
-               return 0;
+       if (a == &dev_attr_scrub.attr)
+               return ars_supported(nvdimm_bus) ? a->mode : 0;
+
+       if (a == &dev_attr_firmware_activate_noidle.attr)
+               return intel_fwa_supported(nvdimm_bus) ? a->mode : 0;
+
        return a->mode;
 }
 
@@ -1395,6 +1406,7 @@ static struct attribute *acpi_nfit_attributes[] = {
        &dev_attr_scrub.attr,
        &dev_attr_hw_error_scrub.attr,
        &dev_attr_bus_dsm_mask.attr,
+       &dev_attr_firmware_activate_noidle.attr,
        NULL,
 };
 
@@ -1823,6 +1835,7 @@ static void populate_shutdown_status(struct nfit_mem *nfit_mem)
 static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
                struct nfit_mem *nfit_mem, u32 device_handle)
 {
+       struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
        struct acpi_device *adev, *adev_dimm;
        struct device *dev = acpi_desc->dev;
        unsigned long dsm_mask, label_mask;
@@ -1834,6 +1847,7 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
        /* nfit test assumes 1:1 relationship between commands and dsms */
        nfit_mem->dsm_mask = acpi_desc->dimm_cmd_force_en;
        nfit_mem->family = NVDIMM_FAMILY_INTEL;
+       set_bit(NVDIMM_FAMILY_INTEL, &nd_desc->dimm_family_mask);
 
        if (dcr->valid_fields & ACPI_NFIT_CONTROL_MFG_INFO_VALID)
                sprintf(nfit_mem->id, "%04x-%02x-%04x-%08x",
@@ -1886,10 +1900,13 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
         * Note, that checking for function0 (bit0) tells us if any commands
         * are reachable through this GUID.
         */
+       clear_bit(NVDIMM_FAMILY_INTEL, &nd_desc->dimm_family_mask);
        for (i = 0; i <= NVDIMM_FAMILY_MAX; i++)
-               if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1))
+               if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1)) {
+                       set_bit(i, &nd_desc->dimm_family_mask);
                        if (family < 0 || i == default_dsm_family)
                                family = i;
+               }
 
        /* limit the supported commands to those that are publicly documented */
        nfit_mem->family = family;
@@ -2007,6 +2024,26 @@ static const struct nvdimm_security_ops *acpi_nfit_get_security_ops(int family)
        }
 }
 
+static const struct nvdimm_fw_ops *acpi_nfit_get_fw_ops(
+               struct nfit_mem *nfit_mem)
+{
+       unsigned long mask;
+       struct acpi_nfit_desc *acpi_desc = nfit_mem->acpi_desc;
+       struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
+
+       if (!nd_desc->fw_ops)
+               return NULL;
+
+       if (nfit_mem->family != NVDIMM_FAMILY_INTEL)
+               return NULL;
+
+       mask = nfit_mem->dsm_mask & NVDIMM_INTEL_FW_ACTIVATE_CMDMASK;
+       if (mask != NVDIMM_INTEL_FW_ACTIVATE_CMDMASK)
+               return NULL;
+
+       return intel_fw_ops;
+}
+
 static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
 {
        struct nfit_mem *nfit_mem;
@@ -2083,7 +2120,8 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
                                acpi_nfit_dimm_attribute_groups,
                                flags, cmd_mask, flush ? flush->hint_count : 0,
                                nfit_mem->flush_wpq, &nfit_mem->id[0],
-                               acpi_nfit_get_security_ops(nfit_mem->family));
+                               acpi_nfit_get_security_ops(nfit_mem->family),
+                               acpi_nfit_get_fw_ops(nfit_mem));
                if (!nvdimm)
                        return -ENOMEM;
 
@@ -2147,12 +2185,23 @@ static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
 {
        struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
        const guid_t *guid = to_nfit_uuid(NFIT_DEV_BUS);
+       unsigned long dsm_mask, *mask;
        struct acpi_device *adev;
-       unsigned long dsm_mask;
        int i;
 
-       nd_desc->cmd_mask = acpi_desc->bus_cmd_force_en;
-       nd_desc->bus_dsm_mask = acpi_desc->bus_nfit_cmd_force_en;
+       set_bit(ND_CMD_CALL, &nd_desc->cmd_mask);
+       set_bit(NVDIMM_BUS_FAMILY_NFIT, &nd_desc->bus_family_mask);
+
+       /* enable nfit_test to inject bus command emulation */
+       if (acpi_desc->bus_cmd_force_en) {
+               nd_desc->cmd_mask = acpi_desc->bus_cmd_force_en;
+               mask = &nd_desc->bus_family_mask;
+               if (acpi_desc->family_dsm_mask[NVDIMM_BUS_FAMILY_INTEL]) {
+                       set_bit(NVDIMM_BUS_FAMILY_INTEL, mask);
+                       nd_desc->fw_ops = intel_bus_fw_ops;
+               }
+       }
+
        adev = to_acpi_dev(acpi_desc);
        if (!adev)
                return;
@@ -2160,7 +2209,6 @@ static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
        for (i = ND_CMD_ARS_CAP; i <= ND_CMD_CLEAR_ERROR; i++)
                if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i))
                        set_bit(i, &nd_desc->cmd_mask);
-       set_bit(ND_CMD_CALL, &nd_desc->cmd_mask);
 
        dsm_mask =
                (1 << ND_CMD_ARS_CAP) |
@@ -2173,7 +2221,20 @@ static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
                (1 << NFIT_CMD_ARS_INJECT_GET);
        for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
                if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i))
-                       set_bit(i, &nd_desc->bus_dsm_mask);
+                       set_bit(i, &acpi_desc->bus_dsm_mask);
+
+       /* Enumerate allowed NVDIMM_BUS_FAMILY_INTEL commands */
+       dsm_mask = NVDIMM_BUS_INTEL_FW_ACTIVATE_CMDMASK;
+       guid = to_nfit_bus_uuid(NVDIMM_BUS_FAMILY_INTEL);
+       mask = &acpi_desc->family_dsm_mask[NVDIMM_BUS_FAMILY_INTEL];
+       for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
+               if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i))
+                       set_bit(i, mask);
+
+       if (*mask == dsm_mask) {
+               set_bit(NVDIMM_BUS_FAMILY_INTEL, &nd_desc->bus_family_mask);
+               nd_desc->fw_ops = intel_bus_fw_ops;
+       }
 }
 
 static ssize_t range_index_show(struct device *dev,
@@ -3273,7 +3334,7 @@ static void acpi_nfit_init_ars(struct acpi_nfit_desc *acpi_desc,
 static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
 {
        struct nfit_spa *nfit_spa;
-       int rc;
+       int rc, do_sched_ars = 0;
 
        set_bit(ARS_VALID, &acpi_desc->scrub_flags);
        list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
@@ -3285,7 +3346,7 @@ static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
                }
        }
 
-       list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
+       list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
                switch (nfit_spa_type(nfit_spa->spa)) {
                case NFIT_SPA_VOLATILE:
                case NFIT_SPA_PM:
@@ -3293,6 +3354,13 @@ static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
                        rc = ars_register(acpi_desc, nfit_spa);
                        if (rc)
                                return rc;
+
+                       /*
+                        * Kick off background ARS if at least one
+                        * region successfully registered ARS
+                        */
+                       if (!test_bit(ARS_FAILED, &nfit_spa->ars_state))
+                               do_sched_ars++;
                        break;
                case NFIT_SPA_BDW:
                        /* nothing to register */
@@ -3311,8 +3379,10 @@ static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
                        /* don't register unknown regions */
                        break;
                }
+       }
 
-       sched_ars(acpi_desc);
+       if (do_sched_ars)
+               sched_ars(acpi_desc);
        return 0;
 }
 
@@ -3485,7 +3555,10 @@ static int __acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
        return 0;
 }
 
-/* prevent security commands from being issued via ioctl */
+/*
+ * Prevent security and firmware activate commands from being issued via
+ * ioctl.
+ */
 static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
                struct nvdimm *nvdimm, unsigned int cmd, void *buf)
 {
@@ -3496,10 +3569,15 @@ static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
                        call_pkg->nd_family == NVDIMM_FAMILY_INTEL) {
                func = call_pkg->nd_command;
                if (func > NVDIMM_CMD_MAX ||
-                   (1 << func) & NVDIMM_INTEL_SECURITY_CMDMASK)
+                   (1 << func) & NVDIMM_INTEL_DENY_CMDMASK)
                        return -EOPNOTSUPP;
        }
 
+       /* block all non-nfit bus commands */
+       if (!nvdimm && cmd == ND_CMD_CALL &&
+                       call_pkg->nd_family != NVDIMM_BUS_FAMILY_NFIT)
+               return -EOPNOTSUPP;
+
        return __acpi_nfit_clear_to_send(nd_desc, nvdimm, cmd);
 }
 
@@ -3791,6 +3869,7 @@ static __init int nfit_init(void)
        guid_parse(UUID_NFIT_DIMM_N_HPE2, &nfit_uuid[NFIT_DEV_DIMM_N_HPE2]);
        guid_parse(UUID_NFIT_DIMM_N_MSFT, &nfit_uuid[NFIT_DEV_DIMM_N_MSFT]);
        guid_parse(UUID_NFIT_DIMM_N_HYPERV, &nfit_uuid[NFIT_DEV_DIMM_N_HYPERV]);
+       guid_parse(UUID_INTEL_BUS, &nfit_uuid[NFIT_BUS_INTEL]);
 
        nfit_wq = create_singlethread_workqueue("nfit");
        if (!nfit_wq)
index 1113b67..8dd792a 100644 (file)
@@ -7,6 +7,48 @@
 #include "intel.h"
 #include "nfit.h"
 
+static ssize_t firmware_activate_noidle_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
+       struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
+       struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
+
+       return sprintf(buf, "%s\n", acpi_desc->fwa_noidle ? "Y" : "N");
+}
+
+static ssize_t firmware_activate_noidle_store(struct device *dev,
+               struct device_attribute *attr, const char *buf, size_t size)
+{
+       struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
+       struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
+       struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
+       ssize_t rc;
+       bool val;
+
+       rc = kstrtobool(buf, &val);
+       if (rc)
+               return rc;
+       if (val != acpi_desc->fwa_noidle)
+               acpi_desc->fwa_cap = NVDIMM_FWA_CAP_INVALID;
+       acpi_desc->fwa_noidle = val;
+       return size;
+}
+DEVICE_ATTR_RW(firmware_activate_noidle);
+
+bool intel_fwa_supported(struct nvdimm_bus *nvdimm_bus)
+{
+       struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
+       struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
+       unsigned long *mask;
+
+       if (!test_bit(NVDIMM_BUS_FAMILY_INTEL, &nd_desc->bus_family_mask))
+               return false;
+
+       mask = &acpi_desc->family_dsm_mask[NVDIMM_BUS_FAMILY_INTEL];
+       return *mask == NVDIMM_BUS_INTEL_FW_ACTIVATE_CMDMASK;
+}
+
 static unsigned long intel_security_flags(struct nvdimm *nvdimm,
                enum nvdimm_passphrase_type ptype)
 {
@@ -389,3 +431,347 @@ static const struct nvdimm_security_ops __intel_security_ops = {
 };
 
 const struct nvdimm_security_ops *intel_security_ops = &__intel_security_ops;
+
+static int intel_bus_fwa_businfo(struct nvdimm_bus_descriptor *nd_desc,
+               struct nd_intel_bus_fw_activate_businfo *info)
+{
+       struct {
+               struct nd_cmd_pkg pkg;
+               struct nd_intel_bus_fw_activate_businfo cmd;
+       } nd_cmd = {
+               .pkg = {
+                       .nd_command = NVDIMM_BUS_INTEL_FW_ACTIVATE_BUSINFO,
+                       .nd_family = NVDIMM_BUS_FAMILY_INTEL,
+                       .nd_size_out =
+                               sizeof(struct nd_intel_bus_fw_activate_businfo),
+                       .nd_fw_size =
+                               sizeof(struct nd_intel_bus_fw_activate_businfo),
+               },
+       };
+       int rc;
+
+       rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd),
+                       NULL);
+       *info = nd_cmd.cmd;
+       return rc;
+}
+
+/* The fw_ops expect to be called with the nvdimm_bus_lock() held */
+static enum nvdimm_fwa_state intel_bus_fwa_state(
+               struct nvdimm_bus_descriptor *nd_desc)
+{
+       struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
+       struct nd_intel_bus_fw_activate_businfo info;
+       struct device *dev = acpi_desc->dev;
+       enum nvdimm_fwa_state state;
+       int rc;
+
+       /*
+        * It should not be possible for platform firmware to return
+        * busy, because activate is a synchronous operation. Treat it
+        * like invalid, i.e. always refresh / poll the status.
+        */
+       switch (acpi_desc->fwa_state) {
+       case NVDIMM_FWA_INVALID:
+       case NVDIMM_FWA_BUSY:
+               break;
+       default:
+               /* check if capability needs to be refreshed */
+               if (acpi_desc->fwa_cap == NVDIMM_FWA_CAP_INVALID)
+                       break;
+               return acpi_desc->fwa_state;
+       }
+
+       /* Refresh with platform firmware */
+       rc = intel_bus_fwa_businfo(nd_desc, &info);
+       if (rc)
+               return NVDIMM_FWA_INVALID;
+
+       switch (info.state) {
+       case ND_INTEL_FWA_IDLE:
+               state = NVDIMM_FWA_IDLE;
+               break;
+       case ND_INTEL_FWA_BUSY:
+               state = NVDIMM_FWA_BUSY;
+               break;
+       case ND_INTEL_FWA_ARMED:
+               if (info.activate_tmo > info.max_quiesce_tmo)
+                       state = NVDIMM_FWA_ARM_OVERFLOW;
+               else
+                       state = NVDIMM_FWA_ARMED;
+               break;
+       default:
+               dev_err_once(dev, "invalid firmware activate state %d\n",
+                               info.state);
+               return NVDIMM_FWA_INVALID;
+       }
+
+       /*
+        * Capability data is available in the same payload as state. It
+        * is expected to be static.
+        */
+       if (acpi_desc->fwa_cap == NVDIMM_FWA_CAP_INVALID) {
+               if (info.capability & ND_INTEL_BUS_FWA_CAP_FWQUIESCE)
+                       acpi_desc->fwa_cap = NVDIMM_FWA_CAP_QUIESCE;
+               else if (info.capability & ND_INTEL_BUS_FWA_CAP_OSQUIESCE) {
+                       /*
+                        * Skip hibernate cycle by default if platform
+                        * indicates that it does not need devices to be
+                        * quiesced.
+                        */
+                       acpi_desc->fwa_cap = NVDIMM_FWA_CAP_LIVE;
+               } else
+                       acpi_desc->fwa_cap = NVDIMM_FWA_CAP_NONE;
+       }
+
+       acpi_desc->fwa_state = state;
+
+       return state;
+}
+
+static enum nvdimm_fwa_capability intel_bus_fwa_capability(
+               struct nvdimm_bus_descriptor *nd_desc)
+{
+       struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
+
+       if (acpi_desc->fwa_cap > NVDIMM_FWA_CAP_INVALID)
+               return acpi_desc->fwa_cap;
+
+       if (intel_bus_fwa_state(nd_desc) > NVDIMM_FWA_INVALID)
+               return acpi_desc->fwa_cap;
+
+       return NVDIMM_FWA_CAP_INVALID;
+}
+
+static int intel_bus_fwa_activate(struct nvdimm_bus_descriptor *nd_desc)
+{
+       struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
+       struct {
+               struct nd_cmd_pkg pkg;
+               struct nd_intel_bus_fw_activate cmd;
+       } nd_cmd = {
+               .pkg = {
+                       .nd_command = NVDIMM_BUS_INTEL_FW_ACTIVATE,
+                       .nd_family = NVDIMM_BUS_FAMILY_INTEL,
+                       .nd_size_in = sizeof(nd_cmd.cmd.iodev_state),
+                       .nd_size_out =
+                               sizeof(struct nd_intel_bus_fw_activate),
+                       .nd_fw_size =
+                               sizeof(struct nd_intel_bus_fw_activate),
+               },
+               /*
+                * Even though activate is run from a suspended context,
+                * for safety, still ask platform firmware to force
+                * quiesce devices by default. Let the
+                * firmware_activate_noidle attribute override that policy.
+                */
+               .cmd = {
+                       .iodev_state = acpi_desc->fwa_noidle
+                               ? ND_INTEL_BUS_FWA_IODEV_OS_IDLE
+                               : ND_INTEL_BUS_FWA_IODEV_FORCE_IDLE,
+               },
+       };
+       int rc;
+
+       switch (intel_bus_fwa_state(nd_desc)) {
+       case NVDIMM_FWA_ARMED:
+       case NVDIMM_FWA_ARM_OVERFLOW:
+               break;
+       default:
+               return -ENXIO;
+       }
+
+       rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd),
+                       NULL);
+
+       /*
+        * Whether the command succeeded or failed, the agent checking
+        * for the result needs to query the DIMMs individually.
+        * Increment the activation count to invalidate all the DIMM
+        * states at once (it's otherwise not possible to take
+        * acpi_desc->init_mutex in this context)
+        */
+       acpi_desc->fwa_state = NVDIMM_FWA_INVALID;
+       acpi_desc->fwa_count++;
+
+       dev_dbg(acpi_desc->dev, "result: %d\n", rc);
+
+       return rc;
+}
+
+static const struct nvdimm_bus_fw_ops __intel_bus_fw_ops = {
+       .activate_state = intel_bus_fwa_state,
+       .capability = intel_bus_fwa_capability,
+       .activate = intel_bus_fwa_activate,
+};
+
+const struct nvdimm_bus_fw_ops *intel_bus_fw_ops = &__intel_bus_fw_ops;
+
+static int intel_fwa_dimminfo(struct nvdimm *nvdimm,
+               struct nd_intel_fw_activate_dimminfo *info)
+{
+       struct {
+               struct nd_cmd_pkg pkg;
+               struct nd_intel_fw_activate_dimminfo cmd;
+       } nd_cmd = {
+               .pkg = {
+                       .nd_command = NVDIMM_INTEL_FW_ACTIVATE_DIMMINFO,
+                       .nd_family = NVDIMM_FAMILY_INTEL,
+                       .nd_size_out =
+                               sizeof(struct nd_intel_fw_activate_dimminfo),
+                       .nd_fw_size =
+                               sizeof(struct nd_intel_fw_activate_dimminfo),
+               },
+       };
+       int rc;
+
+       rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
+       *info = nd_cmd.cmd;
+       return rc;
+}
+
+static enum nvdimm_fwa_state intel_fwa_state(struct nvdimm *nvdimm)
+{
+       struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
+       struct acpi_nfit_desc *acpi_desc = nfit_mem->acpi_desc;
+       struct nd_intel_fw_activate_dimminfo info;
+       int rc;
+
+       /*
+        * Similar to the bus state, since activate is synchronous the
+        * busy state should resolve within the context of 'activate'.
+        */
+       switch (nfit_mem->fwa_state) {
+       case NVDIMM_FWA_INVALID:
+       case NVDIMM_FWA_BUSY:
+               break;
+       default:
+               /* If no activations occurred the old state is still valid */
+               if (nfit_mem->fwa_count == acpi_desc->fwa_count)
+                       return nfit_mem->fwa_state;
+       }
+
+       rc = intel_fwa_dimminfo(nvdimm, &info);
+       if (rc)
+               return NVDIMM_FWA_INVALID;
+
+       switch (info.state) {
+       case ND_INTEL_FWA_IDLE:
+               nfit_mem->fwa_state = NVDIMM_FWA_IDLE;
+               break;
+       case ND_INTEL_FWA_BUSY:
+               nfit_mem->fwa_state = NVDIMM_FWA_BUSY;
+               break;
+       case ND_INTEL_FWA_ARMED:
+               nfit_mem->fwa_state = NVDIMM_FWA_ARMED;
+               break;
+       default:
+               nfit_mem->fwa_state = NVDIMM_FWA_INVALID;
+               break;
+       }
+
+       switch (info.result) {
+       case ND_INTEL_DIMM_FWA_NONE:
+               nfit_mem->fwa_result = NVDIMM_FWA_RESULT_NONE;
+               break;
+       case ND_INTEL_DIMM_FWA_SUCCESS:
+               nfit_mem->fwa_result = NVDIMM_FWA_RESULT_SUCCESS;
+               break;
+       case ND_INTEL_DIMM_FWA_NOTSTAGED:
+               nfit_mem->fwa_result = NVDIMM_FWA_RESULT_NOTSTAGED;
+               break;
+       case ND_INTEL_DIMM_FWA_NEEDRESET:
+               nfit_mem->fwa_result = NVDIMM_FWA_RESULT_NEEDRESET;
+               break;
+       case ND_INTEL_DIMM_FWA_MEDIAFAILED:
+       case ND_INTEL_DIMM_FWA_ABORT:
+       case ND_INTEL_DIMM_FWA_NOTSUPP:
+       case ND_INTEL_DIMM_FWA_ERROR:
+       default:
+               nfit_mem->fwa_result = NVDIMM_FWA_RESULT_FAIL;
+               break;
+       }
+
+       nfit_mem->fwa_count = acpi_desc->fwa_count;
+
+       return nfit_mem->fwa_state;
+}
+
+static enum nvdimm_fwa_result intel_fwa_result(struct nvdimm *nvdimm)
+{
+       struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
+       struct acpi_nfit_desc *acpi_desc = nfit_mem->acpi_desc;
+
+       if (nfit_mem->fwa_count == acpi_desc->fwa_count
+                       && nfit_mem->fwa_result > NVDIMM_FWA_RESULT_INVALID)
+               return nfit_mem->fwa_result;
+
+       if (intel_fwa_state(nvdimm) > NVDIMM_FWA_INVALID)
+               return nfit_mem->fwa_result;
+
+       return NVDIMM_FWA_RESULT_INVALID;
+}
+
+static int intel_fwa_arm(struct nvdimm *nvdimm, enum nvdimm_fwa_trigger arm)
+{
+       struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
+       struct acpi_nfit_desc *acpi_desc = nfit_mem->acpi_desc;
+       struct {
+               struct nd_cmd_pkg pkg;
+               struct nd_intel_fw_activate_arm cmd;
+       } nd_cmd = {
+               .pkg = {
+                       .nd_command = NVDIMM_INTEL_FW_ACTIVATE_ARM,
+                       .nd_family = NVDIMM_FAMILY_INTEL,
+                       .nd_size_in = sizeof(nd_cmd.cmd.activate_arm),
+                       .nd_size_out =
+                               sizeof(struct nd_intel_fw_activate_arm),
+                       .nd_fw_size =
+                               sizeof(struct nd_intel_fw_activate_arm),
+               },
+               .cmd = {
+                       .activate_arm = arm == NVDIMM_FWA_ARM
+                               ? ND_INTEL_DIMM_FWA_ARM
+                               : ND_INTEL_DIMM_FWA_DISARM,
+               },
+       };
+       int rc;
+
+       switch (intel_fwa_state(nvdimm)) {
+       case NVDIMM_FWA_INVALID:
+               return -ENXIO;
+       case NVDIMM_FWA_BUSY:
+               return -EBUSY;
+       case NVDIMM_FWA_IDLE:
+               if (arm == NVDIMM_FWA_DISARM)
+                       return 0;
+               break;
+       case NVDIMM_FWA_ARMED:
+               if (arm == NVDIMM_FWA_ARM)
+                       return 0;
+               break;
+       default:
+               return -ENXIO;
+       }
+
+       /*
+        * Invalidate the bus-level state, now that we're committed to
+        * changing the 'arm' state.
+        */
+       acpi_desc->fwa_state = NVDIMM_FWA_INVALID;
+       nfit_mem->fwa_state = NVDIMM_FWA_INVALID;
+
+       rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
+
+       dev_dbg(acpi_desc->dev, "%s result: %d\n", arm == NVDIMM_FWA_ARM
+                       ? "arm" : "disarm", rc);
+       return rc;
+}
+
+static const struct nvdimm_fw_ops __intel_fw_ops = {
+       .activate_state = intel_fwa_state,
+       .activate_result = intel_fwa_result,
+       .arm = intel_fwa_arm,
+};
+
+const struct nvdimm_fw_ops *intel_fw_ops = &__intel_fw_ops;
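One detail worth calling out in the DIMM-side state handling above: acpi_desc->fwa_count acts as a generation counter. Every bus-level activate bumps it, and each DIMM caches the generation it last refreshed against, so cached state and result values are trusted only while the generations match. A stripped-down sketch of that invalidation scheme, with all names hypothetical:

#include <stdio.h>

struct bus  { int activate_count; };
struct dimm { int seen_count; int cached_state; };

/* Pretend to query the device; here it always reports "idle" (0). */
static int query_hw_state(void) { return 0; }

static int dimm_state(struct dimm *d, const struct bus *b)
{
    /* Cached value is valid only if no activation happened since we cached it. */
    if (d->seen_count == b->activate_count)
        return d->cached_state;

    d->cached_state = query_hw_state();
    d->seen_count = b->activate_count;
    return d->cached_state;
}

static void bus_activate(struct bus *b)
{
    /* Invalidate every DIMM's cache at once, without touching each DIMM. */
    b->activate_count++;
}

int main(void)
{
    struct bus b = { 0 };
    struct dimm d = { .seen_count = 0, .cached_state = 0 };

    printf("state=%d\n", dimm_state(&d, &b));  /* fills the cache */
    bus_activate(&b);                          /* cache is now stale */
    printf("state=%d\n", dimm_state(&d, &b));  /* re-queried */
    return 0;
}
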
index 0aca682..b768234 100644 (file)
@@ -111,4 +111,65 @@ struct nd_intel_master_secure_erase {
        u8 passphrase[ND_INTEL_PASSPHRASE_SIZE];
        u32 status;
 } __packed;
+
+#define ND_INTEL_FWA_IDLE 0
+#define ND_INTEL_FWA_ARMED 1
+#define ND_INTEL_FWA_BUSY 2
+
+#define ND_INTEL_DIMM_FWA_NONE 0
+#define ND_INTEL_DIMM_FWA_NOTSTAGED 1
+#define ND_INTEL_DIMM_FWA_SUCCESS 2
+#define ND_INTEL_DIMM_FWA_NEEDRESET 3
+#define ND_INTEL_DIMM_FWA_MEDIAFAILED 4
+#define ND_INTEL_DIMM_FWA_ABORT 5
+#define ND_INTEL_DIMM_FWA_NOTSUPP 6
+#define ND_INTEL_DIMM_FWA_ERROR 7
+
+struct nd_intel_fw_activate_dimminfo {
+       u32 status;
+       u16 result;
+       u8 state;
+       u8 reserved[7];
+} __packed;
+
+#define ND_INTEL_DIMM_FWA_ARM 1
+#define ND_INTEL_DIMM_FWA_DISARM 0
+
+struct nd_intel_fw_activate_arm {
+       u8 activate_arm;
+       u32 status;
+} __packed;
+
+/* Root device command payloads */
+#define ND_INTEL_BUS_FWA_CAP_FWQUIESCE (1 << 0)
+#define ND_INTEL_BUS_FWA_CAP_OSQUIESCE (1 << 1)
+#define ND_INTEL_BUS_FWA_CAP_RESET     (1 << 2)
+
+struct nd_intel_bus_fw_activate_businfo {
+       u32 status;
+       u16 reserved;
+       u8 state;
+       u8 capability;
+       u64 activate_tmo;
+       u64 cpu_quiesce_tmo;
+       u64 io_quiesce_tmo;
+       u64 max_quiesce_tmo;
+} __packed;
+
+#define ND_INTEL_BUS_FWA_STATUS_NOARM  (6 | 1 << 16)
+#define ND_INTEL_BUS_FWA_STATUS_BUSY   (6 | 2 << 16)
+#define ND_INTEL_BUS_FWA_STATUS_NOFW   (6 | 3 << 16)
+#define ND_INTEL_BUS_FWA_STATUS_TMO    (6 | 4 << 16)
+#define ND_INTEL_BUS_FWA_STATUS_NOIDLE (6 | 5 << 16)
+#define ND_INTEL_BUS_FWA_STATUS_ABORT  (6 | 6 << 16)
+
+#define ND_INTEL_BUS_FWA_IODEV_FORCE_IDLE (0)
+#define ND_INTEL_BUS_FWA_IODEV_OS_IDLE (1)
+struct nd_intel_bus_fw_activate {
+       u8 iodev_state;
+       u32 status;
+} __packed;
+
+extern const struct nvdimm_fw_ops *intel_fw_ops;
+extern const struct nvdimm_bus_fw_ops *intel_bus_fw_ops;
 #endif
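The command-mask macros above are used as all-or-nothing capability tests: acpi_nfit_get_fw_ops() and intel_fwa_supported() only publish the firmware-activate ops when every bit of the relevant mask is advertised. A short sketch of that check, writing out the two DIMM command bits (29 and 30) as defined in the enum above:

#include <stdio.h>

#define CMD_FW_ACTIVATE_DIMMINFO 29
#define CMD_FW_ACTIVATE_ARM      30

#define FW_ACTIVATE_CMDMASK \
    (1ul << CMD_FW_ACTIVATE_DIMMINFO | 1ul << CMD_FW_ACTIVATE_ARM)

/* Firmware activation is usable only if *both* commands are supported. */
static int fw_activate_supported(unsigned long dsm_mask)
{
    return (dsm_mask & FW_ACTIVATE_CMDMASK) == FW_ACTIVATE_CMDMASK;
}

int main(void)
{
    unsigned long only_info = 1ul << CMD_FW_ACTIVATE_DIMMINFO;
    unsigned long both = FW_ACTIVATE_CMDMASK;

    printf("info only: %d\n", fw_activate_supported(only_info)); /* 0 */
    printf("both:      %d\n", fw_activate_supported(both));      /* 1 */
    return 0;
}
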
index a303f01..c674f3d 100644 (file)
@@ -18,6 +18,7 @@
 
 /* https://pmem.io/documents/NVDIMM_DSM_Interface-V1.6.pdf */
 #define UUID_NFIT_DIMM "4309ac30-0d11-11e4-9191-0800200c9a66"
+#define UUID_INTEL_BUS "c7d8acd4-2df8-4b82-9f65-a325335af149"
 
 /* https://github.com/HewlettPackard/hpe-nvm/blob/master/Documentation/ */
 #define UUID_NFIT_DIMM_N_HPE1 "9002c334-acf3-4c0e-9642-a235f0d53bc6"
@@ -33,7 +34,6 @@
                | ACPI_NFIT_MEM_RESTORE_FAILED | ACPI_NFIT_MEM_FLUSH_FAILED \
                | ACPI_NFIT_MEM_NOT_ARMED | ACPI_NFIT_MEM_MAP_FAILED)
 
-#define NVDIMM_FAMILY_MAX NVDIMM_FAMILY_HYPERV
 #define NVDIMM_CMD_MAX 31
 
 #define NVDIMM_STANDARD_CMDMASK \
@@ -66,6 +66,13 @@ enum nvdimm_family_cmds {
        NVDIMM_INTEL_QUERY_OVERWRITE = 26,
        NVDIMM_INTEL_SET_MASTER_PASSPHRASE = 27,
        NVDIMM_INTEL_MASTER_SECURE_ERASE = 28,
+       NVDIMM_INTEL_FW_ACTIVATE_DIMMINFO = 29,
+       NVDIMM_INTEL_FW_ACTIVATE_ARM = 30,
+};
+
+enum nvdimm_bus_family_cmds {
+       NVDIMM_BUS_INTEL_FW_ACTIVATE_BUSINFO = 1,
+       NVDIMM_BUS_INTEL_FW_ACTIVATE = 2,
 };
 
 #define NVDIMM_INTEL_SECURITY_CMDMASK \
@@ -76,13 +83,22 @@ enum nvdimm_family_cmds {
 | 1 << NVDIMM_INTEL_SET_MASTER_PASSPHRASE \
 | 1 << NVDIMM_INTEL_MASTER_SECURE_ERASE)
 
+#define NVDIMM_INTEL_FW_ACTIVATE_CMDMASK \
+(1 << NVDIMM_INTEL_FW_ACTIVATE_DIMMINFO | 1 << NVDIMM_INTEL_FW_ACTIVATE_ARM)
+
+#define NVDIMM_BUS_INTEL_FW_ACTIVATE_CMDMASK \
+(1 << NVDIMM_BUS_INTEL_FW_ACTIVATE_BUSINFO | 1 << NVDIMM_BUS_INTEL_FW_ACTIVATE)
+
 #define NVDIMM_INTEL_CMDMASK \
 (NVDIMM_STANDARD_CMDMASK | 1 << NVDIMM_INTEL_GET_MODES \
  | 1 << NVDIMM_INTEL_GET_FWINFO | 1 << NVDIMM_INTEL_START_FWUPDATE \
  | 1 << NVDIMM_INTEL_SEND_FWUPDATE | 1 << NVDIMM_INTEL_FINISH_FWUPDATE \
  | 1 << NVDIMM_INTEL_QUERY_FWUPDATE | 1 << NVDIMM_INTEL_SET_THRESHOLD \
  | 1 << NVDIMM_INTEL_INJECT_ERROR | 1 << NVDIMM_INTEL_LATCH_SHUTDOWN \
- | NVDIMM_INTEL_SECURITY_CMDMASK)
+ | NVDIMM_INTEL_SECURITY_CMDMASK | NVDIMM_INTEL_FW_ACTIVATE_CMDMASK)
+
+#define NVDIMM_INTEL_DENY_CMDMASK \
+(NVDIMM_INTEL_SECURITY_CMDMASK | NVDIMM_INTEL_FW_ACTIVATE_CMDMASK)
 
 enum nfit_uuids {
        /* for simplicity alias the uuid index with the family id */
@@ -91,6 +107,11 @@ enum nfit_uuids {
        NFIT_DEV_DIMM_N_HPE2 = NVDIMM_FAMILY_HPE2,
        NFIT_DEV_DIMM_N_MSFT = NVDIMM_FAMILY_MSFT,
        NFIT_DEV_DIMM_N_HYPERV = NVDIMM_FAMILY_HYPERV,
+       /*
+        * to_nfit_bus_uuid() expects to translate bus uuid family ids
+        * to a UUID index using NVDIMM_FAMILY_MAX as an offset
+        */
+       NFIT_BUS_INTEL = NVDIMM_FAMILY_MAX + NVDIMM_BUS_FAMILY_INTEL,
        NFIT_SPA_VOLATILE,
        NFIT_SPA_PM,
        NFIT_SPA_DCR,
@@ -199,6 +220,9 @@ struct nfit_mem {
        struct list_head list;
        struct acpi_device *adev;
        struct acpi_nfit_desc *acpi_desc;
+       enum nvdimm_fwa_state fwa_state;
+       enum nvdimm_fwa_result fwa_result;
+       int fwa_count;
        char id[NFIT_DIMM_ID_LEN+1];
        struct resource *flush_wpq;
        unsigned long dsm_mask;
@@ -238,11 +262,17 @@ struct acpi_nfit_desc {
        unsigned long scrub_flags;
        unsigned long dimm_cmd_force_en;
        unsigned long bus_cmd_force_en;
-       unsigned long bus_nfit_cmd_force_en;
+       unsigned long bus_dsm_mask;
+       unsigned long family_dsm_mask[NVDIMM_BUS_FAMILY_MAX + 1];
        unsigned int platform_cap;
        unsigned int scrub_tmo;
        int (*blk_do_io)(struct nd_blk_region *ndbr, resource_size_t dpa,
                        void *iobuf, u64 len, int rw);
+       enum nvdimm_fwa_state fwa_state;
+       enum nvdimm_fwa_capability fwa_cap;
+       int fwa_count;
+       bool fwa_noidle;
+       bool fwa_nosuspend;
 };
 
 enum scrub_mode {
@@ -345,4 +375,6 @@ void __acpi_nvdimm_notify(struct device *dev, u32 event);
 int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
                unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc);
 void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev);
+bool intel_fwa_supported(struct nvdimm_bus *nvdimm_bus);
+extern struct device_attribute dev_attr_firmware_activate_noidle;
 #endif /* __NFIT_H__ */
index d181601..2f137d6 100644 (file)
@@ -1171,6 +1171,8 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
        if (part_shift)
                lo->lo_flags |= LO_FLAGS_PARTSCAN;
        partscan = lo->lo_flags & LO_FLAGS_PARTSCAN;
+       if (partscan)
+               lo->lo_disk->flags &= ~GENHD_FL_NO_PART_SCAN;
 
        /* Grab the block_device to prevent its destruction after we
         * put /dev/loopXX inode. Later in __loop_clr_fd() we bdput(bdev).
index 5eddfd2..b241a09 100644 (file)
@@ -45,7 +45,7 @@ void rnbd_dev_close(struct rnbd_dev *dev)
        kfree(dev);
 }
 
-static void rnbd_dev_bi_end_io(struct bio *bio)
+void rnbd_dev_bi_end_io(struct bio *bio)
 {
        struct rnbd_dev_blk_io *io = bio->bi_private;
 
@@ -63,8 +63,8 @@ static void rnbd_dev_bi_end_io(struct bio *bio)
  *     Map the kernel address into a bio suitable for io to a block
  *     device. Returns an error pointer in case of error.
  */
-static struct bio *rnbd_bio_map_kern(void *data, struct bio_set *bs,
-                                    unsigned int len, gfp_t gfp_mask)
+struct bio *rnbd_bio_map_kern(void *data, struct bio_set *bs,
+                             unsigned int len, gfp_t gfp_mask)
 {
        unsigned long kaddr = (unsigned long)data;
        unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -99,36 +99,5 @@ static struct bio *rnbd_bio_map_kern(void *data, struct bio_set *bs,
                offset = 0;
        }
 
-       bio->bi_end_io = bio_put;
        return bio;
 }
-
-int rnbd_dev_submit_io(struct rnbd_dev *dev, sector_t sector, void *data,
-                      size_t len, u32 bi_size, enum rnbd_io_flags flags,
-                      short prio, void *priv)
-{
-       struct rnbd_dev_blk_io *io;
-       struct bio *bio;
-
-       /* Generate bio with pages pointing to the rdma buffer */
-       bio = rnbd_bio_map_kern(data, dev->ibd_bio_set, len, GFP_KERNEL);
-       if (IS_ERR(bio))
-               return PTR_ERR(bio);
-
-       io = container_of(bio, struct rnbd_dev_blk_io, bio);
-
-       io->dev = dev;
-       io->priv = priv;
-
-       bio->bi_end_io = rnbd_dev_bi_end_io;
-       bio->bi_private = io;
-       bio->bi_opf = rnbd_to_bio_flags(flags);
-       bio->bi_iter.bi_sector = sector;
-       bio->bi_iter.bi_size = bi_size;
-       bio_set_prio(bio, prio);
-       bio_set_dev(bio, dev->bdev);
-
-       submit_bio(bio);
-
-       return 0;
-}
index 0f65b09..0eb2385 100644 (file)
@@ -41,6 +41,11 @@ void rnbd_dev_close(struct rnbd_dev *dev);
 
 void rnbd_endio(void *priv, int error);
 
+void rnbd_dev_bi_end_io(struct bio *bio);
+
+struct bio *rnbd_bio_map_kern(void *data, struct bio_set *bs,
+                             unsigned int len, gfp_t gfp_mask);
+
 static inline int rnbd_dev_get_max_segs(const struct rnbd_dev *dev)
 {
        return queue_max_segments(bdev_get_queue(dev->bdev));
@@ -75,18 +80,4 @@ static inline int rnbd_dev_get_discard_alignment(const struct rnbd_dev *dev)
        return bdev_get_queue(dev->bdev)->limits.discard_alignment;
 }
 
-/**
- * rnbd_dev_submit_io() - Submit an I/O to the disk
- * @dev:       device to that the I/O is submitted
- * @sector:    address to read/write data to
- * @data:      I/O data to write or buffer to read I/O date into
- * @len:       length of @data
- * @bi_size:   Amount of data that will be read/written
- * @prio:       IO priority
- * @priv:      private data passed to @io_fn
- */
-int rnbd_dev_submit_io(struct rnbd_dev *dev, sector_t sector, void *data,
-                       size_t len, u32 bi_size, enum rnbd_io_flags flags,
-                       short prio, void *priv);
-
 #endif /* RNBD_SRV_DEV_H */
index 86e6152..0fb9484 100644 (file)
@@ -124,6 +124,9 @@ static int process_rdma(struct rtrs_srv *sess,
        struct rnbd_srv_sess_dev *sess_dev;
        u32 dev_id;
        int err;
+       struct rnbd_dev_blk_io *io;
+       struct bio *bio;
+       short prio;
 
        priv = kmalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
@@ -142,18 +145,29 @@ static int process_rdma(struct rtrs_srv *sess,
        priv->sess_dev = sess_dev;
        priv->id = id;
 
-       err = rnbd_dev_submit_io(sess_dev->rnbd_dev, le64_to_cpu(msg->sector),
-                                 data, datalen, le32_to_cpu(msg->bi_size),
-                                 le32_to_cpu(msg->rw),
-                                 srv_sess->ver < RNBD_PROTO_VER_MAJOR ||
-                                 usrlen < sizeof(*msg) ?
-                                 0 : le16_to_cpu(msg->prio), priv);
-       if (unlikely(err)) {
-               rnbd_srv_err(sess_dev, "Submitting I/O to device failed, err: %d\n",
-                             err);
+       /* Generate bio with pages pointing to the rdma buffer */
+       bio = rnbd_bio_map_kern(data, sess_dev->rnbd_dev->ibd_bio_set, datalen, GFP_KERNEL);
+       if (IS_ERR(bio)) {
+               rnbd_srv_err(sess_dev, "Failed to generate bio, err: %ld\n", PTR_ERR(bio));
                goto sess_dev_put;
        }
 
+       io = container_of(bio, struct rnbd_dev_blk_io, bio);
+       io->dev = sess_dev->rnbd_dev;
+       io->priv = priv;
+
+       bio->bi_end_io = rnbd_dev_bi_end_io;
+       bio->bi_private = io;
+       bio->bi_opf = rnbd_to_bio_flags(le32_to_cpu(msg->rw));
+       bio->bi_iter.bi_sector = le64_to_cpu(msg->sector);
+       bio->bi_iter.bi_size = le32_to_cpu(msg->bi_size);
+       prio = srv_sess->ver < RNBD_PROTO_VER_MAJOR ||
+              usrlen < sizeof(*msg) ? 0 : le16_to_cpu(msg->prio);
+       bio_set_prio(bio, prio);
+       bio_set_dev(bio, sess_dev->rnbd_dev->bdev);
+
+       submit_bio(bio);
+
        return 0;
 
 sess_dev_put:
index 690a258..4026fac 100644 (file)
@@ -50,7 +50,7 @@ source "drivers/clk/versatile/Kconfig"
 config CLK_HSDK
        bool "PLL Driver for HSDK platform"
        depends on OF || COMPILE_TEST
-       depends on IOMEM
+       depends on HAS_IOMEM
        help
          This driver supports the HSDK core, system, ddr, tunnel and hdmi PLLs
          control.
index ca9af11..da8fcf1 100644 (file)
@@ -28,6 +28,7 @@ obj-$(CONFIG_COMMON_CLK_CDCE925)      += clk-cdce925.o
 obj-$(CONFIG_ARCH_CLPS711X)            += clk-clps711x.o
 obj-$(CONFIG_COMMON_CLK_CS2000_CP)     += clk-cs2000-cp.o
 obj-$(CONFIG_ARCH_EFM32)               += clk-efm32gg.o
+obj-$(CONFIG_ARCH_SPARX5)              += clk-sparx5.o
 obj-$(CONFIG_COMMON_CLK_FIXED_MMIO)    += clk-fixed-mmio.o
 obj-$(CONFIG_COMMON_CLK_FSL_SAI)       += clk-fsl-sai.o
 obj-$(CONFIG_COMMON_CLK_GEMINI)                += clk-gemini.o
index e2007ac..61bb224 100644 (file)
 #include "owl-gate.h"
 #include "owl-mux.h"
 #include "owl-pll.h"
+#include "owl-reset.h"
 
 #include <dt-bindings/clock/actions,s500-cmu.h>
+#include <dt-bindings/reset/actions,s500-reset.h>
 
 #define CMU_COREPLL                    (0x0000)
 #define CMU_DEVPLL                     (0x0004)
@@ -175,6 +177,8 @@ static OWL_MUX(dev_clk, "dev_clk", dev_clk_mux_p, CMU_DEVPLL, 12, 1, CLK_SET_RAT
 static OWL_MUX(ahbprediv_clk, "ahbprediv_clk", ahbprediv_clk_mux_p, CMU_BUSCLK1, 8, 3, CLK_SET_RATE_PARENT);
 
 /* gate clocks */
+static OWL_GATE(gpio_clk, "gpio_clk", "apb_clk", CMU_DEVCLKEN0, 18, 0, 0);
+static OWL_GATE(dmac_clk, "dmac_clk", "h_clk", CMU_DEVCLKEN0, 1, 0, 0);
 static OWL_GATE(spi0_clk, "spi0_clk", "ahb_clk", CMU_DEVCLKEN1, 10, 0, CLK_IGNORE_UNUSED);
 static OWL_GATE(spi1_clk, "spi1_clk", "ahb_clk", CMU_DEVCLKEN1, 11, 0, CLK_IGNORE_UNUSED);
 static OWL_GATE(spi2_clk, "spi2_clk", "ahb_clk", CMU_DEVCLKEN1, 12, 0, CLK_IGNORE_UNUSED);
@@ -183,7 +187,8 @@ static OWL_GATE(timer_clk, "timer_clk", "hosc", CMU_DEVCLKEN1, 27, 0, 0);
 static OWL_GATE(hdmi_clk, "hdmi_clk", "hosc", CMU_DEVCLKEN1, 3, 0, 0);
 
 /* divider clocks */
-static OWL_DIVIDER(h_clk, "h_clk", "ahbprevdiv_clk", CMU_BUSCLK1, 12, 2, NULL, 0, 0);
+static OWL_DIVIDER(h_clk, "h_clk", "ahbprediv_clk", CMU_BUSCLK1, 12, 2, NULL, 0, 0);
+static OWL_DIVIDER(apb_clk, "apb_clk", "ahb_clk", CMU_BUSCLK1, 14, 2, NULL, 0, 0);
 static OWL_DIVIDER(rmii_ref_clk, "rmii_ref_clk", "ethernet_pll_clk", CMU_ETHERNETPLL, 1, 1, rmii_ref_div_table, 0, 0);
 
 /* factor clocks */
@@ -428,6 +433,9 @@ static struct owl_clk_common *s500_clks[] = {
        &spdif_clk.common,
        &nand_clk.common,
        &ecc_clk.common,
+       &apb_clk.common,
+       &dmac_clk.common,
+       &gpio_clk.common,
 };
 
 static struct clk_hw_onecell_data s500_hw_clks = {
@@ -484,24 +492,103 @@ static struct clk_hw_onecell_data s500_hw_clks = {
                [CLK_SPDIF]             = &spdif_clk.common.hw,
                [CLK_NAND]              = &nand_clk.common.hw,
                [CLK_ECC]               = &ecc_clk.common.hw,
+               [CLK_APB]               = &apb_clk.common.hw,
+               [CLK_DMAC]              = &dmac_clk.common.hw,
+               [CLK_GPIO]              = &gpio_clk.common.hw,
        },
        .num = CLK_NR_CLKS,
 };
 
+static const struct owl_reset_map s500_resets[] = {
+       [RESET_DMAC]    = { CMU_DEVRST0, BIT(0) },
+       [RESET_NORIF]   = { CMU_DEVRST0, BIT(1) },
+       [RESET_DDR]     = { CMU_DEVRST0, BIT(2) },
+       [RESET_NANDC]   = { CMU_DEVRST0, BIT(3) },
+       [RESET_SD0]     = { CMU_DEVRST0, BIT(4) },
+       [RESET_SD1]     = { CMU_DEVRST0, BIT(5) },
+       [RESET_PCM1]    = { CMU_DEVRST0, BIT(6) },
+       [RESET_DE]      = { CMU_DEVRST0, BIT(7) },
+       [RESET_LCD]     = { CMU_DEVRST0, BIT(8) },
+       [RESET_SD2]     = { CMU_DEVRST0, BIT(9) },
+       [RESET_DSI]     = { CMU_DEVRST0, BIT(10) },
+       [RESET_CSI]     = { CMU_DEVRST0, BIT(11) },
+       [RESET_BISP]    = { CMU_DEVRST0, BIT(12) },
+       [RESET_KEY]     = { CMU_DEVRST0, BIT(14) },
+       [RESET_GPIO]    = { CMU_DEVRST0, BIT(15) },
+       [RESET_AUDIO]   = { CMU_DEVRST0, BIT(17) },
+       [RESET_PCM0]    = { CMU_DEVRST0, BIT(18) },
+       [RESET_VDE]     = { CMU_DEVRST0, BIT(19) },
+       [RESET_VCE]     = { CMU_DEVRST0, BIT(20) },
+       [RESET_GPU3D]   = { CMU_DEVRST0, BIT(22) },
+       [RESET_NIC301]  = { CMU_DEVRST0, BIT(23) },
+       [RESET_LENS]    = { CMU_DEVRST0, BIT(26) },
+       [RESET_PERIPHRESET] = { CMU_DEVRST0, BIT(27) },
+       [RESET_USB2_0]  = { CMU_DEVRST1, BIT(0) },
+       [RESET_TVOUT]   = { CMU_DEVRST1, BIT(1) },
+       [RESET_HDMI]    = { CMU_DEVRST1, BIT(2) },
+       [RESET_HDCP2TX] = { CMU_DEVRST1, BIT(3) },
+       [RESET_UART6]   = { CMU_DEVRST1, BIT(4) },
+       [RESET_UART0]   = { CMU_DEVRST1, BIT(5) },
+       [RESET_UART1]   = { CMU_DEVRST1, BIT(6) },
+       [RESET_UART2]   = { CMU_DEVRST1, BIT(7) },
+       [RESET_SPI0]    = { CMU_DEVRST1, BIT(8) },
+       [RESET_SPI1]    = { CMU_DEVRST1, BIT(9) },
+       [RESET_SPI2]    = { CMU_DEVRST1, BIT(10) },
+       [RESET_SPI3]    = { CMU_DEVRST1, BIT(11) },
+       [RESET_I2C0]    = { CMU_DEVRST1, BIT(12) },
+       [RESET_I2C1]    = { CMU_DEVRST1, BIT(13) },
+       [RESET_USB3]    = { CMU_DEVRST1, BIT(14) },
+       [RESET_UART3]   = { CMU_DEVRST1, BIT(15) },
+       [RESET_UART4]   = { CMU_DEVRST1, BIT(16) },
+       [RESET_UART5]   = { CMU_DEVRST1, BIT(17) },
+       [RESET_I2C2]    = { CMU_DEVRST1, BIT(18) },
+       [RESET_I2C3]    = { CMU_DEVRST1, BIT(19) },
+       [RESET_ETHERNET] = { CMU_DEVRST1, BIT(20) },
+       [RESET_CHIPID]  = { CMU_DEVRST1, BIT(21) },
+       [RESET_USB2_1]  = { CMU_DEVRST1, BIT(22) },
+       [RESET_WD0RESET] = { CMU_DEVRST1, BIT(24) },
+       [RESET_WD1RESET] = { CMU_DEVRST1, BIT(25) },
+       [RESET_WD2RESET] = { CMU_DEVRST1, BIT(26) },
+       [RESET_WD3RESET] = { CMU_DEVRST1, BIT(27) },
+       [RESET_DBG0RESET] = { CMU_DEVRST1, BIT(28) },
+       [RESET_DBG1RESET] = { CMU_DEVRST1, BIT(29) },
+       [RESET_DBG2RESET] = { CMU_DEVRST1, BIT(30) },
+       [RESET_DBG3RESET] = { CMU_DEVRST1, BIT(31) },
+};
+
 static struct owl_clk_desc s500_clk_desc = {
        .clks       = s500_clks,
        .num_clks   = ARRAY_SIZE(s500_clks),
 
        .hw_clks    = &s500_hw_clks,
+
+       .resets     = s500_resets,
+       .num_resets = ARRAY_SIZE(s500_resets),
 };
 
 static int s500_clk_probe(struct platform_device *pdev)
 {
        struct owl_clk_desc *desc;
+       struct owl_reset *reset;
+       int ret;
 
        desc = &s500_clk_desc;
        owl_clk_regmap_init(pdev, desc);
 
+       reset = devm_kzalloc(&pdev->dev, sizeof(*reset), GFP_KERNEL);
+       if (!reset)
+               return -ENOMEM;
+
+       reset->rcdev.of_node = pdev->dev.of_node;
+       reset->rcdev.ops = &owl_reset_ops;
+       reset->rcdev.nr_resets = desc->num_resets;
+       reset->reset_map = desc->resets;
+       reset->regmap = desc->regmap;
+
+       ret = devm_reset_controller_register(&pdev->dev, &reset->rcdev);
+       if (ret)
+               dev_err(&pdev->dev, "Failed to register reset controller\n");
+
        return owl_clk_probe(&pdev->dev, desc->hw_clks);
 }
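The probe now registers a reset controller next to the clock controller, backed by the table of (register, bit) pairs above. The per-line operations reduce to a read-modify-write of one bit through the regmap; the owl_reset_ops internals are not part of this hunk, so the sketch below is illustrative (the register indices and the assert/deassert polarity are assumptions):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Pretend register file standing in for the regmap. */
static uint32_t regs[2];

enum { CMU_DEVRST0 = 0, CMU_DEVRST1 = 1 };   /* indexes into regs[], not real offsets */

struct reset_map { unsigned int reg; uint32_t bit; };

static const struct reset_map resets[] = {
    { CMU_DEVRST0, 1u << 0 },    /* e.g. RESET_DMAC */
    { CMU_DEVRST1, 1u << 20 },   /* e.g. RESET_ETHERNET */
};

/* Assumed polarity: the line is held in reset while the bit is cleared. */
static void reset_assert(int id)   { regs[resets[id].reg] &= ~resets[id].bit; }
static void reset_deassert(int id) { regs[resets[id].reg] |=  resets[id].bit; }

int main(void)
{
    reset_deassert(1);
    printf("DEVRST1 = 0x%08" PRIx32 "\n", regs[CMU_DEVRST1]);
    reset_assert(1);
    printf("DEVRST1 = 0x%08" PRIx32 "\n", regs[CMU_DEVRST1]);
    return 0;
}
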
 
index 8b90357..79301e1 100644 (file)
@@ -23,3 +23,4 @@ obj-$(CONFIG_SOC_SAM9X60) += sam9x60.o
 obj-$(CONFIG_SOC_SAMA5D3) += sama5d3.o
 obj-$(CONFIG_SOC_SAMA5D4) += sama5d4.o
 obj-$(CONFIG_SOC_SAMA5D2) += sama5d2.o
+obj-$(CONFIG_SOC_SAMA7G5) += sama7g5.o
index 38bdb49..2c3d8e6 100644 (file)
@@ -160,7 +160,8 @@ static void __init at91rm9200_pmc_setup(struct device_node *np)
 
                hw = at91_clk_register_programmable(regmap, name,
                                                    parent_names, 4, i,
-                                                   &at91rm9200_programmable_layout);
+                                                   &at91rm9200_programmable_layout,
+                                                   NULL);
                if (IS_ERR(hw))
                        goto err_free;
 
index 6d0723a..bb81ff7 100644 (file)
@@ -436,7 +436,8 @@ static void __init at91sam926x_pmc_setup(struct device_node *np,
 
                hw = at91_clk_register_programmable(regmap, name,
                                                    parent_names, 4, i,
-                                                   &at91rm9200_programmable_layout);
+                                                   &at91rm9200_programmable_layout,
+                                                   NULL);
                if (IS_ERR(hw))
                        goto err_free;
 
index 9873b58..c88ee20 100644 (file)
@@ -111,7 +111,7 @@ static void __init at91sam9g45_pmc_setup(struct device_node *np)
                return;
        mainxtal_name = of_clk_get_parent_name(np, i);
 
-       regmap = syscon_node_to_regmap(np);
+       regmap = device_node_to_regmap(np);
        if (IS_ERR(regmap))
                return;
 
@@ -181,7 +181,8 @@ static void __init at91sam9g45_pmc_setup(struct device_node *np)
 
                hw = at91_clk_register_programmable(regmap, name,
                                                    parent_names, 5, i,
-                                                   &at91sam9g45_programmable_layout);
+                                                   &at91sam9g45_programmable_layout,
+                                                   NULL);
                if (IS_ERR(hw))
                        goto err_free;
 
index 630dc5d..93f7eb2 100644 (file)
@@ -124,7 +124,7 @@ static void __init at91sam9n12_pmc_setup(struct device_node *np)
                return;
        mainxtal_name = of_clk_get_parent_name(np, i);
 
-       regmap = syscon_node_to_regmap(np);
+       regmap = device_node_to_regmap(np);
        if (IS_ERR(regmap))
                return;
 
@@ -199,7 +199,8 @@ static void __init at91sam9n12_pmc_setup(struct device_node *np)
 
                hw = at91_clk_register_programmable(regmap, name,
                                                    parent_names, 5, i,
-                                                   &at91sam9x5_programmable_layout);
+                                                   &at91sam9x5_programmable_layout,
+                                                   NULL);
                if (IS_ERR(hw))
                        goto err_free;
 
@@ -222,7 +223,7 @@ static void __init at91sam9n12_pmc_setup(struct device_node *np)
                                                         at91sam9n12_periphck[i].n,
                                                         "masterck",
                                                         at91sam9n12_periphck[i].id,
-                                                        &range);
+                                                        &range, INT_MIN);
                if (IS_ERR(hw))
                        goto err_free;
 
index 0d1cc44..a343eb6 100644 (file)
@@ -137,7 +137,8 @@ static void __init at91sam9rl_pmc_setup(struct device_node *np)
 
                hw = at91_clk_register_programmable(regmap, name,
                                                    parent_names, 5, i,
-                                                   &at91rm9200_programmable_layout);
+                                                   &at91rm9200_programmable_layout,
+                                                   NULL);
                if (IS_ERR(hw))
                        goto err_free;
 
index 0ce3da0..22b9aad 100644 (file)
@@ -226,7 +226,8 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np,
 
                hw = at91_clk_register_programmable(regmap, name,
                                                    parent_names, 5, i,
-                                                   &at91sam9x5_programmable_layout);
+                                                   &at91sam9x5_programmable_layout,
+                                                   NULL);
                if (IS_ERR(hw))
                        goto err_free;
 
@@ -257,7 +258,7 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np,
                                                         at91sam9x5_periphck[i].n,
                                                         "masterck",
                                                         at91sam9x5_periphck[i].id,
-                                                        &range);
+                                                        &range, INT_MIN);
                if (IS_ERR(hw))
                        goto err_free;
 
@@ -270,7 +271,7 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np,
                                                         extra_pcks[i].n,
                                                         "masterck",
                                                         extra_pcks[i].id,
-                                                        &range);
+                                                        &range, INT_MIN);
                if (IS_ERR(hw))
                        goto err_free;
 
index 44a46dc..b4fc8d7 100644 (file)
 
 #define GENERATED_MAX_DIV      255
 
-#define GCK_INDEX_DT_AUDIO_PLL 5
-
 struct clk_generated {
        struct clk_hw hw;
        struct regmap *regmap;
        struct clk_range range;
        spinlock_t *lock;
+       u32 *mux_table;
        u32 id;
        u32 gckdiv;
        const struct clk_pcr_layout *layout;
        u8 parent_id;
-       bool audio_pll_allowed;
+       int chg_pid;
 };
 
 #define to_clk_generated(hw) \
@@ -83,7 +82,7 @@ static int clk_generated_is_enabled(struct clk_hw *hw)
        regmap_read(gck->regmap, gck->layout->offset, &status);
        spin_unlock_irqrestore(gck->lock, flags);
 
-       return status & AT91_PMC_PCR_GCKEN ? 1 : 0;
+       return !!(status & AT91_PMC_PCR_GCKEN);
 }
 
 static unsigned long
@@ -109,7 +108,7 @@ static void clk_generated_best_diff(struct clk_rate_request *req,
                tmp_rate = parent_rate / div;
        tmp_diff = abs(req->rate - tmp_rate);
 
-       if (*best_diff < 0 || *best_diff > tmp_diff) {
+       if (*best_diff < 0 || *best_diff >= tmp_diff) {
                *best_rate = tmp_rate;
                *best_diff = tmp_diff;
                req->best_parent_rate = parent_rate;
@@ -129,7 +128,10 @@ static int clk_generated_determine_rate(struct clk_hw *hw,
        int i;
        u32 div;
 
-       for (i = 0; i < clk_hw_get_num_parents(hw) - 1; i++) {
+       for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
+               if (gck->chg_pid == i)
+                       continue;
+
                parent = clk_hw_get_parent_by_index(hw, i);
                if (!parent)
                        continue;
@@ -161,16 +163,17 @@ static int clk_generated_determine_rate(struct clk_hw *hw,
         * that the only clks able to modify gck rate are those of audio IPs.
         */
 
-       if (!gck->audio_pll_allowed)
+       if (gck->chg_pid < 0)
                goto end;
 
-       parent = clk_hw_get_parent_by_index(hw, GCK_INDEX_DT_AUDIO_PLL);
+       parent = clk_hw_get_parent_by_index(hw, gck->chg_pid);
        if (!parent)
                goto end;
 
        for (div = 1; div < GENERATED_MAX_DIV + 2; div++) {
                req_parent.rate = req->rate * div;
-               __clk_determine_rate(parent, &req_parent);
+               if (__clk_determine_rate(parent, &req_parent))
+                       continue;
                clk_generated_best_diff(req, parent, req_parent.rate, div,
                                        &best_diff, &best_rate);
 
@@ -184,8 +187,8 @@ end:
                 __clk_get_name((req->best_parent_hw)->clk),
                 req->best_parent_rate);
 
-       if (best_rate < 0)
-               return best_rate;
+       if (best_rate < 0 || (gck->range.max && best_rate > gck->range.max))
+               return -EINVAL;
 
        req->rate = best_rate;
        return 0;
@@ -199,7 +202,11 @@ static int clk_generated_set_parent(struct clk_hw *hw, u8 index)
        if (index >= clk_hw_get_num_parents(hw))
                return -EINVAL;
 
-       gck->parent_id = index;
+       if (gck->mux_table)
+               gck->parent_id = clk_mux_index_to_val(gck->mux_table, 0, index);
+       else
+               gck->parent_id = index;
+
        return 0;
 }
 
@@ -271,8 +278,9 @@ struct clk_hw * __init
 at91_clk_register_generated(struct regmap *regmap, spinlock_t *lock,
                            const struct clk_pcr_layout *layout,
                            const char *name, const char **parent_names,
-                           u8 num_parents, u8 id, bool pll_audio,
-                           const struct clk_range *range)
+                           u32 *mux_table, u8 num_parents, u8 id,
+                           const struct clk_range *range,
+                           int chg_pid)
 {
        struct clk_generated *gck;
        struct clk_init_data init;
@@ -287,16 +295,18 @@ at91_clk_register_generated(struct regmap *regmap, spinlock_t *lock,
        init.ops = &generated_ops;
        init.parent_names = parent_names;
        init.num_parents = num_parents;
-       init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
-               CLK_SET_RATE_PARENT;
+       init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE;
+       if (chg_pid >= 0)
+               init.flags |= CLK_SET_RATE_PARENT;
 
        gck->id = id;
        gck->hw.init = &init;
        gck->regmap = regmap;
        gck->lock = lock;
        gck->range = *range;
-       gck->audio_pll_allowed = pll_audio;
+       gck->chg_pid = chg_pid;
        gck->layout = layout;
+       gck->mux_table = mux_table;
 
        clk_generated_startup(gck);
        hw = &gck->hw;
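
Two conventions introduced in the clk-generated rework above also appear in the callers changed earlier in this series: chg_pid replaces the old audio_pll_allowed flag (a negative value such as INT_MIN means no parent may be re-rated and CLK_SET_RATE_PARENT stays clear, while a value >= 0 names the one parent index whose rate may be changed), and the optional mux_table maps the parent index seen by the clock framework onto the value programmed into the PCR clock-source field. A minimal sketch of that table translation, using a made-up three-entry table rather than any real SoC mapping:

#include <linux/clk-provider.h>
#include <linux/printk.h>

/* Hypothetical mux table: framework parent index 0..2 -> register values 5, 7, 9. */
static u32 example_mux_table[] = { 5, 7, 9 };

static void example_mux_translation(struct clk_hw *hw)
{
	/* Parent index 1 is programmed into the register as 7 ... */
	u32 regval = clk_mux_index_to_val(example_mux_table, 0, 1);

	/* ... and reading 7 back resolves to framework index 1 again. */
	int index = clk_mux_val_to_index(hw, example_mux_table, 0, regval);

	pr_info("regval=%u index=%d\n", regval, index);
}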
index 37c2266..5c83e89 100644 (file)
@@ -175,7 +175,7 @@ static bool clk_main_rc_osc_ready(struct regmap *regmap)
 
        regmap_read(regmap, AT91_PMC_SR, &status);
 
-       return status & AT91_PMC_MOSCRCS;
+       return !!(status & AT91_PMC_MOSCRCS);
 }
 
 static int clk_main_rc_osc_prepare(struct clk_hw *hw)
@@ -336,7 +336,7 @@ static int clk_rm9200_main_is_prepared(struct clk_hw *hw)
 
        regmap_read(clkmain->regmap, AT91_CKGR_MCFR, &status);
 
-       return status & AT91_PMC_MAINRDY ? 1 : 0;
+       return !!(status & AT91_PMC_MAINRDY);
 }
 
 static unsigned long clk_rm9200_main_recalc_rate(struct clk_hw *hw,
@@ -398,7 +398,7 @@ static inline bool clk_sam9x5_main_ready(struct regmap *regmap)
 
        regmap_read(regmap, AT91_PMC_SR, &status);
 
-       return status & AT91_PMC_MOSCSELS ? 1 : 0;
+       return !!(status & AT91_PMC_MOSCSELS);
 }
 
 static int clk_sam9x5_main_prepare(struct clk_hw *hw)
index e7e0ba6..bd0d8a6 100644 (file)
 #define MASTER_DIV_SHIFT       8
 #define MASTER_DIV_MASK                0x3
 
+#define PMC_MCR                        0x30
+#define PMC_MCR_ID_MSK         GENMASK(3, 0)
+#define PMC_MCR_CMD            BIT(7)
+#define PMC_MCR_DIV            GENMASK(10, 8)
+#define PMC_MCR_CSS            GENMASK(20, 16)
+#define PMC_MCR_CSS_SHIFT      (16)
+#define PMC_MCR_EN             BIT(28)
+
+#define PMC_MCR_ID(x)          ((x) & PMC_MCR_ID_MSK)
+
+#define MASTER_MAX_ID          4
+
 #define to_clk_master(hw) container_of(hw, struct clk_master, hw)
 
 struct clk_master {
        struct clk_hw hw;
        struct regmap *regmap;
+       spinlock_t *lock;
        const struct clk_master_layout *layout;
        const struct clk_master_characteristics *characteristics;
+       u32 *mux_table;
        u32 mckr;
+       int chg_pid;
+       u8 id;
+       u8 parent;
+       u8 div;
 };
 
-static inline bool clk_master_ready(struct regmap *regmap)
+static inline bool clk_master_ready(struct clk_master *master)
 {
+       unsigned int bit = master->id ? AT91_PMC_MCKXRDY : AT91_PMC_MCKRDY;
        unsigned int status;
 
-       regmap_read(regmap, AT91_PMC_SR, &status);
+       regmap_read(master->regmap, AT91_PMC_SR, &status);
 
-       return status & AT91_PMC_MCKRDY ? 1 : 0;
+       return !!(status & bit);
 }
 
 static int clk_master_prepare(struct clk_hw *hw)
 {
        struct clk_master *master = to_clk_master(hw);
 
-       while (!clk_master_ready(master->regmap))
+       while (!clk_master_ready(master))
                cpu_relax();
 
        return 0;
@@ -50,7 +69,7 @@ static int clk_master_is_prepared(struct clk_hw *hw)
 {
        struct clk_master *master = to_clk_master(hw);
 
-       return clk_master_ready(master->regmap);
+       return clk_master_ready(master);
 }
 
 static unsigned long clk_master_recalc_rate(struct clk_hw *hw,
@@ -143,6 +162,287 @@ at91_clk_register_master(struct regmap *regmap,
        return hw;
 }
 
+static unsigned long
+clk_sama7g5_master_recalc_rate(struct clk_hw *hw,
+                              unsigned long parent_rate)
+{
+       struct clk_master *master = to_clk_master(hw);
+
+       return DIV_ROUND_CLOSEST_ULL(parent_rate, (1 << master->div));
+}
+
+static void clk_sama7g5_master_best_diff(struct clk_rate_request *req,
+                                        struct clk_hw *parent,
+                                        unsigned long parent_rate,
+                                        long *best_rate,
+                                        long *best_diff,
+                                        u32 div)
+{
+       unsigned long tmp_rate, tmp_diff;
+
+       if (div == MASTER_PRES_MAX)
+               tmp_rate = parent_rate / 3;
+       else
+               tmp_rate = parent_rate >> div;
+
+       tmp_diff = abs(req->rate - tmp_rate);
+
+       if (*best_diff < 0 || *best_diff >= tmp_diff) {
+               *best_rate = tmp_rate;
+               *best_diff = tmp_diff;
+               req->best_parent_rate = parent_rate;
+               req->best_parent_hw = parent;
+       }
+}
+
+static int clk_sama7g5_master_determine_rate(struct clk_hw *hw,
+                                            struct clk_rate_request *req)
+{
+       struct clk_master *master = to_clk_master(hw);
+       struct clk_rate_request req_parent = *req;
+       struct clk_hw *parent;
+       long best_rate = LONG_MIN, best_diff = LONG_MIN;
+       unsigned long parent_rate;
+       unsigned int div, i;
+
+       /* First: check the dividers of MCR. */
+       for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
+               parent = clk_hw_get_parent_by_index(hw, i);
+               if (!parent)
+                       continue;
+
+               parent_rate = clk_hw_get_rate(parent);
+               if (!parent_rate)
+                       continue;
+
+               for (div = 0; div < MASTER_PRES_MAX + 1; div++) {
+                       clk_sama7g5_master_best_diff(req, parent, parent_rate,
+                                                    &best_rate, &best_diff,
+                                                    div);
+                       if (!best_diff)
+                               break;
+               }
+
+               if (!best_diff)
+                       break;
+       }
+
+       /* Second: try to request the rate from the changeable parent. */
+       if (master->chg_pid < 0)
+               goto end;
+
+       parent = clk_hw_get_parent_by_index(hw, master->chg_pid);
+       if (!parent)
+               goto end;
+
+       for (div = 0; div < MASTER_PRES_MAX + 1; div++) {
+               if (div == MASTER_PRES_MAX)
+                       req_parent.rate = req->rate * 3;
+               else
+                       req_parent.rate = req->rate << div;
+
+               if (__clk_determine_rate(parent, &req_parent))
+                       continue;
+
+               clk_sama7g5_master_best_diff(req, parent, req_parent.rate,
+                                            &best_rate, &best_diff, div);
+
+               if (!best_diff)
+                       break;
+       }
+
+end:
+       pr_debug("MCK: %s, best_rate = %ld, parent clk: %s @ %ld\n",
+                __func__, best_rate,
+                __clk_get_name((req->best_parent_hw)->clk),
+                req->best_parent_rate);
+
+       if (best_rate < 0)
+               return -EINVAL;
+
+       req->rate = best_rate;
+
+       return 0;
+}
+
+static u8 clk_sama7g5_master_get_parent(struct clk_hw *hw)
+{
+       struct clk_master *master = to_clk_master(hw);
+       unsigned long flags;
+       u8 index;
+
+       spin_lock_irqsave(master->lock, flags);
+       index = clk_mux_val_to_index(&master->hw, master->mux_table, 0,
+                                    master->parent);
+       spin_unlock_irqrestore(master->lock, flags);
+
+       return index;
+}
+
+static int clk_sama7g5_master_set_parent(struct clk_hw *hw, u8 index)
+{
+       struct clk_master *master = to_clk_master(hw);
+       unsigned long flags;
+
+       if (index >= clk_hw_get_num_parents(hw))
+               return -EINVAL;
+
+       spin_lock_irqsave(master->lock, flags);
+       master->parent = clk_mux_index_to_val(master->mux_table, 0, index);
+       spin_unlock_irqrestore(master->lock, flags);
+
+       return 0;
+}
+
+static int clk_sama7g5_master_enable(struct clk_hw *hw)
+{
+       struct clk_master *master = to_clk_master(hw);
+       unsigned long flags;
+       unsigned int val, cparent;
+
+       spin_lock_irqsave(master->lock, flags);
+
+       regmap_write(master->regmap, PMC_MCR, PMC_MCR_ID(master->id));
+       regmap_read(master->regmap, PMC_MCR, &val);
+       regmap_update_bits(master->regmap, PMC_MCR,
+                          PMC_MCR_EN | PMC_MCR_CSS | PMC_MCR_DIV |
+                          PMC_MCR_CMD | PMC_MCR_ID_MSK,
+                          PMC_MCR_EN | (master->parent << PMC_MCR_CSS_SHIFT) |
+                          (master->div << MASTER_DIV_SHIFT) |
+                          PMC_MCR_CMD | PMC_MCR_ID(master->id));
+
+       cparent = (val & PMC_MCR_CSS) >> PMC_MCR_CSS_SHIFT;
+
+       /* Wait here only if parent is being changed. */
+       while ((cparent != master->parent) && !clk_master_ready(master))
+               cpu_relax();
+
+       spin_unlock_irqrestore(master->lock, flags);
+
+       return 0;
+}
+
+static void clk_sama7g5_master_disable(struct clk_hw *hw)
+{
+       struct clk_master *master = to_clk_master(hw);
+       unsigned long flags;
+
+       spin_lock_irqsave(master->lock, flags);
+
+       regmap_write(master->regmap, PMC_MCR, master->id);
+       regmap_update_bits(master->regmap, PMC_MCR,
+                          PMC_MCR_EN | PMC_MCR_CMD | PMC_MCR_ID_MSK,
+                          PMC_MCR_CMD | PMC_MCR_ID(master->id));
+
+       spin_unlock_irqrestore(master->lock, flags);
+}
+
+static int clk_sama7g5_master_is_enabled(struct clk_hw *hw)
+{
+       struct clk_master *master = to_clk_master(hw);
+       unsigned long flags;
+       unsigned int val;
+
+       spin_lock_irqsave(master->lock, flags);
+
+       regmap_write(master->regmap, PMC_MCR, master->id);
+       regmap_read(master->regmap, PMC_MCR, &val);
+
+       spin_unlock_irqrestore(master->lock, flags);
+
+       return !!(val & PMC_MCR_EN);
+}
+
+static int clk_sama7g5_master_set_rate(struct clk_hw *hw, unsigned long rate,
+                                      unsigned long parent_rate)
+{
+       struct clk_master *master = to_clk_master(hw);
+       unsigned long div, flags;
+
+       div = DIV_ROUND_CLOSEST(parent_rate, rate);
+       if ((div > (1 << (MASTER_PRES_MAX - 1))) || (div & (div - 1)))
+               return -EINVAL;
+
+       if (div == 3)
+               div = MASTER_PRES_MAX;
+       else
+               div = ffs(div) - 1;
+
+       spin_lock_irqsave(master->lock, flags);
+       master->div = div;
+       spin_unlock_irqrestore(master->lock, flags);
+
+       return 0;
+}
+
+static const struct clk_ops sama7g5_master_ops = {
+       .enable = clk_sama7g5_master_enable,
+       .disable = clk_sama7g5_master_disable,
+       .is_enabled = clk_sama7g5_master_is_enabled,
+       .recalc_rate = clk_sama7g5_master_recalc_rate,
+       .determine_rate = clk_sama7g5_master_determine_rate,
+       .set_rate = clk_sama7g5_master_set_rate,
+       .get_parent = clk_sama7g5_master_get_parent,
+       .set_parent = clk_sama7g5_master_set_parent,
+};
+
+struct clk_hw * __init
+at91_clk_sama7g5_register_master(struct regmap *regmap,
+                                const char *name, int num_parents,
+                                const char **parent_names,
+                                u32 *mux_table,
+                                spinlock_t *lock, u8 id,
+                                bool critical, int chg_pid)
+{
+       struct clk_master *master;
+       struct clk_hw *hw;
+       struct clk_init_data init;
+       unsigned long flags;
+       unsigned int val;
+       int ret;
+
+       if (!name || !num_parents || !parent_names || !mux_table ||
+           !lock || id > MASTER_MAX_ID)
+               return ERR_PTR(-EINVAL);
+
+       master = kzalloc(sizeof(*master), GFP_KERNEL);
+       if (!master)
+               return ERR_PTR(-ENOMEM);
+
+       init.name = name;
+       init.ops = &sama7g5_master_ops;
+       init.parent_names = parent_names;
+       init.num_parents = num_parents;
+       init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE;
+       if (chg_pid >= 0)
+               init.flags |= CLK_SET_RATE_PARENT;
+       if (critical)
+               init.flags |= CLK_IS_CRITICAL;
+
+       master->hw.init = &init;
+       master->regmap = regmap;
+       master->id = id;
+       master->chg_pid = chg_pid;
+       master->lock = lock;
+       master->mux_table = mux_table;
+
+       spin_lock_irqsave(master->lock, flags);
+       regmap_write(master->regmap, PMC_MCR, master->id);
+       regmap_read(master->regmap, PMC_MCR, &val);
+       master->parent = (val & PMC_MCR_CSS) >> PMC_MCR_CSS_SHIFT;
+       master->div = (val & PMC_MCR_DIV) >> MASTER_DIV_SHIFT;
+       spin_unlock_irqrestore(master->lock, flags);
+
+       hw = &master->hw;
+       ret = clk_hw_register(NULL, &master->hw);
+       if (ret) {
+               kfree(master);
+               hw = ERR_PTR(ret);
+       }
+
+       return hw;
+}
+
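+
In the sama7g5 master-clock code added above, the div value walked by clk_sama7g5_master_determine_rate() is an exponent: a candidate rate is parent_rate >> div, except that the special value MASTER_PRES_MAX selects parent_rate / 3 (see clk_sama7g5_master_best_diff()). A small standalone sketch of that candidate-rate table; MASTER_PRES_MAX is defined earlier in clk-master.c and is not visible in this hunk, so the value 7 below is only a stand-in:

#include <stdio.h>

#define MASTER_PRES_MAX	7	/* stand-in value for this sketch only */

static unsigned long candidate_rate(unsigned long parent_rate, unsigned int div)
{
	/* Mirrors clk_sama7g5_master_best_diff(): /3 for the max encoding, else a shift. */
	return div == MASTER_PRES_MAX ? parent_rate / 3 : parent_rate >> div;
}

int main(void)
{
	unsigned int div;

	/* Candidate MCKx rates derived from a 600 MHz parent. */
	for (div = 0; div <= MASTER_PRES_MAX; div++)
		printf("div=%u -> %lu Hz\n", div, candidate_rate(600000000UL, div));
	return 0;
}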
 const struct clk_master_layout at91rm9200_master_layout = {
        .mask = 0x31F,
        .pres_shift = 2,
index c2ab486..7867eaf 100644 (file)
@@ -38,6 +38,7 @@ struct clk_sam9x5_peripheral {
        u32 div;
        const struct clk_pcr_layout *layout;
        bool auto_div;
+       int chg_pid;
 };
 
 #define to_clk_sam9x5_peripheral(hw) \
@@ -208,7 +209,7 @@ static int clk_sam9x5_peripheral_is_enabled(struct clk_hw *hw)
        regmap_read(periph->regmap, periph->layout->offset, &status);
        spin_unlock_irqrestore(periph->lock, flags);
 
-       return status & AT91_PMC_PCR_EN ? 1 : 0;
+       return !!(status & AT91_PMC_PCR_EN);
 }
 
 static unsigned long
@@ -238,6 +239,87 @@ clk_sam9x5_peripheral_recalc_rate(struct clk_hw *hw,
        return parent_rate >> periph->div;
 }
 
+static void clk_sam9x5_peripheral_best_diff(struct clk_rate_request *req,
+                                           struct clk_hw *parent,
+                                           unsigned long parent_rate,
+                                           u32 shift, long *best_diff,
+                                           long *best_rate)
+{
+       unsigned long tmp_rate = parent_rate >> shift;
+       unsigned long tmp_diff = abs(req->rate - tmp_rate);
+
+       if (*best_diff < 0 || *best_diff >= tmp_diff) {
+               *best_rate = tmp_rate;
+               *best_diff = tmp_diff;
+               req->best_parent_rate = parent_rate;
+               req->best_parent_hw = parent;
+       }
+}
+
+static int clk_sam9x5_peripheral_determine_rate(struct clk_hw *hw,
+                                               struct clk_rate_request *req)
+{
+       struct clk_sam9x5_peripheral *periph = to_clk_sam9x5_peripheral(hw);
+       struct clk_hw *parent = clk_hw_get_parent(hw);
+       struct clk_rate_request req_parent = *req;
+       unsigned long parent_rate = clk_hw_get_rate(parent);
+       unsigned long tmp_rate;
+       long best_rate = LONG_MIN;
+       long best_diff = LONG_MIN;
+       u32 shift;
+
+       if (periph->id < PERIPHERAL_ID_MIN || !periph->range.max)
+               return parent_rate;
+
+       /* First step: check the available dividers. */
+       for (shift = 0; shift <= PERIPHERAL_MAX_SHIFT; shift++) {
+               tmp_rate = parent_rate >> shift;
+
+               if (periph->range.max && tmp_rate > periph->range.max)
+                       continue;
+
+               clk_sam9x5_peripheral_best_diff(req, parent, parent_rate,
+                                               shift, &best_diff, &best_rate);
+
+               if (!best_diff || best_rate <= req->rate)
+                       break;
+       }
+
+       if (periph->chg_pid < 0)
+               goto end;
+
+       /* Second step: try to request the rate from the changeable parent. */
+       parent = clk_hw_get_parent_by_index(hw, periph->chg_pid);
+       if (!parent)
+               goto end;
+
+       for (shift = 0; shift <= PERIPHERAL_MAX_SHIFT; shift++) {
+               req_parent.rate = req->rate << shift;
+
+               if (__clk_determine_rate(parent, &req_parent))
+                       continue;
+
+               clk_sam9x5_peripheral_best_diff(req, parent, req_parent.rate,
+                                               shift, &best_diff, &best_rate);
+
+               if (!best_diff)
+                       break;
+       }
+end:
+       if (best_rate < 0 ||
+           (periph->range.max && best_rate > periph->range.max))
+               return -EINVAL;
+
+       pr_debug("PCK: %s, best_rate = %ld, parent clk: %s @ %ld\n",
+                __func__, best_rate,
+                __clk_get_name((req->best_parent_hw)->clk),
+                req->best_parent_rate);
+
+       req->rate = best_rate;
+
+       return 0;
+}
+
 static long clk_sam9x5_peripheral_round_rate(struct clk_hw *hw,
                                             unsigned long rate,
                                             unsigned long *parent_rate)
@@ -320,11 +402,21 @@ static const struct clk_ops sam9x5_peripheral_ops = {
        .set_rate = clk_sam9x5_peripheral_set_rate,
 };
 
+static const struct clk_ops sam9x5_peripheral_chg_ops = {
+       .enable = clk_sam9x5_peripheral_enable,
+       .disable = clk_sam9x5_peripheral_disable,
+       .is_enabled = clk_sam9x5_peripheral_is_enabled,
+       .recalc_rate = clk_sam9x5_peripheral_recalc_rate,
+       .determine_rate = clk_sam9x5_peripheral_determine_rate,
+       .set_rate = clk_sam9x5_peripheral_set_rate,
+};
+
 struct clk_hw * __init
 at91_clk_register_sam9x5_peripheral(struct regmap *regmap, spinlock_t *lock,
                                    const struct clk_pcr_layout *layout,
                                    const char *name, const char *parent_name,
-                                   u32 id, const struct clk_range *range)
+                                   u32 id, const struct clk_range *range,
+                                   int chg_pid)
 {
        struct clk_sam9x5_peripheral *periph;
        struct clk_init_data init;
@@ -339,10 +431,16 @@ at91_clk_register_sam9x5_peripheral(struct regmap *regmap, spinlock_t *lock,
                return ERR_PTR(-ENOMEM);
 
        init.name = name;
-       init.ops = &sam9x5_peripheral_ops;
-       init.parent_names = (parent_name ? &parent_name : NULL);
-       init.num_parents = (parent_name ? 1 : 0);
-       init.flags = 0;
+       init.parent_names = &parent_name;
+       init.num_parents = 1;
+       if (chg_pid < 0) {
+               init.flags = 0;
+               init.ops = &sam9x5_peripheral_ops;
+       } else {
+               init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+                            CLK_SET_RATE_PARENT;
+               init.ops = &sam9x5_peripheral_chg_ops;
+       }
 
        periph->id = id;
        periph->hw.init = &init;
@@ -353,6 +451,7 @@ at91_clk_register_sam9x5_peripheral(struct regmap *regmap, spinlock_t *lock,
                periph->auto_div = true;
        periph->layout = layout;
        periph->range = *range;
+       periph->chg_pid = chg_pid;
 
        hw = &periph->hw;
        ret = clk_hw_register(NULL, &periph->hw);
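
The divider scan in clk_sam9x5_peripheral_determine_rate() above walks candidate rates of parent_rate >> shift, skips anything above range.max, keeps the closest candidate, and stops once the best candidate no longer exceeds the request (the chg_pid re-rating path then refines this further). A standalone worked example of just that scan; the 200 MHz parent, 100 MHz range cap, 60 MHz request and the PERIPHERAL_MAX_SHIFT value are illustrative assumptions:

#include <stdio.h>
#include <stdlib.h>

#define PERIPHERAL_MAX_SHIFT	3	/* assumed for this sketch */

int main(void)
{
	unsigned long parent = 200000000, max = 100000000, want = 60000000;
	long best_rate = -1, best_diff = -1;
	unsigned int shift;

	for (shift = 0; shift <= PERIPHERAL_MAX_SHIFT; shift++) {
		unsigned long rate = parent >> shift;
		long diff;

		if (max && rate > max)
			continue;		/* candidate exceeds the allowed range */

		diff = labs((long)want - (long)rate);
		if (best_diff < 0 || best_diff >= diff) {
			best_rate = rate;
			best_diff = diff;
		}
		if (!best_diff || (unsigned long)best_rate <= want)
			break;			/* exact match, or no longer overshooting */
	}

	/* Prints 50000000: /2 (100 MHz) overshoots, so /4 (50 MHz) wins for 60 MHz. */
	printf("%ld\n", best_rate);
	return 0;
}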
index 8ee66fb..fcf8f6a 100644 (file)
@@ -21,6 +21,7 @@
 struct clk_programmable {
        struct clk_hw hw;
        struct regmap *regmap;
+       u32 *mux_table;
        u8 id;
        const struct clk_programmable_layout *layout;
 };
@@ -108,6 +109,9 @@ static int clk_programmable_set_parent(struct clk_hw *hw, u8 index)
        if (layout->have_slck_mck)
                mask |= AT91_PMC_CSSMCK_MCK;
 
+       if (prog->mux_table)
+               pckr = clk_mux_index_to_val(prog->mux_table, 0, index);
+
        if (index > layout->css_mask) {
                if (index > PROG_MAX_RM9200_CSS && !layout->have_slck_mck)
                        return -EINVAL;
@@ -134,6 +138,9 @@ static u8 clk_programmable_get_parent(struct clk_hw *hw)
        if (layout->have_slck_mck && (pckr & AT91_PMC_CSSMCK_MCK) && !ret)
                ret = PROG_MAX_RM9200_CSS + 1;
 
+       if (prog->mux_table)
+               ret = clk_mux_val_to_index(&prog->hw, prog->mux_table, 0, ret);
+
        return ret;
 }
 
@@ -182,7 +189,8 @@ struct clk_hw * __init
 at91_clk_register_programmable(struct regmap *regmap,
                               const char *name, const char **parent_names,
                               u8 num_parents, u8 id,
-                              const struct clk_programmable_layout *layout)
+                              const struct clk_programmable_layout *layout,
+                              u32 *mux_table)
 {
        struct clk_programmable *prog;
        struct clk_hw *hw;
@@ -206,6 +214,7 @@ at91_clk_register_programmable(struct regmap *regmap,
        prog->layout = layout;
        prog->hw.init = &init;
        prog->regmap = regmap;
+       prog->mux_table = mux_table;
 
        hw = &prog->hw;
        ret = clk_hw_register(NULL, &prog->hw);
index e699803..b473298 100644 (file)
 #include "pmc.h"
 
 #define        PMC_PLL_CTRL0_DIV_MSK   GENMASK(7, 0)
-#define        PMC_PLL_CTRL1_MUL_MSK   GENMASK(30, 24)
+#define        PMC_PLL_CTRL1_MUL_MSK   GENMASK(31, 24)
+#define        PMC_PLL_CTRL1_FRACR_MSK GENMASK(21, 0)
 
 #define PLL_DIV_MAX            (FIELD_GET(PMC_PLL_CTRL0_DIV_MSK, UINT_MAX) + 1)
 #define UPLL_DIV               2
 #define PLL_MUL_MAX            (FIELD_GET(PMC_PLL_CTRL1_MUL_MSK, UINT_MAX) + 1)
 
-#define PLL_MAX_ID             1
+#define FCORE_MIN              (600000000)
+#define FCORE_MAX              (1200000000)
 
-struct sam9x60_pll {
-       struct clk_hw hw;
+#define PLL_MAX_ID             7
+
+struct sam9x60_pll_core {
        struct regmap *regmap;
        spinlock_t *lock;
        const struct clk_pll_characteristics *characteristics;
-       u32 frac;
+       const struct clk_pll_layout *layout;
+       struct clk_hw hw;
        u8 id;
-       u8 div;
+};
+
+struct sam9x60_frac {
+       struct sam9x60_pll_core core;
+       u32 frac;
        u16 mul;
 };
 
-#define to_sam9x60_pll(hw) container_of(hw, struct sam9x60_pll, hw)
+struct sam9x60_div {
+       struct sam9x60_pll_core core;
+       u8 div;
+};
+
+#define to_sam9x60_pll_core(hw)        container_of(hw, struct sam9x60_pll_core, hw)
+#define to_sam9x60_frac(core)  container_of(core, struct sam9x60_frac, core)
+#define to_sam9x60_div(core)   container_of(core, struct sam9x60_div, core)
 
 static inline bool sam9x60_pll_ready(struct regmap *regmap, int id)
 {
@@ -45,41 +60,53 @@ static inline bool sam9x60_pll_ready(struct regmap *regmap, int id)
        return !!(status & BIT(id));
 }
 
-static int sam9x60_pll_prepare(struct clk_hw *hw)
+static bool sam9x60_frac_pll_ready(struct regmap *regmap, u8 id)
 {
-       struct sam9x60_pll *pll = to_sam9x60_pll(hw);
-       struct regmap *regmap = pll->regmap;
-       unsigned long flags;
-       u8 div;
-       u16 mul;
-       u32 val;
+       return sam9x60_pll_ready(regmap, id);
+}
 
-       spin_lock_irqsave(pll->lock, flags);
-       regmap_write(regmap, AT91_PMC_PLL_UPDT, pll->id);
+static unsigned long sam9x60_frac_pll_recalc_rate(struct clk_hw *hw,
+                                                 unsigned long parent_rate)
+{
+       struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw);
+       struct sam9x60_frac *frac = to_sam9x60_frac(core);
 
-       regmap_read(regmap, AT91_PMC_PLL_CTRL0, &val);
-       div = FIELD_GET(PMC_PLL_CTRL0_DIV_MSK, val);
+       return (parent_rate * (frac->mul + 1) +
+               ((u64)parent_rate * frac->frac >> 22));
+}
 
+static int sam9x60_frac_pll_prepare(struct clk_hw *hw)
+{
+       struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw);
+       struct sam9x60_frac *frac = to_sam9x60_frac(core);
+       struct regmap *regmap = core->regmap;
+       unsigned int val, cfrac, cmul;
+       unsigned long flags;
+
+       spin_lock_irqsave(core->lock, flags);
+
+       regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
+                          AT91_PMC_PLL_UPDT_ID_MSK, core->id);
        regmap_read(regmap, AT91_PMC_PLL_CTRL1, &val);
-       mul = FIELD_GET(PMC_PLL_CTRL1_MUL_MSK, val);
+       cmul = (val & core->layout->mul_mask) >> core->layout->mul_shift;
+       cfrac = (val & core->layout->frac_mask) >> core->layout->frac_shift;
 
-       if (sam9x60_pll_ready(regmap, pll->id) &&
-           (div == pll->div && mul == pll->mul)) {
-               spin_unlock_irqrestore(pll->lock, flags);
-               return 0;
-       }
+       if (sam9x60_frac_pll_ready(regmap, core->id) &&
+           (cmul == frac->mul && cfrac == frac->frac))
+               goto unlock;
 
-       /* Recommended value for AT91_PMC_PLL_ACR */
-       if (pll->characteristics->upll)
+       /* Recommended value for PMC_PLL_ACR */
+       if (core->characteristics->upll)
                val = AT91_PMC_PLL_ACR_DEFAULT_UPLL;
        else
                val = AT91_PMC_PLL_ACR_DEFAULT_PLLA;
        regmap_write(regmap, AT91_PMC_PLL_ACR, val);
 
        regmap_write(regmap, AT91_PMC_PLL_CTRL1,
-                    FIELD_PREP(PMC_PLL_CTRL1_MUL_MSK, pll->mul));
+                    (frac->mul << core->layout->mul_shift) |
+                    (frac->frac << core->layout->frac_shift));
 
-       if (pll->characteristics->upll) {
+       if (core->characteristics->upll) {
                /* Enable the UTMI internal bandgap */
                val |= AT91_PMC_PLL_ACR_UTMIBG;
                regmap_write(regmap, AT91_PMC_PLL_ACR, val);
@@ -94,221 +121,409 @@ static int sam9x60_pll_prepare(struct clk_hw *hw)
        }
 
        regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
-                          AT91_PMC_PLL_UPDT_UPDATE, AT91_PMC_PLL_UPDT_UPDATE);
+                          AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
+                          AT91_PMC_PLL_UPDT_UPDATE | core->id);
 
-       regmap_write(regmap, AT91_PMC_PLL_CTRL0,
-                    AT91_PMC_PLL_CTRL0_ENLOCK | AT91_PMC_PLL_CTRL0_ENPLL |
-                    AT91_PMC_PLL_CTRL0_ENPLLCK | pll->div);
+       regmap_update_bits(regmap, AT91_PMC_PLL_CTRL0,
+                          AT91_PMC_PLL_CTRL0_ENLOCK | AT91_PMC_PLL_CTRL0_ENPLL,
+                          AT91_PMC_PLL_CTRL0_ENLOCK | AT91_PMC_PLL_CTRL0_ENPLL);
 
        regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
-                          AT91_PMC_PLL_UPDT_UPDATE, AT91_PMC_PLL_UPDT_UPDATE);
+                          AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
+                          AT91_PMC_PLL_UPDT_UPDATE | core->id);
 
-       while (!sam9x60_pll_ready(regmap, pll->id))
+       while (!sam9x60_pll_ready(regmap, core->id))
                cpu_relax();
 
-       spin_unlock_irqrestore(pll->lock, flags);
+unlock:
+       spin_unlock_irqrestore(core->lock, flags);
 
        return 0;
 }
 
-static int sam9x60_pll_is_prepared(struct clk_hw *hw)
+static void sam9x60_frac_pll_unprepare(struct clk_hw *hw)
 {
-       struct sam9x60_pll *pll = to_sam9x60_pll(hw);
+       struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw);
+       struct regmap *regmap = core->regmap;
+       unsigned long flags;
+
+       spin_lock_irqsave(core->lock, flags);
 
-       return sam9x60_pll_ready(pll->regmap, pll->id);
+       regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
+                          AT91_PMC_PLL_UPDT_ID_MSK, core->id);
+
+       regmap_update_bits(regmap, AT91_PMC_PLL_CTRL0, AT91_PMC_PLL_CTRL0_ENPLL, 0);
+
+       if (core->characteristics->upll)
+               regmap_update_bits(regmap, AT91_PMC_PLL_ACR,
+                                  AT91_PMC_PLL_ACR_UTMIBG | AT91_PMC_PLL_ACR_UTMIVR, 0);
+
+       regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
+                          AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
+                          AT91_PMC_PLL_UPDT_UPDATE | core->id);
+
+       spin_unlock_irqrestore(core->lock, flags);
 }
 
-static void sam9x60_pll_unprepare(struct clk_hw *hw)
+static int sam9x60_frac_pll_is_prepared(struct clk_hw *hw)
 {
-       struct sam9x60_pll *pll = to_sam9x60_pll(hw);
-       unsigned long flags;
+       struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw);
 
-       spin_lock_irqsave(pll->lock, flags);
+       return sam9x60_pll_ready(core->regmap, core->id);
+}
 
-       regmap_write(pll->regmap, AT91_PMC_PLL_UPDT, pll->id);
+static long sam9x60_frac_pll_compute_mul_frac(struct sam9x60_pll_core *core,
+                                             unsigned long rate,
+                                             unsigned long parent_rate,
+                                             bool update)
+{
+       struct sam9x60_frac *frac = to_sam9x60_frac(core);
+       unsigned long tmprate, remainder;
+       unsigned long nmul = 0;
+       unsigned long nfrac = 0;
 
-       regmap_update_bits(pll->regmap, AT91_PMC_PLL_CTRL0,
-                          AT91_PMC_PLL_CTRL0_ENPLLCK, 0);
+       if (rate < FCORE_MIN || rate > FCORE_MAX)
+               return -ERANGE;
 
-       regmap_update_bits(pll->regmap, AT91_PMC_PLL_UPDT,
-                          AT91_PMC_PLL_UPDT_UPDATE, AT91_PMC_PLL_UPDT_UPDATE);
+       /*
+        * Calculate the multiplier and fractional part that provide the
+        * closest rate to the requested one.
+        */
+       nmul = mult_frac(rate, 1, parent_rate);
+       tmprate = mult_frac(parent_rate, nmul, 1);
+       remainder = rate - tmprate;
 
-       regmap_update_bits(pll->regmap, AT91_PMC_PLL_CTRL0,
-                          AT91_PMC_PLL_CTRL0_ENPLL, 0);
+       if (remainder) {
+               nfrac = DIV_ROUND_CLOSEST_ULL((u64)remainder * (1 << 22),
+                                             parent_rate);
 
-       if (pll->characteristics->upll)
-               regmap_update_bits(pll->regmap, AT91_PMC_PLL_ACR,
-                                  AT91_PMC_PLL_ACR_UTMIBG |
-                                  AT91_PMC_PLL_ACR_UTMIVR, 0);
+               tmprate += DIV_ROUND_CLOSEST_ULL((u64)nfrac * parent_rate,
+                                                (1 << 22));
+       }
 
-       regmap_update_bits(pll->regmap, AT91_PMC_PLL_UPDT,
-                          AT91_PMC_PLL_UPDT_UPDATE, AT91_PMC_PLL_UPDT_UPDATE);
+       /* Check if the resulting rate is valid. */
+       if (tmprate < FCORE_MIN || tmprate > FCORE_MAX)
+               return -ERANGE;
 
-       spin_unlock_irqrestore(pll->lock, flags);
+       if (update) {
+               frac->mul = nmul - 1;
+               frac->frac = nfrac;
+       }
+
+       return tmprate;
 }
 
-static unsigned long sam9x60_pll_recalc_rate(struct clk_hw *hw,
-                                            unsigned long parent_rate)
+static long sam9x60_frac_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+                                       unsigned long *parent_rate)
 {
-       struct sam9x60_pll *pll = to_sam9x60_pll(hw);
+       struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw);
 
-       return (parent_rate * (pll->mul + 1)) / (pll->div + 1);
+       return sam9x60_frac_pll_compute_mul_frac(core, rate, *parent_rate, false);
 }
 
-static long sam9x60_pll_get_best_div_mul(struct sam9x60_pll *pll,
-                                        unsigned long rate,
-                                        unsigned long parent_rate,
-                                        bool update)
+static int sam9x60_frac_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+                                    unsigned long parent_rate)
 {
-       const struct clk_pll_characteristics *characteristics =
-                                                       pll->characteristics;
-       unsigned long bestremainder = ULONG_MAX;
-       unsigned long maxdiv, mindiv, tmpdiv;
-       long bestrate = -ERANGE;
-       unsigned long bestdiv = 0;
-       unsigned long bestmul = 0;
-       unsigned long bestfrac = 0;
+       struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw);
 
-       if (rate < characteristics->output[0].min ||
-           rate > characteristics->output[0].max)
-               return -ERANGE;
+       return sam9x60_frac_pll_compute_mul_frac(core, rate, parent_rate, true);
+}
 
-       if (!pll->characteristics->upll) {
-               mindiv = parent_rate / rate;
-               if (mindiv < 2)
-                       mindiv = 2;
+static const struct clk_ops sam9x60_frac_pll_ops = {
+       .prepare = sam9x60_frac_pll_prepare,
+       .unprepare = sam9x60_frac_pll_unprepare,
+       .is_prepared = sam9x60_frac_pll_is_prepared,
+       .recalc_rate = sam9x60_frac_pll_recalc_rate,
+       .round_rate = sam9x60_frac_pll_round_rate,
+       .set_rate = sam9x60_frac_pll_set_rate,
+};
 
-               maxdiv = DIV_ROUND_UP(parent_rate * PLL_MUL_MAX, rate);
-               if (maxdiv > PLL_DIV_MAX)
-                       maxdiv = PLL_DIV_MAX;
-       } else {
-               mindiv = maxdiv = UPLL_DIV;
-       }
+static int sam9x60_div_pll_prepare(struct clk_hw *hw)
+{
+       struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw);
+       struct sam9x60_div *div = to_sam9x60_div(core);
+       struct regmap *regmap = core->regmap;
+       unsigned long flags;
+       unsigned int val, cdiv;
 
-       for (tmpdiv = mindiv; tmpdiv <= maxdiv; tmpdiv++) {
-               unsigned long remainder;
-               unsigned long tmprate;
-               unsigned long tmpmul;
-               unsigned long tmpfrac = 0;
+       spin_lock_irqsave(core->lock, flags);
+       regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
+                          AT91_PMC_PLL_UPDT_ID_MSK, core->id);
+       regmap_read(regmap, AT91_PMC_PLL_CTRL0, &val);
+       cdiv = (val & core->layout->div_mask) >> core->layout->div_shift;
 
-               /*
-                * Calculate the multiplier associated with the current
-                * divider that provide the closest rate to the requested one.
-                */
-               tmpmul = mult_frac(rate, tmpdiv, parent_rate);
-               tmprate = mult_frac(parent_rate, tmpmul, tmpdiv);
-               remainder = rate - tmprate;
+       /* Stop if already enabled and nothing has changed. */
+       if (!!(val & core->layout->endiv_mask) && cdiv == div->div)
+               goto unlock;
 
-               if (remainder) {
-                       tmpfrac = DIV_ROUND_CLOSEST_ULL((u64)remainder * tmpdiv * (1 << 22),
-                                                       parent_rate);
+       regmap_update_bits(regmap, AT91_PMC_PLL_CTRL0,
+                          core->layout->div_mask | core->layout->endiv_mask,
+                          (div->div << core->layout->div_shift) |
+                          (1 << core->layout->endiv_shift));
 
-                       tmprate += DIV_ROUND_CLOSEST_ULL((u64)tmpfrac * parent_rate,
-                                                        tmpdiv * (1 << 22));
+       regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
+                          AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
+                          AT91_PMC_PLL_UPDT_UPDATE | core->id);
 
-                       if (tmprate > rate)
-                               remainder = tmprate - rate;
-                       else
-                               remainder = rate - tmprate;
-               }
+       while (!sam9x60_pll_ready(regmap, core->id))
+               cpu_relax();
 
-               /*
-                * Compare the remainder with the best remainder found until
-                * now and elect a new best multiplier/divider pair if the
-                * current remainder is smaller than the best one.
-                */
-               if (remainder < bestremainder) {
-                       bestremainder = remainder;
-                       bestdiv = tmpdiv;
-                       bestmul = tmpmul;
-                       bestrate = tmprate;
-                       bestfrac = tmpfrac;
+unlock:
+       spin_unlock_irqrestore(core->lock, flags);
+
+       return 0;
+}
+
+static void sam9x60_div_pll_unprepare(struct clk_hw *hw)
+{
+       struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw);
+       struct regmap *regmap = core->regmap;
+       unsigned long flags;
+
+       spin_lock_irqsave(core->lock, flags);
+
+       regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
+                          AT91_PMC_PLL_UPDT_ID_MSK, core->id);
+
+       regmap_update_bits(regmap, AT91_PMC_PLL_CTRL0,
+                          core->layout->endiv_mask, 0);
+
+       regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
+                          AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
+                          AT91_PMC_PLL_UPDT_UPDATE | core->id);
+
+       spin_unlock_irqrestore(core->lock, flags);
+}
+
+static int sam9x60_div_pll_is_prepared(struct clk_hw *hw)
+{
+       struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw);
+       struct regmap *regmap = core->regmap;
+       unsigned long flags;
+       unsigned int val;
+
+       spin_lock_irqsave(core->lock, flags);
+
+       regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
+                          AT91_PMC_PLL_UPDT_ID_MSK, core->id);
+       regmap_read(regmap, AT91_PMC_PLL_CTRL0, &val);
+
+       spin_unlock_irqrestore(core->lock, flags);
+
+       return !!(val & core->layout->endiv_mask);
+}
+
+static unsigned long sam9x60_div_pll_recalc_rate(struct clk_hw *hw,
+                                                unsigned long parent_rate)
+{
+       struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw);
+       struct sam9x60_div *div = to_sam9x60_div(core);
+
+       return DIV_ROUND_CLOSEST_ULL(parent_rate, (div->div + 1));
+}
+
+static long sam9x60_div_pll_compute_div(struct sam9x60_pll_core *core,
+                                       unsigned long *parent_rate,
+                                       unsigned long rate)
+{
+       const struct clk_pll_characteristics *characteristics =
+                                                       core->characteristics;
+       struct clk_hw *parent = clk_hw_get_parent(&core->hw);
+       unsigned long tmp_rate, tmp_parent_rate, tmp_diff;
+       long best_diff = -1, best_rate = -EINVAL;
+       u32 divid, best_div;
+
+       if (!rate)
+               return 0;
+
+       if (rate < characteristics->output[0].min ||
+           rate > characteristics->output[0].max)
+               return -ERANGE;
+
+       for (divid = 1; divid < core->layout->div_mask; divid++) {
+               tmp_parent_rate = clk_hw_round_rate(parent, rate * divid);
+               if (!tmp_parent_rate)
+                       continue;
+
+               tmp_rate = DIV_ROUND_CLOSEST_ULL(tmp_parent_rate, divid);
+               tmp_diff = abs(rate - tmp_rate);
+
+               if (best_diff < 0 || best_diff > tmp_diff) {
+                       *parent_rate = tmp_parent_rate;
+                       best_rate = tmp_rate;
+                       best_diff = tmp_diff;
+                       best_div = divid;
                }
 
-               /* We've found a perfect match!  */
-               if (!remainder)
+               if (!best_diff)
                        break;
        }
 
-       /* Check if bestrate is a valid output rate  */
-       if (bestrate < characteristics->output[0].min &&
-           bestrate > characteristics->output[0].max)
+       if (best_rate < characteristics->output[0].min ||
+           best_rate > characteristics->output[0].max)
                return -ERANGE;
 
-       if (update) {
-               pll->div = bestdiv - 1;
-               pll->mul = bestmul - 1;
-               pll->frac = bestfrac;
-       }
-
-       return bestrate;
+       return best_rate;
 }
 
-static long sam9x60_pll_round_rate(struct clk_hw *hw, unsigned long rate,
-                                  unsigned long *parent_rate)
+static long sam9x60_div_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+                                      unsigned long *parent_rate)
 {
-       struct sam9x60_pll *pll = to_sam9x60_pll(hw);
+       struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw);
 
-       return sam9x60_pll_get_best_div_mul(pll, rate, *parent_rate, false);
+       return sam9x60_div_pll_compute_div(core, parent_rate, rate);
 }
 
-static int sam9x60_pll_set_rate(struct clk_hw *hw, unsigned long rate,
-                               unsigned long parent_rate)
+static int sam9x60_div_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+                                   unsigned long parent_rate)
 {
-       struct sam9x60_pll *pll = to_sam9x60_pll(hw);
+       struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw);
+       struct sam9x60_div *div = to_sam9x60_div(core);
+
+       div->div = DIV_ROUND_CLOSEST(parent_rate, rate) - 1;
 
-       return sam9x60_pll_get_best_div_mul(pll, rate, parent_rate, true);
+       return 0;
 }
 
-static const struct clk_ops pll_ops = {
-       .prepare = sam9x60_pll_prepare,
-       .unprepare = sam9x60_pll_unprepare,
-       .is_prepared = sam9x60_pll_is_prepared,
-       .recalc_rate = sam9x60_pll_recalc_rate,
-       .round_rate = sam9x60_pll_round_rate,
-       .set_rate = sam9x60_pll_set_rate,
+static const struct clk_ops sam9x60_div_pll_ops = {
+       .prepare = sam9x60_div_pll_prepare,
+       .unprepare = sam9x60_div_pll_unprepare,
+       .is_prepared = sam9x60_div_pll_is_prepared,
+       .recalc_rate = sam9x60_div_pll_recalc_rate,
+       .round_rate = sam9x60_div_pll_round_rate,
+       .set_rate = sam9x60_div_pll_set_rate,
 };
 
 struct clk_hw * __init
-sam9x60_clk_register_pll(struct regmap *regmap, spinlock_t *lock,
-                        const char *name, const char *parent_name, u8 id,
-                        const struct clk_pll_characteristics *characteristics)
+sam9x60_clk_register_frac_pll(struct regmap *regmap, spinlock_t *lock,
+                             const char *name, const char *parent_name,
+                             struct clk_hw *parent_hw, u8 id,
+                             const struct clk_pll_characteristics *characteristics,
+                             const struct clk_pll_layout *layout, bool critical)
 {
-       struct sam9x60_pll *pll;
+       struct sam9x60_frac *frac;
        struct clk_hw *hw;
        struct clk_init_data init;
-       unsigned int pllr;
+       unsigned long parent_rate, flags;
+       unsigned int val;
        int ret;
 
-       if (id > PLL_MAX_ID)
+       if (id > PLL_MAX_ID || !lock || !parent_hw)
                return ERR_PTR(-EINVAL);
 
-       pll = kzalloc(sizeof(*pll), GFP_KERNEL);
-       if (!pll)
+       frac = kzalloc(sizeof(*frac), GFP_KERNEL);
+       if (!frac)
                return ERR_PTR(-ENOMEM);
 
        init.name = name;
-       init.ops = &pll_ops;
        init.parent_names = &parent_name;
        init.num_parents = 1;
+       init.ops = &sam9x60_frac_pll_ops;
        init.flags = CLK_SET_RATE_GATE;
+       if (critical)
+               init.flags |= CLK_IS_CRITICAL;
+
+       frac->core.id = id;
+       frac->core.hw.init = &init;
+       frac->core.characteristics = characteristics;
+       frac->core.layout = layout;
+       frac->core.regmap = regmap;
+       frac->core.lock = lock;
+
+       spin_lock_irqsave(frac->core.lock, flags);
+       if (sam9x60_pll_ready(regmap, id)) {
+               regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
+                                  AT91_PMC_PLL_UPDT_ID_MSK, id);
+               regmap_read(regmap, AT91_PMC_PLL_CTRL1, &val);
+               frac->mul = FIELD_GET(PMC_PLL_CTRL1_MUL_MSK, val);
+               frac->frac = FIELD_GET(PMC_PLL_CTRL1_FRACR_MSK, val);
+       } else {
+               /*
+                * This means the PLL was not set up by the bootloader. In
+                * this case we need to set a minimum rate for it, otherwise
+                * a child clock of this PLL could be enabled before its rate
+                * is set, which would enable the PLL at an unsupported rate
+                * and leave it unable to lock at all.
+                */
+               parent_rate = clk_hw_get_rate(parent_hw);
+               if (!parent_rate) {
+                       hw = ERR_PTR(-EINVAL);
+                       goto free;
+               }
+
+               ret = sam9x60_frac_pll_compute_mul_frac(&frac->core, FCORE_MIN,
+                                                       parent_rate, true);
+               if (ret <= 0) {
+                       hw = ERR_PTR(ret);
+                       goto free;
+               }
+       }
+       spin_unlock_irqrestore(frac->core.lock, flags);
+
+       hw = &frac->core.hw;
+       ret = clk_hw_register(NULL, hw);
+       if (ret) {
+               kfree(frac);
+               hw = ERR_PTR(ret);
+       }
 
-       pll->id = id;
-       pll->hw.init = &init;
-       pll->characteristics = characteristics;
-       pll->regmap = regmap;
-       pll->lock = lock;
+       return hw;
+
+free:
+       spin_unlock_irqrestore(frac->core.lock, flags);
+       kfree(frac);
+       return hw;
+}
+
+struct clk_hw * __init
+sam9x60_clk_register_div_pll(struct regmap *regmap, spinlock_t *lock,
+                            const char *name, const char *parent_name, u8 id,
+                            const struct clk_pll_characteristics *characteristics,
+                            const struct clk_pll_layout *layout, bool critical)
+{
+       struct sam9x60_div *div;
+       struct clk_hw *hw;
+       struct clk_init_data init;
+       unsigned long flags;
+       unsigned int val;
+       int ret;
+
+       if (id > PLL_MAX_ID || !lock)
+               return ERR_PTR(-EINVAL);
+
+       div = kzalloc(sizeof(*div), GFP_KERNEL);
+       if (!div)
+               return ERR_PTR(-ENOMEM);
+
+       init.name = name;
+       init.parent_names = &parent_name;
+       init.num_parents = 1;
+       init.ops = &sam9x60_div_pll_ops;
+       init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+                    CLK_SET_RATE_PARENT;
+       if (critical)
+               init.flags |= CLK_IS_CRITICAL;
+
+       div->core.id = id;
+       div->core.hw.init = &init;
+       div->core.characteristics = characteristics;
+       div->core.layout = layout;
+       div->core.regmap = regmap;
+       div->core.lock = lock;
+
+       spin_lock_irqsave(div->core.lock, flags);
+
+       regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
+                          AT91_PMC_PLL_UPDT_ID_MSK, id);
+       regmap_read(regmap, AT91_PMC_PLL_CTRL0, &val);
+       div->div = FIELD_GET(PMC_PLL_CTRL0_DIV_MSK, val);
 
-       regmap_write(regmap, AT91_PMC_PLL_UPDT, id);
-       regmap_read(regmap, AT91_PMC_PLL_CTRL0, &pllr);
-       pll->div = FIELD_GET(PMC_PLL_CTRL0_DIV_MSK, pllr);
-       regmap_read(regmap, AT91_PMC_PLL_CTRL1, &pllr);
-       pll->mul = FIELD_GET(PMC_PLL_CTRL1_MUL_MSK, pllr);
+       spin_unlock_irqrestore(div->core.lock, flags);
 
-       hw = &pll->hw;
+       hw = &div->core.hw;
        ret = clk_hw_register(NULL, hw);
        if (ret) {
-               kfree(pll);
+               kfree(div);
                hw = ERR_PTR(ret);
        }
 
index c4b3877..f83ec0d 100644 (file)
@@ -34,7 +34,7 @@ static inline bool clk_system_ready(struct regmap *regmap, int id)
 
        regmap_read(regmap, AT91_PMC_SR, &status);
 
-       return status & (1 << id) ? 1 : 0;
+       return !!(status & (1 << id));
 }
 
 static int clk_system_prepare(struct clk_hw *hw)
@@ -74,7 +74,7 @@ static int clk_system_is_prepared(struct clk_hw *hw)
 
        regmap_read(sys->regmap, AT91_PMC_SR, &status);
 
-       return status & (1 << sys->id) ? 1 : 0;
+       return !!(status & (1 << sys->id));
 }
 
 static const struct clk_ops system_ops = {
index f1ef4e1..df9f3fc 100644 (file)
@@ -120,9 +120,11 @@ static const struct clk_ops utmi_ops = {
        .recalc_rate = clk_utmi_recalc_rate,
 };
 
-struct clk_hw * __init
-at91_clk_register_utmi(struct regmap *regmap_pmc, struct regmap *regmap_sfr,
-                      const char *name, const char *parent_name)
+static struct clk_hw * __init
+at91_clk_register_utmi_internal(struct regmap *regmap_pmc,
+                               struct regmap *regmap_sfr,
+                               const char *name, const char *parent_name,
+                               const struct clk_ops *ops, unsigned long flags)
 {
        struct clk_utmi *utmi;
        struct clk_hw *hw;
@@ -134,10 +136,10 @@ at91_clk_register_utmi(struct regmap *regmap_pmc, struct regmap *regmap_sfr,
                return ERR_PTR(-ENOMEM);
 
        init.name = name;
-       init.ops = &utmi_ops;
+       init.ops = ops;
        init.parent_names = parent_name ? &parent_name : NULL;
        init.num_parents = parent_name ? 1 : 0;
-       init.flags = CLK_SET_RATE_GATE;
+       init.flags = flags;
 
        utmi->hw.init = &init;
        utmi->regmap_pmc = regmap_pmc;
@@ -152,3 +154,94 @@ at91_clk_register_utmi(struct regmap *regmap_pmc, struct regmap *regmap_sfr,
 
        return hw;
 }
+
+struct clk_hw * __init
+at91_clk_register_utmi(struct regmap *regmap_pmc, struct regmap *regmap_sfr,
+                      const char *name, const char *parent_name)
+{
+       return at91_clk_register_utmi_internal(regmap_pmc, regmap_sfr, name,
+                       parent_name, &utmi_ops, CLK_SET_RATE_GATE);
+}
+
+static int clk_utmi_sama7g5_prepare(struct clk_hw *hw)
+{
+       struct clk_utmi *utmi = to_clk_utmi(hw);
+       struct clk_hw *hw_parent;
+       unsigned long parent_rate;
+       unsigned int val;
+
+       hw_parent = clk_hw_get_parent(hw);
+       parent_rate = clk_hw_get_rate(hw_parent);
+
+       switch (parent_rate) {
+       case 16000000:
+               val = 0;
+               break;
+       case 20000000:
+               val = 2;
+               break;
+       case 24000000:
+               val = 3;
+               break;
+       case 32000000:
+               val = 5;
+               break;
+       default:
+               pr_err("UTMICK: unsupported main_xtal rate\n");
+               return -EINVAL;
+       }
+
+       regmap_write(utmi->regmap_pmc, AT91_PMC_XTALF, val);
+
+       return 0;
+}
+
+static int clk_utmi_sama7g5_is_prepared(struct clk_hw *hw)
+{
+       struct clk_utmi *utmi = to_clk_utmi(hw);
+       struct clk_hw *hw_parent;
+       unsigned long parent_rate;
+       unsigned int val;
+
+       hw_parent = clk_hw_get_parent(hw);
+       parent_rate = clk_hw_get_rate(hw_parent);
+
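+       /* Prepared only if the programmed XTALF value matches the main_xtal rate. */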
+       regmap_read(utmi->regmap_pmc, AT91_PMC_XTALF, &val);
+       switch (val & 0x7) {
+       case 0:
+               if (parent_rate == 16000000)
+                       return 1;
+               break;
+       case 2:
+               if (parent_rate == 20000000)
+                       return 1;
+               break;
+       case 3:
+               if (parent_rate == 24000000)
+                       return 1;
+               break;
+       case 5:
+               if (parent_rate == 32000000)
+                       return 1;
+               break;
+       default:
+               break;
+       }
+
+       return 0;
+}
+
+static const struct clk_ops sama7g5_utmi_ops = {
+       .prepare = clk_utmi_sama7g5_prepare,
+       .is_prepared = clk_utmi_sama7g5_is_prepared,
+       .recalc_rate = clk_utmi_recalc_rate,
+};
+
+struct clk_hw * __init
+at91_clk_sama7g5_register_utmi(struct regmap *regmap_pmc, const char *name,
+                              const char *parent_name)
+{
+       return at91_clk_register_utmi_internal(regmap_pmc, NULL, name,
+                       parent_name, &sama7g5_utmi_ops, 0);
+}
index aa1754e..a50084d 100644 (file)
@@ -22,6 +22,8 @@
 
 #define SYSTEM_MAX_ID          31
 
+#define GCK_INDEX_DT_AUDIO_PLL 5
+
 #ifdef CONFIG_HAVE_AT91_AUDIO_PLL
 static void __init of_sama5d2_clk_audio_pll_frac_setup(struct device_node *np)
 {
@@ -135,7 +137,7 @@ static void __init of_sama5d2_clk_generated_setup(struct device_node *np)
                return;
 
        for_each_child_of_node(np, gcknp) {
-               bool pll_audio = false;
+               int chg_pid = INT_MIN;
 
                if (of_property_read_u32(gcknp, "reg", &id))
                        continue;
@@ -152,12 +154,13 @@ static void __init of_sama5d2_clk_generated_setup(struct device_node *np)
                if (of_device_is_compatible(np, "atmel,sama5d2-clk-generated") &&
                    (id == GCK_ID_I2S0 || id == GCK_ID_I2S1 ||
                     id == GCK_ID_CLASSD))
-                       pll_audio = true;
+                       chg_pid = GCK_INDEX_DT_AUDIO_PLL;
 
                hw = at91_clk_register_generated(regmap, &pmc_pcr_lock,
                                                 &dt_pcr_layout, name,
-                                                parent_names, num_parents,
-                                                id, pll_audio, &range);
+                                                parent_names, NULL,
+                                                num_parents, id, &range,
+                                                chg_pid);
                if (IS_ERR(hw))
                        continue;
 
@@ -460,7 +463,8 @@ of_at91_clk_periph_setup(struct device_node *np, u8 type)
                                                                 &dt_pcr_layout,
                                                                 name,
                                                                 parent_name,
-                                                                id, &range);
+                                                                id, &range,
+                                                                INT_MIN);
                }
 
                if (IS_ERR(hw))
@@ -673,7 +677,8 @@ CLK_OF_DECLARE(at91sam9x5_clk_plldiv, "atmel,at91sam9x5-clk-plldiv",
 
 static void __init
 of_at91_clk_prog_setup(struct device_node *np,
-                      const struct clk_programmable_layout *layout)
+                      const struct clk_programmable_layout *layout,
+                      u32 *mux_table)
 {
        int num;
        u32 id;
@@ -707,7 +712,7 @@ of_at91_clk_prog_setup(struct device_node *np,
 
                hw = at91_clk_register_programmable(regmap, name,
                                                    parent_names, num_parents,
-                                                   id, layout);
+                                                   id, layout, mux_table);
                if (IS_ERR(hw))
                        continue;
 
@@ -717,21 +722,21 @@ of_at91_clk_prog_setup(struct device_node *np,
 
 static void __init of_at91rm9200_clk_prog_setup(struct device_node *np)
 {
-       of_at91_clk_prog_setup(np, &at91rm9200_programmable_layout);
+       of_at91_clk_prog_setup(np, &at91rm9200_programmable_layout, NULL);
 }
 CLK_OF_DECLARE(at91rm9200_clk_prog, "atmel,at91rm9200-clk-programmable",
               of_at91rm9200_clk_prog_setup);
 
 static void __init of_at91sam9g45_clk_prog_setup(struct device_node *np)
 {
-       of_at91_clk_prog_setup(np, &at91sam9g45_programmable_layout);
+       of_at91_clk_prog_setup(np, &at91sam9g45_programmable_layout, NULL);
 }
 CLK_OF_DECLARE(at91sam9g45_clk_prog, "atmel,at91sam9g45-clk-programmable",
               of_at91sam9g45_clk_prog_setup);
 
 static void __init of_at91sam9x5_clk_prog_setup(struct device_node *np)
 {
-       of_at91_clk_prog_setup(np, &at91sam9x5_programmable_layout);
+       of_at91_clk_prog_setup(np, &at91sam9x5_programmable_layout, NULL);
 }
 CLK_OF_DECLARE(at91sam9x5_clk_prog, "atmel,at91sam9x5-clk-programmable",
               of_at91sam9x5_clk_prog_setup);
index df616f2..7b86aff 100644 (file)
@@ -54,8 +54,14 @@ struct clk_master_characteristics {
 
 struct clk_pll_layout {
        u32 pllr_mask;
-       u16 mul_mask;
+       u32 mul_mask;
+       u32 frac_mask;
+       u32 div_mask;
+       u32 endiv_mask;
        u8 mul_shift;
+       u8 frac_shift;
+       u8 div_shift;
+       u8 endiv_shift;
 };
 
 extern const struct clk_pll_layout at91rm9200_pll_layout;
@@ -122,8 +128,8 @@ struct clk_hw * __init
 at91_clk_register_generated(struct regmap *regmap, spinlock_t *lock,
                            const struct clk_pcr_layout *layout,
                            const char *name, const char **parent_names,
-                           u8 num_parents, u8 id, bool pll_audio,
-                           const struct clk_range *range);
+                           u32 *mux_table, u8 num_parents, u8 id,
+                           const struct clk_range *range, int chg_pid);
 
 struct clk_hw * __init
 at91_clk_register_h32mx(struct regmap *regmap, const char *name,
@@ -155,13 +161,21 @@ at91_clk_register_master(struct regmap *regmap, const char *name,
                         const struct clk_master_characteristics *characteristics);
 
 struct clk_hw * __init
+at91_clk_sama7g5_register_master(struct regmap *regmap,
+                                const char *name, int num_parents,
+                                const char **parent_names, u32 *mux_table,
+                                spinlock_t *lock, u8 id, bool critical,
+                                int chg_pid);
+
+struct clk_hw * __init
 at91_clk_register_peripheral(struct regmap *regmap, const char *name,
                             const char *parent_name, u32 id);
 struct clk_hw * __init
 at91_clk_register_sam9x5_peripheral(struct regmap *regmap, spinlock_t *lock,
                                    const struct clk_pcr_layout *layout,
                                    const char *name, const char *parent_name,
-                                   u32 id, const struct clk_range *range);
+                                   u32 id, const struct clk_range *range,
+                                   int chg_pid);
 
 struct clk_hw * __init
 at91_clk_register_pll(struct regmap *regmap, const char *name,
@@ -173,14 +187,23 @@ at91_clk_register_plldiv(struct regmap *regmap, const char *name,
                         const char *parent_name);
 
 struct clk_hw * __init
-sam9x60_clk_register_pll(struct regmap *regmap, spinlock_t *lock,
-                        const char *name, const char *parent_name, u8 id,
-                        const struct clk_pll_characteristics *characteristics);
+sam9x60_clk_register_div_pll(struct regmap *regmap, spinlock_t *lock,
+                            const char *name, const char *parent_name, u8 id,
+                            const struct clk_pll_characteristics *characteristics,
+                            const struct clk_pll_layout *layout, bool critical);
+
+struct clk_hw * __init
+sam9x60_clk_register_frac_pll(struct regmap *regmap, spinlock_t *lock,
+                             const char *name, const char *parent_name,
+                             struct clk_hw *parent_hw, u8 id,
+                             const struct clk_pll_characteristics *characteristics,
+                             const struct clk_pll_layout *layout, bool critical);
 
 struct clk_hw * __init
 at91_clk_register_programmable(struct regmap *regmap, const char *name,
                               const char **parent_names, u8 num_parents, u8 id,
-                              const struct clk_programmable_layout *layout);
+                              const struct clk_programmable_layout *layout,
+                              u32 *mux_table);
 
 struct clk_hw * __init
 at91_clk_register_sam9260_slow(struct regmap *regmap,
@@ -213,6 +236,10 @@ struct clk_hw * __init
 at91_clk_register_utmi(struct regmap *regmap_pmc, struct regmap *regmap_sfr,
                       const char *name, const char *parent_name);
 
+struct clk_hw * __init
+at91_clk_sama7g5_register_utmi(struct regmap *regmap, const char *name,
+                              const char *parent_name);
+
 #ifdef CONFIG_PM
 void pmc_register_id(u8 id);
 void pmc_register_pck(u8 pck);
index 3e20aa6..ab6318c 100644 (file)
@@ -22,7 +22,7 @@ static const struct clk_master_layout sam9x60_master_layout = {
 };
 
 static const struct clk_range plla_outputs[] = {
-       { .min = 300000000, .max = 600000000 },
+       { .min = 2343750, .max = 1200000000 },
 };
 
 static const struct clk_pll_characteristics plla_characteristics = {
@@ -42,6 +42,20 @@ static const struct clk_pll_characteristics upll_characteristics = {
        .upll = true,
 };
 
+static const struct clk_pll_layout pll_frac_layout = {
+       .mul_mask = GENMASK(31, 24),
+       .frac_mask = GENMASK(21, 0),
+       .mul_shift = 24,
+       .frac_shift = 0,
+};
+
+static const struct clk_pll_layout pll_div_layout = {
+       .div_mask = GENMASK(7, 0),
+       .endiv_mask = BIT(29),
+       .div_shift = 0,
+       .endiv_shift = 29,
+};
+
 static const struct clk_programmable_layout sam9x60_programmable_layout = {
        .pres_mask = 0xff,
        .pres_shift = 8,
@@ -156,6 +170,7 @@ static void __init sam9x60_pmc_setup(struct device_node *np)
        const char *td_slck_name, *md_slck_name, *mainxtal_name;
        struct pmc_data *sam9x60_pmc;
        const char *parent_names[6];
+       struct clk_hw *main_osc_hw;
        struct regmap *regmap;
        struct clk_hw *hw;
        int i;
@@ -178,7 +193,7 @@ static void __init sam9x60_pmc_setup(struct device_node *np)
                return;
        mainxtal_name = of_clk_get_parent_name(np, i);
 
-       regmap = syscon_node_to_regmap(np);
+       regmap = device_node_to_regmap(np);
        if (IS_ERR(regmap))
                return;
 
@@ -189,7 +204,7 @@ static void __init sam9x60_pmc_setup(struct device_node *np)
        if (!sam9x60_pmc)
                return;
 
-       hw = at91_clk_register_main_rc_osc(regmap, "main_rc_osc", 24000000,
+       hw = at91_clk_register_main_rc_osc(regmap, "main_rc_osc", 12000000,
                                           50000000);
        if (IS_ERR(hw))
                goto err_free;
@@ -200,6 +215,7 @@ static void __init sam9x60_pmc_setup(struct device_node *np)
                                        bypass);
        if (IS_ERR(hw))
                goto err_free;
+       main_osc_hw = hw;
 
        parent_names[0] = "main_rc_osc";
        parent_names[1] = "main_osc";
@@ -209,15 +225,31 @@ static void __init sam9x60_pmc_setup(struct device_node *np)
 
        sam9x60_pmc->chws[PMC_MAIN] = hw;
 
-       hw = sam9x60_clk_register_pll(regmap, &pmc_pll_lock, "pllack",
-                                     "mainck", 0, &plla_characteristics);
+       hw = sam9x60_clk_register_frac_pll(regmap, &pmc_pll_lock, "pllack_fracck",
+                                          "mainck", sam9x60_pmc->chws[PMC_MAIN],
+                                          0, &plla_characteristics,
+                                          &pll_frac_layout, true);
+       if (IS_ERR(hw))
+               goto err_free;
+
+       hw = sam9x60_clk_register_div_pll(regmap, &pmc_pll_lock, "pllack_divck",
+                                         "pllack_fracck", 0, &plla_characteristics,
+                                         &pll_div_layout, true);
        if (IS_ERR(hw))
                goto err_free;
 
        sam9x60_pmc->chws[PMC_PLLACK] = hw;
 
-       hw = sam9x60_clk_register_pll(regmap, &pmc_pll_lock, "upllck",
-                                     "main_osc", 1, &upll_characteristics);
+       hw = sam9x60_clk_register_frac_pll(regmap, &pmc_pll_lock, "upllck_fracck",
+                                          "main_osc", main_osc_hw, 1,
+                                          &upll_characteristics,
+                                          &pll_frac_layout, false);
+       if (IS_ERR(hw))
+               goto err_free;
+
+       hw = sam9x60_clk_register_div_pll(regmap, &pmc_pll_lock, "upllck_divck",
+                                         "upllck_fracck", 1, &upll_characteristics,
+                                         &pll_div_layout, false);
        if (IS_ERR(hw))
                goto err_free;
 
@@ -225,7 +257,7 @@ static void __init sam9x60_pmc_setup(struct device_node *np)
 
        parent_names[0] = md_slck_name;
        parent_names[1] = "mainck";
-       parent_names[2] = "pllack";
+       parent_names[2] = "pllack_divck";
        hw = at91_clk_register_master(regmap, "masterck", 3, parent_names,
                                      &sam9x60_master_layout,
                                      &mck_characteristics);
@@ -234,8 +266,8 @@ static void __init sam9x60_pmc_setup(struct device_node *np)
 
        sam9x60_pmc->chws[PMC_MCK] = hw;
 
-       parent_names[0] = "pllack";
-       parent_names[1] = "upllck";
+       parent_names[0] = "pllack_divck";
+       parent_names[1] = "upllck_divck";
        parent_names[2] = "main_osc";
        hw = sam9x60_clk_register_usb(regmap, "usbck", parent_names, 3);
        if (IS_ERR(hw))
@@ -245,8 +277,8 @@ static void __init sam9x60_pmc_setup(struct device_node *np)
        parent_names[1] = td_slck_name;
        parent_names[2] = "mainck";
        parent_names[3] = "masterck";
-       parent_names[4] = "pllack";
-       parent_names[5] = "upllck";
+       parent_names[4] = "pllack_divck";
+       parent_names[5] = "upllck_divck";
        for (i = 0; i < 8; i++) {
                char name[6];
 
@@ -254,7 +286,8 @@ static void __init sam9x60_pmc_setup(struct device_node *np)
 
                hw = at91_clk_register_programmable(regmap, name,
                                                    parent_names, 6, i,
-                                                   &sam9x60_programmable_layout);
+                                                   &sam9x60_programmable_layout,
+                                                   NULL);
                if (IS_ERR(hw))
                        goto err_free;
 
@@ -277,7 +310,7 @@ static void __init sam9x60_pmc_setup(struct device_node *np)
                                                         sam9x60_periphck[i].n,
                                                         "masterck",
                                                         sam9x60_periphck[i].id,
-                                                        &range);
+                                                        &range, INT_MIN);
                if (IS_ERR(hw))
                        goto err_free;
 
@@ -288,10 +321,9 @@ static void __init sam9x60_pmc_setup(struct device_node *np)
                hw = at91_clk_register_generated(regmap, &pmc_pcr_lock,
                                                 &sam9x60_pcr_layout,
                                                 sam9x60_gck[i].n,
-                                                parent_names, 6,
+                                                parent_names, NULL, 6,
                                                 sam9x60_gck[i].id,
-                                                false,
-                                                &sam9x60_gck[i].r);
+                                                &sam9x60_gck[i].r, INT_MIN);
                if (IS_ERR(hw))
                        goto err_free;
 
index d69421d..8b22076 100644 (file)
@@ -116,21 +116,20 @@ static const struct {
        char *n;
        u8 id;
        struct clk_range r;
-       bool pll;
+       int chg_pid;
 } sama5d2_gck[] = {
-       { .n = "sdmmc0_gclk", .id = 31, },
-       { .n = "sdmmc1_gclk", .id = 32, },
-       { .n = "tcb0_gclk",   .id = 35, .r = { .min = 0, .max = 83000000 }, },
-       { .n = "tcb1_gclk",   .id = 36, .r = { .min = 0, .max = 83000000 }, },
-       { .n = "pwm_gclk",    .id = 38, .r = { .min = 0, .max = 83000000 }, },
-       { .n = "isc_gclk",    .id = 46, },
-       { .n = "pdmic_gclk",  .id = 48, },
-       { .n = "i2s0_gclk",   .id = 54, .pll = true },
-       { .n = "i2s1_gclk",   .id = 55, .pll = true },
-       { .n = "can0_gclk",   .id = 56, .r = { .min = 0, .max = 80000000 }, },
-       { .n = "can1_gclk",   .id = 57, .r = { .min = 0, .max = 80000000 }, },
-       { .n = "classd_gclk", .id = 59, .r = { .min = 0, .max = 100000000 },
-         .pll = true },
+       { .n = "sdmmc0_gclk", .id = 31, .chg_pid = INT_MIN, },
+       { .n = "sdmmc1_gclk", .id = 32, .chg_pid = INT_MIN, },
+       { .n = "tcb0_gclk",   .id = 35, .chg_pid = INT_MIN, .r = { .min = 0, .max = 83000000 }, },
+       { .n = "tcb1_gclk",   .id = 36, .chg_pid = INT_MIN, .r = { .min = 0, .max = 83000000 }, },
+       { .n = "pwm_gclk",    .id = 38, .chg_pid = INT_MIN, .r = { .min = 0, .max = 83000000 }, },
+       { .n = "isc_gclk",    .id = 46, .chg_pid = INT_MIN, },
+       { .n = "pdmic_gclk",  .id = 48, .chg_pid = INT_MIN, },
+       { .n = "i2s0_gclk",   .id = 54, .chg_pid = 5, },
+       { .n = "i2s1_gclk",   .id = 55, .chg_pid = 5, },
+       { .n = "can0_gclk",   .id = 56, .chg_pid = INT_MIN, .r = { .min = 0, .max = 80000000 }, },
+       { .n = "can1_gclk",   .id = 57, .chg_pid = INT_MIN, .r = { .min = 0, .max = 80000000 }, },
+       { .n = "classd_gclk", .id = 59, .chg_pid = 5, .r = { .min = 0, .max = 100000000 }, },
 };
 
 static const struct clk_programmable_layout sama5d2_programmable_layout = {
@@ -269,7 +268,8 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
 
                hw = at91_clk_register_programmable(regmap, name,
                                                    parent_names, 6, i,
-                                                   &sama5d2_programmable_layout);
+                                                   &sama5d2_programmable_layout,
+                                                   NULL);
                if (IS_ERR(hw))
                        goto err_free;
 
@@ -292,7 +292,7 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
                                                         sama5d2_periphck[i].n,
                                                         "masterck",
                                                         sama5d2_periphck[i].id,
-                                                        &range);
+                                                        &range, INT_MIN);
                if (IS_ERR(hw))
                        goto err_free;
 
@@ -305,7 +305,8 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
                                                         sama5d2_periph32ck[i].n,
                                                         "h32mxck",
                                                         sama5d2_periph32ck[i].id,
-                                                        &sama5d2_periph32ck[i].r);
+                                                        &sama5d2_periph32ck[i].r,
+                                                        INT_MIN);
                if (IS_ERR(hw))
                        goto err_free;
 
@@ -322,10 +323,10 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
                hw = at91_clk_register_generated(regmap, &pmc_pcr_lock,
                                                 &sama5d2_pcr_layout,
                                                 sama5d2_gck[i].n,
-                                                parent_names, 6,
+                                                parent_names, NULL, 6,
                                                 sama5d2_gck[i].id,
-                                                sama5d2_gck[i].pll,
-                                                &sama5d2_gck[i].r);
+                                                &sama5d2_gck[i].r,
+                                                sama5d2_gck[i].chg_pid);
                if (IS_ERR(hw))
                        goto err_free;
 
index 5e4e44d..7c6e0a5 100644 (file)
@@ -121,7 +121,7 @@ static void __init sama5d3_pmc_setup(struct device_node *np)
                return;
        mainxtal_name = of_clk_get_parent_name(np, i);
 
-       regmap = syscon_node_to_regmap(np);
+       regmap = device_node_to_regmap(np);
        if (IS_ERR(regmap))
                return;
 
@@ -200,7 +200,8 @@ static void __init sama5d3_pmc_setup(struct device_node *np)
 
                hw = at91_clk_register_programmable(regmap, name,
                                                    parent_names, 5, i,
-                                                   &at91sam9x5_programmable_layout);
+                                                   &at91sam9x5_programmable_layout,
+                                                   NULL);
                if (IS_ERR(hw))
                        goto err_free;
 
@@ -223,7 +224,8 @@ static void __init sama5d3_pmc_setup(struct device_node *np)
                                                         sama5d3_periphck[i].n,
                                                         "masterck",
                                                         sama5d3_periphck[i].id,
-                                                        &sama5d3_periphck[i].r);
+                                                        &sama5d3_periphck[i].r,
+                                                        INT_MIN);
                if (IS_ERR(hw))
                        goto err_free;
 
index 662ff5f..92d8d41 100644 (file)
@@ -223,7 +223,8 @@ static void __init sama5d4_pmc_setup(struct device_node *np)
 
                hw = at91_clk_register_programmable(regmap, name,
                                                    parent_names, 5, i,
-                                                   &at91sam9x5_programmable_layout);
+                                                   &at91sam9x5_programmable_layout,
+                                                   NULL);
                if (IS_ERR(hw))
                        goto err_free;
 
@@ -246,7 +247,7 @@ static void __init sama5d4_pmc_setup(struct device_node *np)
                                                         sama5d4_periphck[i].n,
                                                         "masterck",
                                                         sama5d4_periphck[i].id,
-                                                        &range);
+                                                        &range, INT_MIN);
                if (IS_ERR(hw))
                        goto err_free;
 
@@ -259,7 +260,7 @@ static void __init sama5d4_pmc_setup(struct device_node *np)
                                                         sama5d4_periph32ck[i].n,
                                                         "h32mxck",
                                                         sama5d4_periph32ck[i].id,
-                                                        &range);
+                                                        &range, INT_MIN);
                if (IS_ERR(hw))
                        goto err_free;
 
diff --git a/drivers/clk/at91/sama7g5.c b/drivers/clk/at91/sama7g5.c
new file mode 100644 (file)
index 0000000..0db2ab3
--- /dev/null
@@ -0,0 +1,1059 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SAMA7G5 PMC code.
+ *
+ * Copyright (C) 2020 Microchip Technology Inc. and its subsidiaries
+ *
+ * Author: Claudiu Beznea <claudiu.beznea@microchip.com>
+ *
+ */
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/mfd/syscon.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/clock/at91.h>
+
+#include "pmc.h"
+
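+/* Fill the first _count entries of _table with their own index (identity mapping). */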
+#define SAMA7G5_INIT_TABLE(_table, _count)             \
+       do {                                            \
+               u8 _i;                                  \
+               for (_i = 0; _i < (_count); _i++)       \
+                       (_table)[_i] = _i;              \
+       } while (0)
+
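+/* Copy _count entries from _from into _to. */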
+#define SAMA7G5_FILL_TABLE(_to, _from, _count)         \
+       do {                                            \
+               u8 _i;                                  \
+               for (_i = 0; _i < (_count); _i++) {     \
+                       (_to)[_i] = (_from)[_i];        \
+               }                                       \
+       } while (0)
+
+static DEFINE_SPINLOCK(pmc_pll_lock);
+static DEFINE_SPINLOCK(pmc_mckX_lock);
+
+/**
+ * PLL clocks identifiers
+ * @PLL_ID_CPU:                CPU PLL identifier
+ * @PLL_ID_SYS:                System PLL identifier
+ * @PLL_ID_DDR:                DDR PLL identifier
+ * @PLL_ID_IMG:                Image subsystem PLL identifier
+ * @PLL_ID_BAUD:       Baud PLL identifier
+ * @PLL_ID_AUDIO:      Audio PLL identifier
+ * @PLL_ID_ETH:                Ethernet PLL identifier
+ */
+enum pll_ids {
+       PLL_ID_CPU,
+       PLL_ID_SYS,
+       PLL_ID_DDR,
+       PLL_ID_IMG,
+       PLL_ID_BAUD,
+       PLL_ID_AUDIO,
+       PLL_ID_ETH,
+       PLL_ID_MAX,
+};
+
+/**
+ * PLL type identifiers
+ * @PLL_TYPE_FRAC:     fractional PLL identifier
+ * @PLL_TYPE_DIV:      divider PLL identifier
+ */
+enum pll_type {
+       PLL_TYPE_FRAC,
+       PLL_TYPE_DIV,
+};
+
+/* Layout for fractional PLLs. */
+static const struct clk_pll_layout pll_layout_frac = {
+       .mul_mask       = GENMASK(31, 24),
+       .frac_mask      = GENMASK(21, 0),
+       .mul_shift      = 24,
+       .frac_shift     = 0,
+};
+
+/* Layout for DIVPMC dividers. */
+static const struct clk_pll_layout pll_layout_divpmc = {
+       .div_mask       = GENMASK(7, 0),
+       .endiv_mask     = BIT(29),
+       .div_shift      = 0,
+       .endiv_shift    = 29,
+};
+
+/* Layout for DIVIO dividers. */
+static const struct clk_pll_layout pll_layout_divio = {
+       .div_mask       = GENMASK(19, 12),
+       .endiv_mask     = BIT(30),
+       .div_shift      = 12,
+       .endiv_shift    = 30,
+};
+
+/**
+ * PLL clocks description
+ * @n:         clock name
+ * @p:         clock parent
+ * @l:         clock layout
+ * @t:         clock type
+ * @c:         true if clock is critical and cannot be disabled
+ * @eid:       export index in sama7g5->chws[] array
+ */
+static const struct {
+       const char *n;
+       const char *p;
+       const struct clk_pll_layout *l;
+       u8 t;
+       u8 c;
+       u8 eid;
+} sama7g5_plls[][PLL_ID_MAX] = {
+       [PLL_ID_CPU] = {
+               { .n = "cpupll_fracck",
+                 .p = "mainck",
+                 .l = &pll_layout_frac,
+                 .t = PLL_TYPE_FRAC,
+                 .c = 1, },
+
+               { .n = "cpupll_divpmcck",
+                 .p = "cpupll_fracck",
+                 .l = &pll_layout_divpmc,
+                 .t = PLL_TYPE_DIV,
+                 .c = 1, },
+       },
+
+       [PLL_ID_SYS] = {
+               { .n = "syspll_fracck",
+                 .p = "mainck",
+                 .l = &pll_layout_frac,
+                 .t = PLL_TYPE_FRAC,
+                 .c = 1, },
+
+               { .n = "syspll_divpmcck",
+                 .p = "syspll_fracck",
+                 .l = &pll_layout_divpmc,
+                 .t = PLL_TYPE_DIV,
+                 .c = 1, },
+       },
+
+       [PLL_ID_DDR] = {
+               { .n = "ddrpll_fracck",
+                 .p = "mainck",
+                 .l = &pll_layout_frac,
+                 .t = PLL_TYPE_FRAC,
+                 .c = 1, },
+
+               { .n = "ddrpll_divpmcck",
+                 .p = "ddrpll_fracck",
+                 .l = &pll_layout_divpmc,
+                 .t = PLL_TYPE_DIV,
+                 .c = 1, },
+       },
+
+       [PLL_ID_IMG] = {
+               { .n = "imgpll_fracck",
+                 .p = "mainck",
+                 .l = &pll_layout_frac,
+                 .t = PLL_TYPE_FRAC, },
+
+               { .n = "imgpll_divpmcck",
+                 .p = "imgpll_fracck",
+                 .l = &pll_layout_divpmc,
+                 .t = PLL_TYPE_DIV, },
+       },
+
+       [PLL_ID_BAUD] = {
+               { .n = "baudpll_fracck",
+                 .p = "mainck",
+                 .l = &pll_layout_frac,
+                 .t = PLL_TYPE_FRAC, },
+
+               { .n = "baudpll_divpmcck",
+                 .p = "baudpll_fracck",
+                 .l = &pll_layout_divpmc,
+                 .t = PLL_TYPE_DIV, },
+       },
+
+       [PLL_ID_AUDIO] = {
+               { .n = "audiopll_fracck",
+                 .p = "main_xtal",
+                 .l = &pll_layout_frac,
+                 .t = PLL_TYPE_FRAC, },
+
+               { .n = "audiopll_divpmcck",
+                 .p = "audiopll_fracck",
+                 .l = &pll_layout_divpmc,
+                 .t = PLL_TYPE_DIV,
+                 .eid = PMC_I2S0_MUX, },
+
+               { .n = "audiopll_diviock",
+                 .p = "audiopll_fracck",
+                 .l = &pll_layout_divio,
+                 .t = PLL_TYPE_DIV,
+                 .eid = PMC_I2S1_MUX, },
+       },
+
+       [PLL_ID_ETH] = {
+               { .n = "ethpll_fracck",
+                 .p = "main_xtal",
+                 .l = &pll_layout_frac,
+                 .t = PLL_TYPE_FRAC, },
+
+               { .n = "ethpll_divpmcck",
+                 .p = "ethpll_fracck",
+                 .l = &pll_layout_divpmc,
+                 .t = PLL_TYPE_DIV, },
+       },
+};
+
+/**
+ * Master clock (MCK[1..4]) description
+ * @n:                 clock name
+ * @ep:                        extra parents names array
+ * @ep_chg_id:         index in parents array that specifies the changeable
+ *                     parent
+ * @ep_count:          extra parents count
+ * @ep_mux_table:      mux table for extra parents
+ * @id:                        clock id
+ * @c:                 true if clock is critical and cannot be disabled
+ */
+static const struct {
+       const char *n;
+       const char *ep[4];
+       int ep_chg_id;
+       u8 ep_count;
+       u8 ep_mux_table[4];
+       u8 id;
+       u8 c;
+} sama7g5_mckx[] = {
+       { .n = "mck1",
+         .id = 1,
+         .ep = { "syspll_divpmcck", },
+         .ep_mux_table = { 5, },
+         .ep_count = 1,
+         .ep_chg_id = INT_MIN,
+         .c = 1, },
+
+       { .n = "mck2",
+         .id = 2,
+         .ep = { "ddrpll_divpmcck", },
+         .ep_mux_table = { 6, },
+         .ep_count = 1,
+         .ep_chg_id = INT_MIN,
+         .c = 1, },
+
+       { .n = "mck3",
+         .id = 3,
+         .ep = { "syspll_divpmcck", "ddrpll_divpmcck", "imgpll_divpmcck", },
+         .ep_mux_table = { 5, 6, 7, },
+         .ep_count = 3,
+         .ep_chg_id = 6, },
+
+       { .n = "mck4",
+         .id = 4,
+         .ep = { "syspll_divpmcck", },
+         .ep_mux_table = { 5, },
+         .ep_count = 1,
+         .ep_chg_id = INT_MIN,
+         .c = 1, },
+};
+
+/**
+ * System clock description
+ * @n: clock name
+ * @p: clock parent name
+ * @id: clock id
+ */
+static const struct {
+       const char *n;
+       const char *p;
+       u8 id;
+} sama7g5_systemck[] = {
+       { .n = "pck0",          .p = "prog0", .id = 8, },
+       { .n = "pck1",          .p = "prog1", .id = 9, },
+       { .n = "pck2",          .p = "prog2", .id = 10, },
+       { .n = "pck3",          .p = "prog3", .id = 11, },
+       { .n = "pck4",          .p = "prog4", .id = 12, },
+       { .n = "pck5",          .p = "prog5", .id = 13, },
+       { .n = "pck6",          .p = "prog6", .id = 14, },
+       { .n = "pck7",          .p = "prog7", .id = 15, },
+};
+
+/* Mux table for programmable clocks. */
+static u32 sama7g5_prog_mux_table[] = { 0, 1, 2, 3, 5, 6, 7, 8, 9, 10, };
+
+/**
+ * Peripheral clock description
+ * @n:         clock name
+ * @p:         clock parent name
+ * @r:         clock range values
+ * @id:                clock id
+ * @chgp:      index in parent array of the changeable parent
+ */
+static const struct {
+       const char *n;
+       const char *p;
+       struct clk_range r;
+       u8 chgp;
+       u8 id;
+} sama7g5_periphck[] = {
+       { .n = "pioA_clk",      .p = "mck0", .id = 11, },
+       { .n = "sfr_clk",       .p = "mck1", .id = 19, },
+       { .n = "hsmc_clk",      .p = "mck1", .id = 21, },
+       { .n = "xdmac0_clk",    .p = "mck1", .id = 22, },
+       { .n = "xdmac1_clk",    .p = "mck1", .id = 23, },
+       { .n = "xdmac2_clk",    .p = "mck1", .id = 24, },
+       { .n = "acc_clk",       .p = "mck1", .id = 25, },
+       { .n = "aes_clk",       .p = "mck1", .id = 27, },
+       { .n = "tzaesbasc_clk", .p = "mck1", .id = 28, },
+       { .n = "asrc_clk",      .p = "mck1", .id = 30, .r = { .max = 200000000, }, },
+       { .n = "cpkcc_clk",     .p = "mck0", .id = 32, },
+       { .n = "csi_clk",       .p = "mck3", .id = 33, .r = { .max = 266000000, }, .chgp = 1, },
+       { .n = "csi2dc_clk",    .p = "mck3", .id = 34, .r = { .max = 266000000, }, .chgp = 1, },
+       { .n = "eic_clk",       .p = "mck1", .id = 37, },
+       { .n = "flex0_clk",     .p = "mck1", .id = 38, },
+       { .n = "flex1_clk",     .p = "mck1", .id = 39, },
+       { .n = "flex2_clk",     .p = "mck1", .id = 40, },
+       { .n = "flex3_clk",     .p = "mck1", .id = 41, },
+       { .n = "flex4_clk",     .p = "mck1", .id = 42, },
+       { .n = "flex5_clk",     .p = "mck1", .id = 43, },
+       { .n = "flex6_clk",     .p = "mck1", .id = 44, },
+       { .n = "flex7_clk",     .p = "mck1", .id = 45, },
+       { .n = "flex8_clk",     .p = "mck1", .id = 46, },
+       { .n = "flex9_clk",     .p = "mck1", .id = 47, },
+       { .n = "flex10_clk",    .p = "mck1", .id = 48, },
+       { .n = "flex11_clk",    .p = "mck1", .id = 49, },
+       { .n = "gmac0_clk",     .p = "mck1", .id = 51, },
+       { .n = "gmac1_clk",     .p = "mck1", .id = 52, },
+       { .n = "icm_clk",       .p = "mck1", .id = 55, },
+       { .n = "isc_clk",       .p = "mck3", .id = 56, .r = { .max = 266000000, }, .chgp = 1, },
+       { .n = "i2smcc0_clk",   .p = "mck1", .id = 57, .r = { .max = 200000000, }, },
+       { .n = "i2smcc1_clk",   .p = "mck1", .id = 58, .r = { .max = 200000000, }, },
+       { .n = "matrix_clk",    .p = "mck1", .id = 60, },
+       { .n = "mcan0_clk",     .p = "mck1", .id = 61, .r = { .max = 200000000, }, },
+       { .n = "mcan1_clk",     .p = "mck1", .id = 62, .r = { .max = 200000000, }, },
+       { .n = "mcan2_clk",     .p = "mck1", .id = 63, .r = { .max = 200000000, }, },
+       { .n = "mcan3_clk",     .p = "mck1", .id = 64, .r = { .max = 200000000, }, },
+       { .n = "mcan4_clk",     .p = "mck1", .id = 65, .r = { .max = 200000000, }, },
+       { .n = "mcan5_clk",     .p = "mck1", .id = 66, .r = { .max = 200000000, }, },
+       { .n = "pdmc0_clk",     .p = "mck1", .id = 68, .r = { .max = 200000000, }, },
+       { .n = "pdmc1_clk",     .p = "mck1", .id = 69, .r = { .max = 200000000, }, },
+       { .n = "pit64b0_clk",   .p = "mck1", .id = 70, },
+       { .n = "pit64b1_clk",   .p = "mck1", .id = 71, },
+       { .n = "pit64b2_clk",   .p = "mck1", .id = 72, },
+       { .n = "pit64b3_clk",   .p = "mck1", .id = 73, },
+       { .n = "pit64b4_clk",   .p = "mck1", .id = 74, },
+       { .n = "pit64b5_clk",   .p = "mck1", .id = 75, },
+       { .n = "pwm_clk",       .p = "mck1", .id = 77, },
+       { .n = "qspi0_clk",     .p = "mck1", .id = 78, },
+       { .n = "qspi1_clk",     .p = "mck1", .id = 79, },
+       { .n = "sdmmc0_clk",    .p = "mck1", .id = 80, },
+       { .n = "sdmmc1_clk",    .p = "mck1", .id = 81, },
+       { .n = "sdmmc2_clk",    .p = "mck1", .id = 82, },
+       { .n = "sha_clk",       .p = "mck1", .id = 83, },
+       { .n = "spdifrx_clk",   .p = "mck1", .id = 84, .r = { .max = 200000000, }, },
+       { .n = "spdiftx_clk",   .p = "mck1", .id = 85, .r = { .max = 200000000, }, },
+       { .n = "ssc0_clk",      .p = "mck1", .id = 86, .r = { .max = 200000000, }, },
+       { .n = "ssc1_clk",      .p = "mck1", .id = 87, .r = { .max = 200000000, }, },
+       { .n = "tcb0_ch0_clk",  .p = "mck1", .id = 88, .r = { .max = 200000000, }, },
+       { .n = "tcb0_ch1_clk",  .p = "mck1", .id = 89, .r = { .max = 200000000, }, },
+       { .n = "tcb0_ch2_clk",  .p = "mck1", .id = 90, .r = { .max = 200000000, }, },
+       { .n = "tcb1_ch0_clk",  .p = "mck1", .id = 91, .r = { .max = 200000000, }, },
+       { .n = "tcb1_ch1_clk",  .p = "mck1", .id = 92, .r = { .max = 200000000, }, },
+       { .n = "tcb1_ch2_clk",  .p = "mck1", .id = 93, .r = { .max = 200000000, }, },
+       { .n = "tcpca_clk",     .p = "mck1", .id = 94, },
+       { .n = "tcpcb_clk",     .p = "mck1", .id = 95, },
+       { .n = "tdes_clk",      .p = "mck1", .id = 96, },
+       { .n = "trng_clk",      .p = "mck1", .id = 97, },
+       { .n = "udphsa_clk",    .p = "mck1", .id = 104, },
+       { .n = "udphsb_clk",    .p = "mck1", .id = 105, },
+       { .n = "uhphs_clk",     .p = "mck1", .id = 106, },
+};
+
+/**
+ * Generic clock description
+ * @n:                 clock name
+ * @pp:                        PLL parents
+ * @pp_mux_table:      PLL parents mux table
+ * @r:                 clock output range
+ * @pp_chg_id:         index in parent array of the changeable PLL parent
+ * @pp_count:          PLL parents count
+ * @id:                        clock id
+ */
+static const struct {
+       const char *n;
+       const char *pp[8];
+       const char pp_mux_table[8];
+       struct clk_range r;
+       int pp_chg_id;
+       u8 pp_count;
+       u8 id;
+} sama7g5_gck[] = {
+       { .n  = "adc_gclk",
+         .id = 26,
+         .r = { .max = 100000000, },
+         .pp = { "syspll_divpmcck", "imgpll_divpmcck", "audiopll_divpmcck", },
+         .pp_mux_table = { 5, 7, 9, },
+         .pp_count = 3,
+         .pp_chg_id = INT_MIN, },
+
+       { .n  = "asrc_gclk",
+         .id = 30,
+         .r = { .max = 200000000 },
+         .pp = { "audiopll_divpmcck", },
+         .pp_mux_table = { 9, },
+         .pp_count = 1,
+         .pp_chg_id = 4, },
+
+       { .n  = "csi_gclk",
+         .id = 33,
+         .r = { .max = 27000000  },
+         .pp = { "ddrpll_divpmcck", "imgpll_divpmcck", },
+         .pp_mux_table = { 6, 7, },
+         .pp_count = 2,
+         .pp_chg_id = INT_MIN, },
+
+       { .n  = "flex0_gclk",
+         .id = 38,
+         .r = { .max = 200000000 },
+         .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
+         .pp_mux_table = { 5, 8, },
+         .pp_count = 2,
+         .pp_chg_id = INT_MIN, },
+
+       { .n  = "flex1_gclk",
+         .id = 39,
+         .r = { .max = 200000000 },
+         .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
+         .pp_mux_table = { 5, 8, },
+         .pp_count = 2,
+         .pp_chg_id = INT_MIN, },
+
+       { .n  = "flex2_gclk",
+         .id = 40,
+         .r = { .max = 200000000 },
+         .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
+         .pp_mux_table = { 5, 8, },
+         .pp_count = 2,
+         .pp_chg_id = INT_MIN, },
+
+       { .n  = "flex3_gclk",
+         .id = 41,
+         .r = { .max = 200000000 },
+         .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
+         .pp_mux_table = { 5, 8, },
+         .pp_count = 2,
+         .pp_chg_id = INT_MIN, },
+
+       { .n  = "flex4_gclk",
+         .id = 42,
+         .r = { .max = 200000000 },
+         .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
+         .pp_mux_table = { 5, 8, },
+         .pp_count = 2,
+         .pp_chg_id = INT_MIN, },
+
+       { .n  = "flex5_gclk",
+         .id = 43,
+         .r = { .max = 200000000 },
+         .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
+         .pp_mux_table = { 5, 8, },
+         .pp_count = 2,
+         .pp_chg_id = INT_MIN, },
+
+       { .n  = "flex6_gclk",
+         .id = 44,
+         .r = { .max = 200000000 },
+         .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
+         .pp_mux_table = { 5, 8, },
+         .pp_count = 2,
+         .pp_chg_id = INT_MIN, },
+
+       { .n  = "flex7_gclk",
+         .id = 45,
+         .r = { .max = 200000000 },
+         .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
+         .pp_mux_table = { 5, 8, },
+         .pp_count = 2,
+         .pp_chg_id = INT_MIN, },
+
+       { .n  = "flex8_gclk",
+         .id = 46,
+         .r = { .max = 200000000 },
+         .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
+         .pp_mux_table = { 5, 8, },
+         .pp_count = 2,
+         .pp_chg_id = INT_MIN, },
+
+       { .n  = "flex9_gclk",
+         .id = 47,
+         .r = { .max = 200000000 },
+         .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
+         .pp_mux_table = { 5, 8, },
+         .pp_count = 2,
+         .pp_chg_id = INT_MIN, },
+
+       { .n  = "flex10_gclk",
+         .id = 48,
+         .r = { .max = 200000000 },
+         .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
+         .pp_mux_table = { 5, 8, },
+         .pp_count = 2,
+         .pp_chg_id = INT_MIN, },
+
+       { .n  = "flex11_gclk",
+         .id = 49,
+         .r = { .max = 200000000 },
+         .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
+         .pp_mux_table = { 5, 8, },
+         .pp_count = 2,
+         .pp_chg_id = INT_MIN, },
+
+       { .n  = "gmac0_gclk",
+         .id = 51,
+         .r = { .max = 125000000 },
+         .pp = { "ethpll_divpmcck", },
+         .pp_mux_table = { 10, },
+         .pp_count = 1,
+         .pp_chg_id = 4, },
+
+       { .n  = "gmac1_gclk",
+         .id = 52,
+         .r = { .max = 50000000  },
+         .pp = { "ethpll_divpmcck", },
+         .pp_mux_table = { 10, },
+         .pp_count = 1,
+         .pp_chg_id = INT_MIN, },
+
+       { .n  = "gmac0_tsu_gclk",
+         .id = 53,
+         .r = { .max = 300000000 },
+         .pp = { "audiopll_divpmcck", "ethpll_divpmcck", },
+         .pp_mux_table = { 9, 10, },
+         .pp_count = 2,
+         .pp_chg_id = INT_MIN, },
+
+       { .n  = "gmac1_tsu_gclk",
+         .id = 54,
+         .r = { .max = 300000000 },
+         .pp = { "audiopll_divpmcck", "ethpll_divpmcck", },
+         .pp_mux_table = { 9, 10, },
+         .pp_count = 2,
+         .pp_chg_id = INT_MIN, },
+
+       { .n  = "i2smcc0_gclk",
+         .id = 57,
+         .r = { .max = 100000000 },
+         .pp = { "syspll_divpmcck", "audiopll_divpmcck", },
+         .pp_mux_table = { 5, 9, },
+         .pp_count = 2,
+         .pp_chg_id = 5, },
+
+       { .n  = "i2smcc1_gclk",
+         .id = 58,
+         .r = { .max = 100000000 },
+         .pp = { "syspll_divpmcck", "audiopll_divpmcck", },
+         .pp_mux_table = { 5, 9, },
+         .pp_count = 2,
+         .pp_chg_id = 5, },
+
+       { .n  = "mcan0_gclk",
+         .id = 61,
+         .r = { .max = 200000000 },
+         .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
+         .pp_mux_table = { 5, 8, },
+         .pp_count = 2,
+         .pp_chg_id = INT_MIN, },
+
+       { .n  = "mcan1_gclk",
+         .id = 62,
+         .r = { .max = 200000000 },
+         .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
+         .pp_mux_table = { 5, 8, },
+         .pp_count = 2,
+         .pp_chg_id = INT_MIN, },
+
+       { .n  = "mcan2_gclk",
+         .id = 63,
+         .r = { .max = 200000000 },
+         .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
+         .pp_mux_table = { 5, 8, },
+         .pp_count = 2,
+         .pp_chg_id = INT_MIN, },
+
+       { .n  = "mcan3_gclk",
+         .id = 64,
+         .r = { .max = 200000000 },
+         .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
+         .pp_mux_table = { 5, 8, },
+         .pp_count = 2,
+         .pp_chg_id = INT_MIN, },
+
+       { .n  = "mcan4_gclk",
+         .id = 65,
+         .r = { .max = 200000000 },
+         .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
+         .pp_mux_table = { 5, 8, },
+         .pp_count = 2,
+         .pp_chg_id = INT_MIN, },
+
+       { .n  = "mcan5_gclk",
+         .id = 66,
+         .r = { .max = 200000000 },
+         .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
+         .pp_mux_table = { 5, 8, },
+         .pp_count = 2,
+         .pp_chg_id = INT_MIN, },
+
+       { .n  = "pdmc0_gclk",
+         .id = 68,
+         .r = { .max = 50000000  },
+         .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
+         .pp_mux_table = { 5, 8, },
+         .pp_count = 2,
+         .pp_chg_id = INT_MIN, },
+
+       { .n  = "pdmc1_gclk",
+         .id = 69,
+         .r = { .max = 50000000, },
+         .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
+         .pp_mux_table = { 5, 8, },
+         .pp_count = 2,
+         .pp_chg_id = INT_MIN, },
+
+       { .n  = "pit64b0_gclk",
+         .id = 70,
+         .r = { .max = 200000000 },
+         .pp = { "syspll_divpmcck", "imgpll_divpmcck", "baudpll_divpmcck",
+                 "audiopll_divpmcck", "ethpll_divpmcck", },
+         .pp_mux_table = { 5, 7, 8, 9, 10, },
+         .pp_count = 5,
+         .pp_chg_id = INT_MIN, },
+
+       { .n  = "pit64b1_gclk",
+         .id = 71,
+         .r = { .max = 200000000 },
+         .pp = { "syspll_divpmcck", "imgpll_divpmcck", "baudpll_divpmcck",
+                 "audiopll_divpmcck", "ethpll_divpmcck", },
+         .pp_mux_table = { 5, 7, 8, 9, 10, },
+         .pp_count = 5,
+         .pp_chg_id = INT_MIN, },
+
+       { .n  = "pit64b2_gclk",
+         .id = 72,
+         .r = { .max = 200000000 },
+         .pp = { "syspll_divpmcck", "imgpll_divpmcck", "baudpll_divpmcck",
+                 "audiopll_divpmcck", "ethpll_divpmcck", },
+         .pp_mux_table = { 5, 7, 8, 9, 10, },
+         .pp_count = 5,
+         .pp_chg_id = INT_MIN, },
+
+       { .n  = "pit64b3_gclk",
+         .id = 73,
+         .r = { .max = 200000000 },
+         .pp = { "syspll_divpmcck", "imgpll_divpmcck", "baudpll_divpmcck",
+                 "audiopll_divpmcck", "ethpll_divpmcck", },
+         .pp_mux_table = { 5, 7, 8, 9, 10, },
+         .pp_count = 5,
+         .pp_chg_id = INT_MIN, },
+
+       { .n  = "pit64b4_gclk",
+         .id = 74,
+         .r = { .max = 200000000 },
+         .pp = { "syspll_divpmcck", "imgpll_divpmcck", "baudpll_divpmcck",
+                 "audiopll_divpmcck", "ethpll_divpmcck", },
+         .pp_mux_table = { 5, 7, 8, 9, 10, },
+         .pp_count = 5,
+         .pp_chg_id = INT_MIN, },
+
+       { .n  = "pit64b5_gclk",
+         .id = 75,
+         .r = { .max = 200000000 },
+         .pp = { "syspll_divpmcck", "imgpll_divpmcck", "baudpll_divpmcck",
+                 "audiopll_divpmcck", "ethpll_divpmcck", },
+         .pp_mux_table = { 5, 7, 8, 9, 10, },
+         .pp_count = 5,
+         .pp_chg_id = INT_MIN, },
+
+       { .n  = "qspi0_gclk",
+         .id = 78,
+         .r = { .max = 200000000 },
+         .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
+         .pp_mux_table = { 5, 8, },
+         .pp_count = 2,
+         .pp_chg_id = INT_MIN, },
+
+       { .n  = "qspi1_gclk",
+         .id = 79,
+         .r = { .max = 200000000 },
+         .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
+         .pp_mux_table = { 5, 8, },
+         .pp_count = 2,
+         .pp_chg_id = INT_MIN, },
+
+       { .n  = "sdmmc0_gclk",
+         .id = 80,
+         .r = { .max = 208000000 },
+         .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
+         .pp_mux_table = { 5, 8, },
+         .pp_count = 2,
+         .pp_chg_id = 5, },
+
+       { .n  = "sdmmc1_gclk",
+         .id = 81,
+         .r = { .max = 208000000 },
+         .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
+         .pp_mux_table = { 5, 8, },
+         .pp_count = 2,
+         .pp_chg_id = 5, },
+
+       { .n  = "sdmmc2_gclk",
+         .id = 82,
+         .r = { .max = 208000000 },
+         .pp = { "syspll_divpmcck", "baudpll_divpmcck", },
+         .pp_mux_table = { 5, 8, },
+         .pp_count = 2,
+         .pp_chg_id = 5, },
+
+       { .n  = "spdifrx_gclk",
+         .id = 84,
+         .r = { .max = 150000000 },
+         .pp = { "syspll_divpmcck", "audiopll_divpmcck", },
+         .pp_mux_table = { 5, 9, },
+         .pp_count = 2,
+         .pp_chg_id = 5, },
+
+       { .n = "spdiftx_gclk",
+         .id = 85,
+         .r = { .max = 25000000  },
+         .pp = { "syspll_divpmcck", "audiopll_divpmcck", },
+         .pp_mux_table = { 5, 9, },
+         .pp_count = 2,
+         .pp_chg_id = 5, },
+
+       { .n  = "tcb0_ch0_gclk",
+         .id = 88,
+         .r = { .max = 200000000 },
+         .pp = { "syspll_divpmcck", "imgpll_divpmcck", "baudpll_divpmcck",
+                 "audiopll_divpmcck", "ethpll_divpmcck", },
+         .pp_mux_table = { 5, 7, 8, 9, 10, },
+         .pp_count = 5,
+         .pp_chg_id = INT_MIN, },
+
+       { .n  = "tcb1_ch0_gclk",
+         .id = 91,
+         .r = { .max = 200000000 },
+         .pp = { "syspll_divpmcck", "imgpll_divpmcck", "baudpll_divpmcck",
+                 "audiopll_divpmcck", "ethpll_divpmcck", },
+         .pp_mux_table = { 5, 7, 8, 9, 10, },
+         .pp_count = 5,
+         .pp_chg_id = INT_MIN, },
+
+       { .n  = "tcpca_gclk",
+         .id = 94,
+         .r = { .max = 32768, },
+         .pp_chg_id = INT_MIN, },
+
+       { .n  = "tcpcb_gclk",
+         .id = 95,
+         .r = { .max = 32768, },
+         .pp_chg_id = INT_MIN, },
+};
+
+/* PLL output range. */
+static const struct clk_range pll_outputs[] = {
+       { .min = 2343750, .max = 1200000000 },
+};
+
+/* PLL characteristics. */
+static const struct clk_pll_characteristics pll_characteristics = {
+       .input = { .min = 12000000, .max = 50000000 },
+       .num_output = ARRAY_SIZE(pll_outputs),
+       .output = pll_outputs,
+};
+
+/* MCK0 characteristics. */
+static const struct clk_master_characteristics mck0_characteristics = {
+       .output = { .min = 140000000, .max = 200000000 },
+       .divisors = { 1, 2, 4, 3 },
+       .have_div3_pres = 1,
+};
+
+/* MCK0 layout. */
+static const struct clk_master_layout mck0_layout = {
+       .mask = 0x373,
+       .pres_shift = 4,
+       .offset = 0x28,
+};
+
+/* Programmable clock layout. */
+static const struct clk_programmable_layout programmable_layout = {
+       .pres_mask = 0xff,
+       .pres_shift = 8,
+       .css_mask = 0x1f,
+       .have_slck_mck = 0,
+       .is_pres_direct = 1,
+};
+
+/* Peripheral clock layout. */
+static const struct clk_pcr_layout sama7g5_pcr_layout = {
+       .offset = 0x88,
+       .cmd = BIT(31),
+       .gckcss_mask = GENMASK(12, 8),
+       .pid_mask = GENMASK(6, 0),
+};
+
+static void __init sama7g5_pmc_setup(struct device_node *np)
+{
+       const char *td_slck_name, *md_slck_name, *mainxtal_name;
+       struct pmc_data *sama7g5_pmc;
+       const char *parent_names[10];
+       void **alloc_mem = NULL;
+       int alloc_mem_size = 0;
+       struct regmap *regmap;
+       struct clk_hw *hw;
+       bool bypass;
+       int i, j;
+
+       i = of_property_match_string(np, "clock-names", "td_slck");
+       if (i < 0)
+               return;
+
+       td_slck_name = of_clk_get_parent_name(np, i);
+
+       i = of_property_match_string(np, "clock-names", "md_slck");
+       if (i < 0)
+               return;
+
+       md_slck_name = of_clk_get_parent_name(np, i);
+
+       i = of_property_match_string(np, "clock-names", "main_xtal");
+       if (i < 0)
+               return;
+
+       mainxtal_name = of_clk_get_parent_name(np, i);
+
+       regmap = device_node_to_regmap(np);
+       if (IS_ERR(regmap))
+               return;
+
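+       /* Reserve core clock slots up to PMC_I2S1_MUX plus the system, peripheral and generic clocks. */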
+       sama7g5_pmc = pmc_data_allocate(PMC_I2S1_MUX + 1,
+                                       nck(sama7g5_systemck),
+                                       nck(sama7g5_periphck),
+                                       nck(sama7g5_gck));
+       if (!sama7g5_pmc)
+               return;
+
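+       /* Track the dynamically allocated mux tables so they can be freed if setup fails. */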
+       alloc_mem = kmalloc(sizeof(void *) *
+                           (ARRAY_SIZE(sama7g5_mckx) + ARRAY_SIZE(sama7g5_gck)),
+                           GFP_KERNEL);
+       if (!alloc_mem)
+               goto err_free;
+
+       hw = at91_clk_register_main_rc_osc(regmap, "main_rc_osc", 12000000,
+                                          50000000);
+       if (IS_ERR(hw))
+               goto err_free;
+
+       bypass = of_property_read_bool(np, "atmel,osc-bypass");
+
+       hw = at91_clk_register_main_osc(regmap, "main_osc", mainxtal_name,
+                                       bypass);
+       if (IS_ERR(hw))
+               goto err_free;
+
+       parent_names[0] = "main_rc_osc";
+       parent_names[1] = "main_osc";
+       hw = at91_clk_register_sam9x5_main(regmap, "mainck", parent_names, 2);
+       if (IS_ERR(hw))
+               goto err_free;
+
+       sama7g5_pmc->chws[PMC_MAIN] = hw;
+
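+       /* Register each PLL: the fractional block first, then its divider(s). */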
+       for (i = 0; i < PLL_ID_MAX; i++) {
+               for (j = 0; j < 3; j++) {
+                       struct clk_hw *parent_hw;
+
+                       if (!sama7g5_plls[i][j].n)
+                               continue;
+
+                       switch (sama7g5_plls[i][j].t) {
+                       case PLL_TYPE_FRAC:
+                               if (!strcmp(sama7g5_plls[i][j].p, "mainck"))
+                                       parent_hw = sama7g5_pmc->chws[PMC_MAIN];
+                               else
+                                       parent_hw = __clk_get_hw(of_clk_get_by_name(np,
+                                               sama7g5_plls[i][j].p));
+
+                               hw = sam9x60_clk_register_frac_pll(regmap,
+                                       &pmc_pll_lock, sama7g5_plls[i][j].n,
+                                       sama7g5_plls[i][j].p, parent_hw, i,
+                                       &pll_characteristics,
+                                       sama7g5_plls[i][j].l,
+                                       sama7g5_plls[i][j].c);
+                               break;
+
+                       case PLL_TYPE_DIV:
+                               hw = sam9x60_clk_register_div_pll(regmap,
+                                       &pmc_pll_lock, sama7g5_plls[i][j].n,
+                                       sama7g5_plls[i][j].p, i,
+                                       &pll_characteristics,
+                                       sama7g5_plls[i][j].l,
+                                       sama7g5_plls[i][j].c);
+                               break;
+
+                       default:
+                               continue;
+                       }
+
+                       if (IS_ERR(hw))
+                               goto err_free;
+
+                       if (sama7g5_plls[i][j].eid)
+                               sama7g5_pmc->chws[sama7g5_plls[i][j].eid] = hw;
+               }
+       }
+
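+       /* MCK0: generic master clock fed by md_slck, mainck, cpupll_divpmcck and syspll_divpmcck. */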
+       parent_names[0] = md_slck_name;
+       parent_names[1] = "mainck";
+       parent_names[2] = "cpupll_divpmcck";
+       parent_names[3] = "syspll_divpmcck";
+       hw = at91_clk_register_master(regmap, "mck0", 4, parent_names,
+                                     &mck0_layout, &mck0_characteristics);
+       if (IS_ERR(hw))
+               goto err_free;
+
+       sama7g5_pmc->chws[PMC_MCK] = hw;
+
+       parent_names[0] = md_slck_name;
+       parent_names[1] = td_slck_name;
+       parent_names[2] = "mainck";
+       parent_names[3] = "mck0";
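+       /* MCK1..4: extend the four common parents above with per-clock PLL parents and mux indices. */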
+       for (i = 0; i < ARRAY_SIZE(sama7g5_mckx); i++) {
+               u8 num_parents = 4 + sama7g5_mckx[i].ep_count;
+               u32 *mux_table;
+
+               mux_table = kmalloc_array(num_parents, sizeof(*mux_table),
+                                         GFP_KERNEL);
+               if (!mux_table)
+                       goto err_free;
+
+               SAMA7G5_INIT_TABLE(mux_table, 4);
+               SAMA7G5_FILL_TABLE(&mux_table[4], sama7g5_mckx[i].ep_mux_table,
+                                  sama7g5_mckx[i].ep_count);
+               SAMA7G5_FILL_TABLE(&parent_names[4], sama7g5_mckx[i].ep,
+                                  sama7g5_mckx[i].ep_count);
+
+               hw = at91_clk_sama7g5_register_master(regmap, sama7g5_mckx[i].n,
+                                  num_parents, parent_names, mux_table,
+                                  &pmc_mckX_lock, sama7g5_mckx[i].id,
+                                  sama7g5_mckx[i].c,
+                                  sama7g5_mckx[i].ep_chg_id);
+               if (IS_ERR(hw))
+                       goto err_free;
+
+               alloc_mem[alloc_mem_size++] = mux_table;
+       }
+
+       hw = at91_clk_sama7g5_register_utmi(regmap, "utmick", "main_xtal");
+       if (IS_ERR(hw))
+               goto err_free;
+
+       sama7g5_pmc->chws[PMC_UTMI] = hw;
+
+       parent_names[0] = md_slck_name;
+       parent_names[1] = td_slck_name;
+       parent_names[2] = "mainck";
+       parent_names[3] = "mck0";
+       parent_names[4] = "syspll_divpmcck";
+       parent_names[5] = "ddrpll_divpmcck";
+       parent_names[6] = "imgpll_divpmcck";
+       parent_names[7] = "baudpll_divpmcck";
+       parent_names[8] = "audiopll_divpmcck";
+       parent_names[9] = "ethpll_divpmcck";
+       for (i = 0; i < 8; i++) {
+               char name[6];
+
+               snprintf(name, sizeof(name), "prog%d", i);
+
+               hw = at91_clk_register_programmable(regmap, name, parent_names,
+                                                   10, i,
+                                                   &programmable_layout,
+                                                   sama7g5_prog_mux_table);
+               if (IS_ERR(hw))
+                       goto err_free;
+       }
+
+       for (i = 0; i < ARRAY_SIZE(sama7g5_systemck); i++) {
+               hw = at91_clk_register_system(regmap, sama7g5_systemck[i].n,
+                                             sama7g5_systemck[i].p,
+                                             sama7g5_systemck[i].id);
+               if (IS_ERR(hw))
+                       goto err_free;
+
+               sama7g5_pmc->shws[sama7g5_systemck[i].id] = hw;
+       }
+
+       for (i = 0; i < ARRAY_SIZE(sama7g5_periphck); i++) {
+               hw = at91_clk_register_sam9x5_peripheral(regmap, &pmc_pcr_lock,
+                                               &sama7g5_pcr_layout,
+                                               sama7g5_periphck[i].n,
+                                               sama7g5_periphck[i].p,
+                                               sama7g5_periphck[i].id,
+                                               &sama7g5_periphck[i].r,
+                                               sama7g5_periphck[i].chgp ? 0 :
+                                               INT_MIN);
+               if (IS_ERR(hw))
+                       goto err_free;
+
+               sama7g5_pmc->phws[sama7g5_periphck[i].id] = hw;
+       }
+
+       parent_names[0] = md_slck_name;
+       parent_names[1] = td_slck_name;
+       parent_names[2] = "mainck";
+       parent_names[3] = "mck0";
+       for (i = 0; i < ARRAY_SIZE(sama7g5_gck); i++) {
+               u8 num_parents = 4 + sama7g5_gck[i].pp_count;
+               u32 *mux_table;
+
+               mux_table = kmalloc_array(num_parents, sizeof(*mux_table),
+                                         GFP_KERNEL);
+               if (!mux_table)
+                       goto err_free;
+
+               SAMA7G5_INIT_TABLE(mux_table, 4);
+               SAMA7G5_FILL_TABLE(&mux_table[4], sama7g5_gck[i].pp_mux_table,
+                                  sama7g5_gck[i].pp_count);
+               SAMA7G5_FILL_TABLE(&parent_names[4], sama7g5_gck[i].pp,
+                                  sama7g5_gck[i].pp_count);
+
+               hw = at91_clk_register_generated(regmap, &pmc_pcr_lock,
+                                                &sama7g5_pcr_layout,
+                                                sama7g5_gck[i].n,
+                                                parent_names, mux_table,
+                                                num_parents,
+                                                sama7g5_gck[i].id,
+                                                &sama7g5_gck[i].r,
+                                                sama7g5_gck[i].pp_chg_id);
+               if (IS_ERR(hw))
+                       goto err_free;
+
+               sama7g5_pmc->ghws[sama7g5_gck[i].id] = hw;
+               alloc_mem[alloc_mem_size++] = mux_table;
+       }
+
+       of_clk_add_hw_provider(np, of_clk_hw_pmc_get, sama7g5_pmc);
+
+       return;
+
+err_free:
+       if (alloc_mem) {
+               for (i = 0; i < alloc_mem_size; i++)
+                       kfree(alloc_mem[i]);
+               kfree(alloc_mem);
+       }
+
+       pmc_data_free(sama7g5_pmc);
+}
+
+/* Some clocks are used by a clocksource and must be registered early */
+CLK_OF_DECLARE(sama7g5_pmc, "microchip,sama7g5-pmc", sama7g5_pmc_setup);
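For context on the two mux-table loops above: positions 0-3 of every table carry the common parents (md_slck, td_slck, mainck, mck0), and each clock's extra parents are appended after them. A stand-alone sketch of that construction, assuming SAMA7G5_INIT_TABLE() fills sequential indices and SAMA7G5_FILL_TABLE() copies a source array, as their usage suggests (the macro re-definitions and mux values below are illustrative only):

#include <stdio.h>

/* Assumed behaviour of the driver's helpers: INIT fills 0..n-1, FILL copies. */
#define INIT_TABLE(t, n)        for (int _i = 0; _i < (n); _i++) (t)[_i] = _i
#define FILL_TABLE(t, s, n)     for (int _i = 0; _i < (n); _i++) (t)[_i] = (s)[_i]

int main(void)
{
        /* Hypothetical master clock with two extra parents whose PMC mux
         * selector values are 5 and 9. */
        unsigned int ep_mux[] = { 5, 9 };
        unsigned int mux_table[4 + 2];

        INIT_TABLE(mux_table, 4);               /* common parents -> 0, 1, 2, 3 */
        FILL_TABLE(&mux_table[4], ep_mux, 2);   /* extra parents appended */

        for (int i = 0; i < 6; i++)
                printf("parent %d -> mux value %u\n", i, mux_table[i]);
        return 0;
}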
index 15dc4cd..2d65770 100644 (file)
@@ -471,8 +471,9 @@ static void __init of_sam9x60_sckc_setup(struct device_node *np)
        if (!regbase)
                return;
 
-       slow_rc = clk_hw_register_fixed_rate(NULL, parent_names[0], NULL, 0,
-                                            32768);
+       slow_rc = clk_hw_register_fixed_rate_with_accuracy(NULL, parent_names[0],
+                                                          NULL, 0, 32768,
+                                                          93750000);
        if (IS_ERR(slow_rc))
                return;
 
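The added accuracy argument is expressed in parts per billion, which is what clk_hw_register_fixed_rate_with_accuracy() expects; assuming the 93750000 ppb above encodes the slow RC oscillator's datasheet tolerance, it corresponds to roughly +/- 9.4 %:

#include <stdio.h>

int main(void)
{
        unsigned long accuracy_ppb = 93750000UL;        /* value from the hunk above */

        /* clk framework accuracy is in parts per billion */
        printf("slow RC tolerance: +/- %.3f %%\n", accuracy_ppb / 1e9 * 100.0);
        /* prints: slow RC tolerance: +/- 9.375 % */
        return 0;
}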
index 027eba3..3439bc6 100644 (file)
@@ -314,6 +314,7 @@ struct bcm2835_cprman {
        struct device *dev;
        void __iomem *regs;
        spinlock_t regs_lock; /* spinlock for all clocks */
+       unsigned int soc;
 
        /*
         * Real names of cprman clock parents looked up through
@@ -526,6 +527,20 @@ static int bcm2835_pll_is_on(struct clk_hw *hw)
                A2W_PLL_CTRL_PRST_DISABLE;
 }
 
+static u32 bcm2835_pll_get_prediv_mask(struct bcm2835_cprman *cprman,
+                                      const struct bcm2835_pll_data *data)
+{
+       /*
+        * On BCM2711 there isn't a pre-divisor available in the PLL feedback
+        * loop. Bits 13:14 of ANA1 (PLLA, PLLB, PLLC, PLLD) have been
+        * repurposed as VCO RANGE bits.
+        */
+       if (cprman->soc & SOC_BCM2711)
+               return 0;
+
+       return data->ana->fb_prediv_mask;
+}
+
 static void bcm2835_pll_choose_ndiv_and_fdiv(unsigned long rate,
                                             unsigned long parent_rate,
                                             u32 *ndiv, u32 *fdiv)
@@ -583,7 +598,7 @@ static unsigned long bcm2835_pll_get_rate(struct clk_hw *hw,
        ndiv = (a2wctrl & A2W_PLL_CTRL_NDIV_MASK) >> A2W_PLL_CTRL_NDIV_SHIFT;
        pdiv = (a2wctrl & A2W_PLL_CTRL_PDIV_MASK) >> A2W_PLL_CTRL_PDIV_SHIFT;
        using_prediv = cprman_read(cprman, data->ana_reg_base + 4) &
-               data->ana->fb_prediv_mask;
+                      bcm2835_pll_get_prediv_mask(cprman, data);
 
        if (using_prediv) {
                ndiv *= 2;
@@ -666,6 +681,7 @@ static int bcm2835_pll_set_rate(struct clk_hw *hw,
        struct bcm2835_pll *pll = container_of(hw, struct bcm2835_pll, hw);
        struct bcm2835_cprman *cprman = pll->cprman;
        const struct bcm2835_pll_data *data = pll->data;
+       u32 prediv_mask = bcm2835_pll_get_prediv_mask(cprman, data);
        bool was_using_prediv, use_fb_prediv, do_ana_setup_first;
        u32 ndiv, fdiv, a2w_ctl;
        u32 ana[4];
@@ -683,7 +699,7 @@ static int bcm2835_pll_set_rate(struct clk_hw *hw,
        for (i = 3; i >= 0; i--)
                ana[i] = cprman_read(cprman, data->ana_reg_base + i * 4);
 
-       was_using_prediv = ana[1] & data->ana->fb_prediv_mask;
+       was_using_prediv = ana[1] & prediv_mask;
 
        ana[0] &= ~data->ana->mask0;
        ana[0] |= data->ana->set0;
@@ -693,10 +709,10 @@ static int bcm2835_pll_set_rate(struct clk_hw *hw,
        ana[3] |= data->ana->set3;
 
        if (was_using_prediv && !use_fb_prediv) {
-               ana[1] &= ~data->ana->fb_prediv_mask;
+               ana[1] &= ~prediv_mask;
                do_ana_setup_first = true;
        } else if (!was_using_prediv && use_fb_prediv) {
-               ana[1] |= data->ana->fb_prediv_mask;
+               ana[1] |= prediv_mask;
                do_ana_setup_first = false;
        } else {
                do_ana_setup_first = true;
@@ -2262,6 +2278,7 @@ static int bcm2835_clk_probe(struct platform_device *pdev)
        platform_set_drvdata(pdev, cprman);
 
        cprman->onecell.num = asize;
+       cprman->soc = pdata->soc;
        hws = cprman->onecell.hws;
 
        for (i = 0; i < asize; i++) {
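A compact model of the behavioural change above: once the BCM2711 pre-divider mask is forced to zero, the ndiv/fdiv doubling path can never be taken no matter what ANA1 holds (the bit position used below is hypothetical):

#include <stdbool.h>
#include <stdio.h>

static unsigned int effective_ndiv(unsigned int ndiv, unsigned int ana1,
                                   unsigned int prediv_mask)
{
        bool using_prediv = ana1 & prediv_mask;

        return using_prediv ? ndiv * 2 : ndiv;
}

int main(void)
{
        unsigned int ana1 = 1u << 14;   /* pretend the repurposed bit is set */

        printf("BCM2835: ndiv = %u\n", effective_ndiv(50, ana1, 1u << 14));     /* 100 */
        printf("BCM2711: ndiv = %u\n", effective_ndiv(50, ana1, 0));            /* 50 */
        return 0;
}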
index 6fb8af5..e062dd4 100644 (file)
@@ -119,7 +119,7 @@ static long iproc_asiu_clk_round_rate(struct clk_hw *hw, unsigned long rate,
        if (rate == *parent_rate)
                return *parent_rate;
 
-       div = DIV_ROUND_UP(*parent_rate, rate);
+       div = DIV_ROUND_CLOSEST(*parent_rate, rate);
        if (div < 2)
                return *parent_rate;
 
@@ -145,7 +145,7 @@ static int iproc_asiu_clk_set_rate(struct clk_hw *hw, unsigned long rate,
                return 0;
        }
 
-       div = DIV_ROUND_UP(parent_rate, rate);
+       div = DIV_ROUND_CLOSEST(parent_rate, rate);
        if (div < 2)
                return -EINVAL;
 
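A quick worked example of why the closest divisor tracks the requested rate better than rounding up (the macros below are simplified, unsigned-only restatements for illustration; the numbers are arbitrary):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))
#define DIV_ROUND_CLOSEST(n, d) (((n) + (d) / 2) / (d))

int main(void)
{
        unsigned long parent = 100000000UL, want = 45000000UL;
        unsigned long up = DIV_ROUND_UP(parent, want);          /* 3 -> 33.3 MHz */
        unsigned long cl = DIV_ROUND_CLOSEST(parent, want);     /* 2 -> 50 MHz */

        printf("round-up:      div=%lu rate=%lu\n", up, parent / up);
        printf("round-closest: div=%lu rate=%lu\n", cl, parent / cl);
        return 0;
}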
index 87fe0b0..86f2e2d 100644 (file)
@@ -89,7 +89,12 @@ static int clk_pwm_probe(struct platform_device *pdev)
        }
 
        if (of_property_read_u32(node, "clock-frequency", &clk_pwm->fixed_rate))
-               clk_pwm->fixed_rate = NSEC_PER_SEC / pargs.period;
+               clk_pwm->fixed_rate = div64_u64(NSEC_PER_SEC, pargs.period);
+
+       if (!clk_pwm->fixed_rate) {
+               dev_err(&pdev->dev, "fixed_rate cannot be zero\n");
+               return -EINVAL;
+       }
 
        if (pargs.period != NSEC_PER_SEC / clk_pwm->fixed_rate &&
            pargs.period != DIV_ROUND_UP(NSEC_PER_SEC, clk_pwm->fixed_rate)) {
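The div64_u64() is presumably needed because the PWM period argument is a 64-bit value, and the new zero check covers periods longer than one second, which the integer division would otherwise turn into a zero rate; a small illustration:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
        /* Hypothetical 2 s PWM period */
        uint64_t period_ns = 2 * NSEC_PER_SEC;
        uint64_t rate = NSEC_PER_SEC / period_ns;       /* 0 -> now rejected with -EINVAL */

        printf("derived rate: %llu Hz\n", (unsigned long long)rate);
        return 0;
}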
index 374afca..5942e98 100644 (file)
@@ -244,6 +244,14 @@ static const struct clockgen_muxinfo clockgen2_cmux_cgb = {
        },
 };
 
+static const struct clockgen_muxinfo ls1021a_cmux = {
+       {
+               { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
+               { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
+               { CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
+       }
+};
+
 static const struct clockgen_muxinfo ls1028a_hwa1 = {
        {
                { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
@@ -577,7 +585,7 @@ static const struct clockgen_chipinfo chipinfo[] = {
        {
                .compat = "fsl,ls1021a-clockgen",
                .cmux_groups = {
-                       &t1023_cmux
+                       &ls1021a_cmux
                },
                .cmux_to_group = {
                        0, -1
diff --git a/drivers/clk/clk-sparx5.c b/drivers/clk/clk-sparx5.c
new file mode 100644 (file)
index 0000000..0fad0c1
--- /dev/null
@@ -0,0 +1,295 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Microchip Sparx5 SoC Clock driver.
+ *
+ * Copyright (c) 2019 Microchip Inc.
+ *
+ * Author: Lars Povlsen <lars.povlsen@microchip.com>
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/clk-provider.h>
+#include <linux/bitfield.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <dt-bindings/clock/microchip,sparx5.h>
+
+#define PLL_DIV                GENMASK(7, 0)
+#define PLL_PRE_DIV    GENMASK(10, 8)
+#define PLL_ROT_DIR    BIT(11)
+#define PLL_ROT_SEL    GENMASK(13, 12)
+#define PLL_ROT_ENA    BIT(14)
+#define PLL_CLK_ENA    BIT(15)
+
+#define MAX_SEL 4
+#define MAX_PRE BIT(3)
+
+static const u8 sel_rates[MAX_SEL] = { 0, 2*8, 2*4, 2*2 };
+
+static const char *clk_names[N_CLOCKS] = {
+       "core", "ddr", "cpu2", "arm2",
+       "aux1", "aux2", "aux3", "aux4",
+       "synce",
+};
+
+struct s5_hw_clk {
+       struct clk_hw hw;
+       void __iomem *reg;
+};
+
+struct s5_clk_data {
+       void __iomem *base;
+       struct s5_hw_clk s5_hw[N_CLOCKS];
+};
+
+struct s5_pll_conf {
+       unsigned long freq;
+       u8 div;
+       bool rot_ena;
+       u8 rot_sel;
+       u8 rot_dir;
+       u8 pre_div;
+};
+
+#define to_s5_pll(hw) container_of(hw, struct s5_hw_clk, hw)
+
+static unsigned long s5_calc_freq(unsigned long parent_rate,
+                                 const struct s5_pll_conf *conf)
+{
+       unsigned long rate = parent_rate / conf->div;
+
+       if (conf->rot_ena) {
+               int sign = conf->rot_dir ? -1 : 1;
+               int divt = sel_rates[conf->rot_sel] * (1 + conf->pre_div);
+               int divb = divt + sign;
+
+               rate = mult_frac(rate, divt, divb);
+               rate = roundup(rate, 1000);
+       }
+
+       return rate;
+}
+
+static void s5_search_fractional(unsigned long rate,
+                                unsigned long parent_rate,
+                                int div,
+                                struct s5_pll_conf *conf)
+{
+       struct s5_pll_conf best;
+       ulong cur_offset, best_offset = rate;
+       int d, i, j;
+
+       memset(conf, 0, sizeof(*conf));
+       conf->div = div;
+       conf->rot_ena = 1;      /* Fractional rate */
+
+       for (d = 0; best_offset > 0 && d <= 1 ; d++) {
+               conf->rot_dir = !!d;
+               for (i = 0; best_offset > 0 && i < MAX_PRE; i++) {
+                       conf->pre_div = i;
+                       for (j = 1; best_offset > 0 && j < MAX_SEL; j++) {
+                               conf->rot_sel = j;
+                               conf->freq = s5_calc_freq(parent_rate, conf);
+                               cur_offset = abs(rate - conf->freq);
+                               if (cur_offset < best_offset) {
+                                       best_offset = cur_offset;
+                                       best = *conf;
+                               }
+                       }
+               }
+       }
+
+       /* Best match */
+       *conf = best;
+}
+
+static unsigned long s5_calc_params(unsigned long rate,
+                                   unsigned long parent_rate,
+                                   struct s5_pll_conf *conf)
+{
+       if (parent_rate % rate) {
+               struct s5_pll_conf alt1, alt2;
+               int div;
+
+               div = DIV_ROUND_CLOSEST_ULL(parent_rate, rate);
+               s5_search_fractional(rate, parent_rate, div, &alt1);
+
+               /* Straight match? */
+               if (alt1.freq == rate) {
+                       *conf = alt1;
+               } else {
+                       /* Try without rounding divider */
+                       div = parent_rate / rate;
+                       if (div != alt1.div) {
+                               s5_search_fractional(rate, parent_rate, div,
+                                                    &alt2);
+                               /* Select the better match */
+                               if (abs(rate - alt1.freq) <
+                                   abs(rate - alt2.freq))
+                                       *conf = alt1;
+                               else
+                                       *conf = alt2;
+                       } else {
+                               /* Same divider candidate; keep the first search result */
+                               *conf = alt1;
+                       }
+               }
+       } else {
+               /* Straight fit */
+               memset(conf, 0, sizeof(*conf));
+               conf->div = parent_rate / rate;
+               conf->freq = rate;
+       }
+
+       return conf->freq;
+}
+
+static int s5_pll_enable(struct clk_hw *hw)
+{
+       struct s5_hw_clk *pll = to_s5_pll(hw);
+       u32 val = readl(pll->reg);
+
+       val |= PLL_CLK_ENA;
+       writel(val, pll->reg);
+
+       return 0;
+}
+
+static void s5_pll_disable(struct clk_hw *hw)
+{
+       struct s5_hw_clk *pll = to_s5_pll(hw);
+       u32 val = readl(pll->reg);
+
+       val &= ~PLL_CLK_ENA;
+       writel(val, pll->reg);
+}
+
+static int s5_pll_set_rate(struct clk_hw *hw,
+                          unsigned long rate,
+                          unsigned long parent_rate)
+{
+       struct s5_hw_clk *pll = to_s5_pll(hw);
+       struct s5_pll_conf conf;
+       unsigned long eff_rate;
+       u32 val;
+
+       eff_rate = s5_calc_params(rate, parent_rate, &conf);
+       if (eff_rate != rate)
+               return -EOPNOTSUPP;
+
+       val = readl(pll->reg) & PLL_CLK_ENA;
+       val |= FIELD_PREP(PLL_DIV, conf.div);
+       if (conf.rot_ena) {
+               val |= PLL_ROT_ENA;
+               val |= FIELD_PREP(PLL_ROT_SEL, conf.rot_sel);
+               val |= FIELD_PREP(PLL_PRE_DIV, conf.pre_div);
+               if (conf.rot_dir)
+                       val |= PLL_ROT_DIR;
+       }
+       writel(val, pll->reg);
+
+       return 0;
+}
+
+static unsigned long s5_pll_recalc_rate(struct clk_hw *hw,
+                                       unsigned long parent_rate)
+{
+       struct s5_hw_clk *pll = to_s5_pll(hw);
+       struct s5_pll_conf conf;
+       u32 val;
+
+       val = readl(pll->reg);
+
+       if (val & PLL_CLK_ENA) {
+               conf.div     = FIELD_GET(PLL_DIV, val);
+               conf.pre_div = FIELD_GET(PLL_PRE_DIV, val);
+               conf.rot_ena = FIELD_GET(PLL_ROT_ENA, val);
+               conf.rot_dir = FIELD_GET(PLL_ROT_DIR, val);
+               conf.rot_sel = FIELD_GET(PLL_ROT_SEL, val);
+
+               conf.freq = s5_calc_freq(parent_rate, &conf);
+       } else {
+               conf.freq = 0;
+       }
+
+       return conf.freq;
+}
+
+static long s5_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+                             unsigned long *parent_rate)
+{
+       struct s5_pll_conf conf;
+
+       return s5_calc_params(rate, *parent_rate, &conf);
+}
+
+static const struct clk_ops s5_pll_ops = {
+       .enable         = s5_pll_enable,
+       .disable        = s5_pll_disable,
+       .set_rate       = s5_pll_set_rate,
+       .round_rate     = s5_pll_round_rate,
+       .recalc_rate    = s5_pll_recalc_rate,
+};
+
+static struct clk_hw *s5_clk_hw_get(struct of_phandle_args *clkspec, void *data)
+{
+       struct s5_clk_data *s5_clk = data;
+       unsigned int idx = clkspec->args[0];
+
+       if (idx >= N_CLOCKS) {
+               pr_err("%s: invalid index %u\n", __func__, idx);
+               return ERR_PTR(-EINVAL);
+       }
+
+       return &s5_clk->s5_hw[idx].hw;
+}
+
+static int s5_clk_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       int i, ret;
+       struct s5_clk_data *s5_clk;
+       struct clk_parent_data pdata = { .index = 0 };
+       struct clk_init_data init = {
+               .ops = &s5_pll_ops,
+               .num_parents = 1,
+               .parent_data = &pdata,
+       };
+
+       s5_clk = devm_kzalloc(dev, sizeof(*s5_clk), GFP_KERNEL);
+       if (!s5_clk)
+               return -ENOMEM;
+
+       s5_clk->base = devm_platform_ioremap_resource(pdev, 0);
+       if (IS_ERR(s5_clk->base))
+               return PTR_ERR(s5_clk->base);
+
+       for (i = 0; i < N_CLOCKS; i++) {
+               struct s5_hw_clk *s5_hw = &s5_clk->s5_hw[i];
+
+               init.name = clk_names[i];
+               s5_hw->reg = s5_clk->base + (i * 4);
+               s5_hw->hw.init = &init;
+               ret = devm_clk_hw_register(dev, &s5_hw->hw);
+               if (ret) {
+                       dev_err(dev, "failed to register %s clock\n",
+                               init.name);
+                       return ret;
+               }
+       }
+
+       return devm_of_clk_add_hw_provider(dev, s5_clk_hw_get, s5_clk);
+}
+
+static const struct of_device_id s5_clk_dt_ids[] = {
+       { .compatible = "microchip,sparx5-dpll", },
+       { }
+};
+MODULE_DEVICE_TABLE(of, s5_clk_dt_ids);
+
+static struct platform_driver s5_clk_driver = {
+       .probe  = s5_clk_probe,
+       .driver = {
+               .name = "sparx5-clk",
+               .of_match_table = s5_clk_dt_ids,
+       },
+};
+builtin_platform_driver(s5_clk_driver);
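To make the fractional ("rotation") path of the new driver concrete, here is a stand-alone restatement of s5_calc_freq() evaluated for one hypothetical register setting (the driver additionally rounds the result up to 1 kHz, which is omitted here):

#include <stdio.h>

static const unsigned int sel_rates[4] = { 0, 2 * 8, 2 * 4, 2 * 2 };

static unsigned long calc_freq(unsigned long parent, unsigned int div,
                               int rot_dir, unsigned int rot_sel,
                               unsigned int pre_div)
{
        unsigned long rate = parent / div;
        int sign = rot_dir ? -1 : 1;
        int divt = sel_rates[rot_sel] * (1 + pre_div);
        int divb = divt + sign;

        return (unsigned long)((unsigned long long)rate * divt / divb);
}

int main(void)
{
        /* 2.5 GHz reference, div = 4, rotate down, rot_sel = 1, pre_div = 0 */
        printf("%lu Hz\n", calc_freq(2500000000UL, 4, 1, 1, 0));        /* ~666666666 */
        return 0;
}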
index 9a5fb38..c90460e 100644 (file)
@@ -167,6 +167,12 @@ struct vc5_hw_data {
        u32                     div_int;
        u32                     div_frc;
        unsigned int            num;
+};
+
+struct vc5_out_data {
+       struct clk_hw           hw;
+       struct vc5_driver_data  *vc5;
+       unsigned int            num;
        unsigned int            clk_output_cfg0;
        unsigned int            clk_output_cfg0_mask;
 };
@@ -184,7 +190,7 @@ struct vc5_driver_data {
        struct clk_hw           clk_pfd;
        struct vc5_hw_data      clk_pll;
        struct vc5_hw_data      clk_fod[VC5_MAX_FOD_NUM];
-       struct vc5_hw_data      clk_out[VC5_MAX_CLK_OUT_NUM];
+       struct vc5_out_data     clk_out[VC5_MAX_CLK_OUT_NUM];
 };
 
 /*
@@ -567,7 +573,7 @@ static const struct clk_ops vc5_fod_ops = {
 
 static int vc5_clk_out_prepare(struct clk_hw *hw)
 {
-       struct vc5_hw_data *hwdata = container_of(hw, struct vc5_hw_data, hw);
+       struct vc5_out_data *hwdata = container_of(hw, struct vc5_out_data, hw);
        struct vc5_driver_data *vc5 = hwdata->vc5;
        const u8 mask = VC5_OUT_DIV_CONTROL_SELB_NORM |
                        VC5_OUT_DIV_CONTROL_SEL_EXT |
@@ -609,7 +615,7 @@ static int vc5_clk_out_prepare(struct clk_hw *hw)
 
 static void vc5_clk_out_unprepare(struct clk_hw *hw)
 {
-       struct vc5_hw_data *hwdata = container_of(hw, struct vc5_hw_data, hw);
+       struct vc5_out_data *hwdata = container_of(hw, struct vc5_out_data, hw);
        struct vc5_driver_data *vc5 = hwdata->vc5;
 
        /* Disable the clock buffer */
@@ -619,7 +625,7 @@ static void vc5_clk_out_unprepare(struct clk_hw *hw)
 
 static unsigned char vc5_clk_out_get_parent(struct clk_hw *hw)
 {
-       struct vc5_hw_data *hwdata = container_of(hw, struct vc5_hw_data, hw);
+       struct vc5_out_data *hwdata = container_of(hw, struct vc5_out_data, hw);
        struct vc5_driver_data *vc5 = hwdata->vc5;
        const u8 mask = VC5_OUT_DIV_CONTROL_SELB_NORM |
                        VC5_OUT_DIV_CONTROL_SEL_EXT |
@@ -649,7 +655,7 @@ static unsigned char vc5_clk_out_get_parent(struct clk_hw *hw)
 
 static int vc5_clk_out_set_parent(struct clk_hw *hw, u8 index)
 {
-       struct vc5_hw_data *hwdata = container_of(hw, struct vc5_hw_data, hw);
+       struct vc5_out_data *hwdata = container_of(hw, struct vc5_out_data, hw);
        struct vc5_driver_data *vc5 = hwdata->vc5;
        const u8 mask = VC5_OUT_DIV_CONTROL_RESET |
                        VC5_OUT_DIV_CONTROL_SELB_NORM |
@@ -704,7 +710,7 @@ static int vc5_map_index_to_output(const enum vc5_model model,
 }
 
 static int vc5_update_mode(struct device_node *np_output,
-                          struct vc5_hw_data *clk_out)
+                          struct vc5_out_data *clk_out)
 {
        u32 value;
 
@@ -729,7 +735,7 @@ static int vc5_update_mode(struct device_node *np_output,
 }
 
 static int vc5_update_power(struct device_node *np_output,
-                           struct vc5_hw_data *clk_out)
+                           struct vc5_out_data *clk_out)
 {
        u32 value;
 
@@ -754,7 +760,7 @@ static int vc5_update_power(struct device_node *np_output,
 }
 
 static int vc5_update_slew(struct device_node *np_output,
-                          struct vc5_hw_data *clk_out)
+                          struct vc5_out_data *clk_out)
 {
        u32 value;
 
@@ -782,17 +788,20 @@ static int vc5_update_slew(struct device_node *np_output,
 }
 
 static int vc5_get_output_config(struct i2c_client *client,
-                                struct vc5_hw_data *clk_out)
+                                struct vc5_out_data *clk_out)
 {
        struct device_node *np_output;
        char *child_name;
        int ret = 0;
 
        child_name = kasprintf(GFP_KERNEL, "OUT%d", clk_out->num + 1);
+       if (!child_name)
+               return -ENOMEM;
+
        np_output = of_get_child_by_name(client->dev.of_node, child_name);
        kfree(child_name);
        if (!np_output)
-               goto output_done;
+               return 0;
 
        ret = vc5_update_mode(np_output, clk_out);
        if (ret)
@@ -813,7 +822,6 @@ output_error:
 
        of_node_put(np_output);
 
-output_done:
        return ret;
 }
 
@@ -828,7 +836,7 @@ static int vc5_probe(struct i2c_client *client, const struct i2c_device_id *id)
        int ret;
 
        vc5 = devm_kzalloc(&client->dev, sizeof(*vc5), GFP_KERNEL);
-       if (vc5 == NULL)
+       if (!vc5)
                return -ENOMEM;
 
        i2c_set_clientdata(client, vc5);
@@ -882,11 +890,9 @@ static int vc5_probe(struct i2c_client *client, const struct i2c_device_id *id)
        init.parent_names = parent_names;
        vc5->clk_mux.init = &init;
        ret = devm_clk_hw_register(&client->dev, &vc5->clk_mux);
+       if (ret)
+               goto err_clk_register;
        kfree(init.name);       /* clock framework made a copy of the name */
-       if (ret) {
-               dev_err(&client->dev, "unable to register %s\n", init.name);
-               goto err_clk;
-       }
 
        if (vc5->chip_info->flags & VC5_HAS_PFD_FREQ_DBL) {
                /* Register frequency doubler */
@@ -900,12 +906,9 @@ static int vc5_probe(struct i2c_client *client, const struct i2c_device_id *id)
                init.num_parents = 1;
                vc5->clk_mul.init = &init;
                ret = devm_clk_hw_register(&client->dev, &vc5->clk_mul);
+               if (ret)
+                       goto err_clk_register;
                kfree(init.name); /* clock framework made a copy of the name */
-               if (ret) {
-                       dev_err(&client->dev, "unable to register %s\n",
-                               init.name);
-                       goto err_clk;
-               }
        }
 
        /* Register PFD */
@@ -921,11 +924,9 @@ static int vc5_probe(struct i2c_client *client, const struct i2c_device_id *id)
        init.num_parents = 1;
        vc5->clk_pfd.init = &init;
        ret = devm_clk_hw_register(&client->dev, &vc5->clk_pfd);
+       if (ret)
+               goto err_clk_register;
        kfree(init.name);       /* clock framework made a copy of the name */
-       if (ret) {
-               dev_err(&client->dev, "unable to register %s\n", init.name);
-               goto err_clk;
-       }
 
        /* Register PLL */
        memset(&init, 0, sizeof(init));
@@ -939,11 +940,9 @@ static int vc5_probe(struct i2c_client *client, const struct i2c_device_id *id)
        vc5->clk_pll.vc5 = vc5;
        vc5->clk_pll.hw.init = &init;
        ret = devm_clk_hw_register(&client->dev, &vc5->clk_pll.hw);
+       if (ret)
+               goto err_clk_register;
        kfree(init.name); /* clock framework made a copy of the name */
-       if (ret) {
-               dev_err(&client->dev, "unable to register %s\n", init.name);
-               goto err_clk;
-       }
 
        /* Register FODs */
        for (n = 0; n < vc5->chip_info->clk_fod_cnt; n++) {
@@ -960,12 +959,9 @@ static int vc5_probe(struct i2c_client *client, const struct i2c_device_id *id)
                vc5->clk_fod[n].vc5 = vc5;
                vc5->clk_fod[n].hw.init = &init;
                ret = devm_clk_hw_register(&client->dev, &vc5->clk_fod[n].hw);
+               if (ret)
+                       goto err_clk_register;
                kfree(init.name); /* clock framework made a copy of the name */
-               if (ret) {
-                       dev_err(&client->dev, "unable to register %s\n",
-                               init.name);
-                       goto err_clk;
-               }
        }
 
        /* Register MUX-connected OUT0_I2C_SELB output */
@@ -981,11 +977,9 @@ static int vc5_probe(struct i2c_client *client, const struct i2c_device_id *id)
        vc5->clk_out[0].vc5 = vc5;
        vc5->clk_out[0].hw.init = &init;
        ret = devm_clk_hw_register(&client->dev, &vc5->clk_out[0].hw);
-       kfree(init.name);       /* clock framework made a copy of the name */
-       if (ret) {
-               dev_err(&client->dev, "unable to register %s\n", init.name);
-               goto err_clk;
-       }
+       if (ret)
+               goto err_clk_register;
+       kfree(init.name); /* clock framework made a copy of the name */
 
        /* Register FOD-connected OUTx outputs */
        for (n = 1; n < vc5->chip_info->clk_out_cnt; n++) {
@@ -1008,12 +1002,9 @@ static int vc5_probe(struct i2c_client *client, const struct i2c_device_id *id)
                vc5->clk_out[n].vc5 = vc5;
                vc5->clk_out[n].hw.init = &init;
                ret = devm_clk_hw_register(&client->dev, &vc5->clk_out[n].hw);
+               if (ret)
+                       goto err_clk_register;
                kfree(init.name); /* clock framework made a copy of the name */
-               if (ret) {
-                       dev_err(&client->dev, "unable to register %s\n",
-                               init.name);
-                       goto err_clk;
-               }
 
                /* Fetch Clock Output configuration from DT (if specified) */
                ret = vc5_get_output_config(client, &vc5->clk_out[n]);
@@ -1029,6 +1020,9 @@ static int vc5_probe(struct i2c_client *client, const struct i2c_device_id *id)
 
        return 0;
 
+err_clk_register:
+       dev_err(&client->dev, "unable to register %s\n", init.name);
+       kfree(init.name); /* clock framework made a copy of the name */
 err_clk:
        if (vc5->chip_info->flags & VC5_HAS_INTERNAL_XTAL)
                clk_unregister_fixed_rate(vc5->pin_xin);
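One detail worth noting in the hunks above: the old error paths freed init.name before printing it, so the new ordering (check the return code, log, then free) also keeps the string valid for the failure message. A generic sketch of that ordering, using placeholder names rather than the driver's API:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Placeholder registration call, made to fail so the error path runs. */
static int register_hw(const char *name)
{
        (void)name;
        return -22;     /* pretend -EINVAL */
}

int main(void)
{
        char *name = strdup("vc5.out1");        /* stands in for kasprintf() */
        int ret;

        if (!name)
                return 1;

        ret = register_hw(name);
        if (ret) {
                /* Log first, free afterwards: the string must still be
                 * valid when the failure message is printed. */
                fprintf(stderr, "unable to register %s\n", name);
                free(name);
                return 1;
        }
        free(name);     /* the clock framework keeps its own copy */
        return 0;
}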
index 236923b..0a9261a 100644 (file)
@@ -500,12 +500,6 @@ static unsigned long clk_core_get_accuracy_no_lock(struct clk_core *core)
        return core->accuracy;
 }
 
-unsigned long __clk_get_flags(struct clk *clk)
-{
-       return !clk ? 0 : clk->core->flags;
-}
-EXPORT_SYMBOL_GPL(__clk_get_flags);
-
 unsigned long clk_hw_get_flags(const struct clk_hw *hw)
 {
        return hw->core->flags;
@@ -3054,6 +3048,31 @@ static int clk_rate_set(void *data, u64 val)
 }
 
 #define clk_rate_mode  0644
+
+static int clk_prepare_enable_set(void *data, u64 val)
+{
+       struct clk_core *core = data;
+       int ret = 0;
+
+       if (val)
+               ret = clk_prepare_enable(core->hw->clk);
+       else
+               clk_disable_unprepare(core->hw->clk);
+
+       return ret;
+}
+
+static int clk_prepare_enable_get(void *data, u64 *val)
+{
+       struct clk_core *core = data;
+
+       *val = core->enable_count && core->prepare_count;
+       return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(clk_prepare_enable_fops, clk_prepare_enable_get,
+                        clk_prepare_enable_set, "%llu\n");
+
 #else
 #define clk_rate_set   NULL
 #define clk_rate_mode  0444
@@ -3231,6 +3250,10 @@ static void clk_debug_create_one(struct clk_core *core, struct dentry *pdentry)
        debugfs_create_u32("clk_notifier_count", 0444, root, &core->notifier_count);
        debugfs_create_file("clk_duty_cycle", 0444, root, core,
                            &clk_duty_cycle_fops);
+#ifdef CLOCK_ALLOW_WRITE_DEBUGFS
+       debugfs_create_file("clk_prepare_enable", 0644, root, core,
+                           &clk_prepare_enable_fops);
+#endif
 
        if (core->num_parents > 0)
                debugfs_create_file("clk_parent", 0444, root, core,
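When the kernel is built with CLOCK_ALLOW_WRITE_DEBUGFS defined, the new clk_prepare_enable attribute can be driven from user space; a sketch of such a write, assuming debugfs is mounted at /sys/kernel/debug and using a placeholder clock name:

#include <stdio.h>

int main(void)
{
        /* "foo_clk" is a placeholder; use a real entry under /sys/kernel/debug/clk */
        FILE *f = fopen("/sys/kernel/debug/clk/foo_clk/clk_prepare_enable", "w");

        if (!f) {
                perror("open");
                return 1;
        }
        fputs("1\n", f);        /* prepare + enable; write 0 to disable + unprepare */
        fclose(f);
        return 0;
}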
@@ -4135,6 +4158,7 @@ static int devm_clk_hw_match(struct device *dev, void *res, void *data)
 
 /**
  * devm_clk_unregister - resource managed clk_unregister()
+ * @dev: device that is unregistering the clock data
  * @clk: clock to unregister
  *
  * Deallocate a clock allocated with devm_clk_register(). Normally
@@ -4324,6 +4348,8 @@ static void clk_core_reparent_orphans(void)
  * @node: Pointer to device tree node of clock provider
  * @get: Get clock callback.  Returns NULL or a struct clk for the
  *       given clock specifier
+ * @get_hw: Get clk_hw callback.  Returns NULL, ERR_PTR or a
+ *       struct clk_hw for the given clock specifier
  * @data: context pointer to be passed into @get callback
  */
 struct of_clk_provider {
index 8a23d5d..6c35e4b 100644 (file)
@@ -651,7 +651,7 @@ static int davinci_pll_sysclk_rate_change(struct notifier_block *nb,
                pllcmd = readl(pll->base + PLLCMD);
                pllcmd |= PLLCMD_GOSET;
                writel(pllcmd, pll->base + PLLCMD);
-               /* fallthrough */
+               fallthrough;
        case PRE_RATE_CHANGE:
                /* Wait until for outstanding changes to take effect */
                do {
index a7db930..b20cdea 100644 (file)
@@ -433,7 +433,7 @@ struct clk_hw *imx_clk_hw_pllv3(enum imx_pllv3_type type, const char *name,
                break;
        case IMX_PLLV3_USB_VF610:
                pll->div_shift = 1;
-               /* fall through */
+               fallthrough;
        case IMX_PLLV3_USB:
                ops = &clk_pllv3_ops;
                pll->powerup_set = true;
@@ -441,7 +441,7 @@ struct clk_hw *imx_clk_hw_pllv3(enum imx_pllv3_type type, const char *name,
        case IMX_PLLV3_AV_IMX7:
                pll->num_offset = PLL_IMX7_NUM_OFFSET;
                pll->denom_offset = PLL_IMX7_DENOM_OFFSET;
-               /* fall through */
+               fallthrough;
        case IMX_PLLV3_AV:
                ops = &clk_pllv3_av_ops;
                break;
index 6c5b802..0268d23 100644 (file)
@@ -4,6 +4,7 @@
  *
  * Copyright (c) 2013-2015 Imagination Technologies
  * Author: Paul Burton <paul.burton@mips.com>
+ * Copyright (c) 2020 周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com>
  */
 
 #include <linux/clk-provider.h>
 
 /* CGU register offsets */
 #define CGU_REG_CLOCKCONTROL   0x00
-#define CGU_REG_LCR                    0x04
-#define CGU_REG_APLL           0x10
-#define CGU_REG_MPLL           0x14
-#define CGU_REG_EPLL           0x18
-#define CGU_REG_VPLL           0x1c
-#define CGU_REG_CLKGR0         0x20
-#define CGU_REG_OPCR           0x24
-#define CGU_REG_CLKGR1         0x28
-#define CGU_REG_DDRCDR         0x2c
-#define CGU_REG_VPUCDR         0x30
-#define CGU_REG_USBPCR         0x3c
-#define CGU_REG_USBRDT         0x40
-#define CGU_REG_USBVBFIL       0x44
-#define CGU_REG_USBPCR1                0x48
-#define CGU_REG_LP0CDR         0x54
-#define CGU_REG_I2SCDR         0x60
-#define CGU_REG_LP1CDR         0x64
-#define CGU_REG_MSC0CDR                0x68
-#define CGU_REG_UHCCDR         0x6c
-#define CGU_REG_SSICDR         0x74
-#define CGU_REG_CIMCDR         0x7c
-#define CGU_REG_PCMCDR         0x84
-#define CGU_REG_GPUCDR         0x88
-#define CGU_REG_HDMICDR                0x8c
-#define CGU_REG_MSC1CDR                0xa4
-#define CGU_REG_MSC2CDR                0xa8
-#define CGU_REG_BCHCDR         0xac
-#define CGU_REG_CLOCKSTATUS    0xd4
+#define CGU_REG_LCR                            0x04
+#define CGU_REG_APLL                   0x10
+#define CGU_REG_MPLL                   0x14
+#define CGU_REG_EPLL                   0x18
+#define CGU_REG_VPLL                   0x1c
+#define CGU_REG_CLKGR0                 0x20
+#define CGU_REG_OPCR                   0x24
+#define CGU_REG_CLKGR1                 0x28
+#define CGU_REG_DDRCDR                 0x2c
+#define CGU_REG_VPUCDR                 0x30
+#define CGU_REG_USBPCR                 0x3c
+#define CGU_REG_USBRDT                 0x40
+#define CGU_REG_USBVBFIL               0x44
+#define CGU_REG_USBPCR1                        0x48
+#define CGU_REG_LP0CDR                 0x54
+#define CGU_REG_I2SCDR                 0x60
+#define CGU_REG_LP1CDR                 0x64
+#define CGU_REG_MSC0CDR                        0x68
+#define CGU_REG_UHCCDR                 0x6c
+#define CGU_REG_SSICDR                 0x74
+#define CGU_REG_CIMCDR                 0x7c
+#define CGU_REG_PCMCDR                 0x84
+#define CGU_REG_GPUCDR                 0x88
+#define CGU_REG_HDMICDR                        0x8c
+#define CGU_REG_MSC1CDR                        0xa4
+#define CGU_REG_MSC2CDR                        0xa8
+#define CGU_REG_BCHCDR                 0xac
+#define CGU_REG_CLOCKSTATUS            0xd4
 
 /* bits within the OPCR register */
-#define OPCR_SPENDN0           BIT(7)
-#define OPCR_SPENDN1           BIT(6)
+#define OPCR_SPENDN0                   BIT(7)
+#define OPCR_SPENDN1                   BIT(6)
 
 /* bits within the USBPCR register */
-#define USBPCR_USB_MODE                BIT(31)
+#define USBPCR_USB_MODE                        BIT(31)
 #define USBPCR_IDPULLUP_MASK   (0x3 << 28)
-#define USBPCR_COMMONONN       BIT(25)
-#define USBPCR_VBUSVLDEXT      BIT(24)
+#define USBPCR_COMMONONN               BIT(25)
+#define USBPCR_VBUSVLDEXT              BIT(24)
 #define USBPCR_VBUSVLDEXTSEL   BIT(23)
-#define USBPCR_POR             BIT(22)
-#define USBPCR_OTG_DISABLE     BIT(20)
+#define USBPCR_POR                             BIT(22)
+#define USBPCR_SIDDQ                   BIT(21)
+#define USBPCR_OTG_DISABLE             BIT(20)
 #define USBPCR_COMPDISTUNE_MASK        (0x7 << 17)
-#define USBPCR_OTGTUNE_MASK    (0x7 << 14)
+#define USBPCR_OTGTUNE_MASK            (0x7 << 14)
 #define USBPCR_SQRXTUNE_MASK   (0x7 << 11)
 #define USBPCR_TXFSLSTUNE_MASK (0xf << 7)
 #define USBPCR_TXPREEMPHTUNE   BIT(6)
 #define USBPCR1_REFCLKDIV_48   (0x2 << USBPCR1_REFCLKDIV_SHIFT)
 #define USBPCR1_REFCLKDIV_24   (0x1 << USBPCR1_REFCLKDIV_SHIFT)
 #define USBPCR1_REFCLKDIV_12   (0x0 << USBPCR1_REFCLKDIV_SHIFT)
-#define USBPCR1_USB_SEL                BIT(28)
-#define USBPCR1_WORD_IF0       BIT(19)
-#define USBPCR1_WORD_IF1       BIT(18)
+#define USBPCR1_USB_SEL                        BIT(28)
+#define USBPCR1_WORD_IF0               BIT(19)
+#define USBPCR1_WORD_IF1               BIT(18)
 
 /* bits within the USBRDT register */
-#define USBRDT_VBFIL_LD_EN     BIT(25)
-#define USBRDT_USBRDT_MASK     0x7fffff
+#define USBRDT_VBFIL_LD_EN             BIT(25)
+#define USBRDT_USBRDT_MASK             0x7fffff
 
 /* bits within the USBVBFIL register */
 #define USBVBFIL_IDDIGFIL_SHIFT        16
 #define USBVBFIL_USBVBFIL_MASK (0xffff)
 
 /* bits within the LCR register */
-#define LCR_PD_SCPU                    BIT(31)
-#define LCR_SCPUS                      BIT(27)
+#define LCR_PD_SCPU                            BIT(31)
+#define LCR_SCPUS                              BIT(27)
 
 /* bits within the CLKGR1 register */
-#define CLKGR1_CORE1           BIT(15)
+#define CLKGR1_CORE1                   BIT(15)
 
 static struct ingenic_cgu *cgu;
 
-static u8 jz4780_otg_phy_get_parent(struct clk_hw *hw)
-{
-       /* we only use CLKCORE, revisit if that ever changes */
-       return 0;
-}
-
-static int jz4780_otg_phy_set_parent(struct clk_hw *hw, u8 idx)
-{
-       unsigned long flags;
-       u32 usbpcr1;
-
-       if (idx > 0)
-               return -EINVAL;
-
-       spin_lock_irqsave(&cgu->lock, flags);
-
-       usbpcr1 = readl(cgu->base + CGU_REG_USBPCR1);
-       usbpcr1 &= ~USBPCR1_REFCLKSEL_MASK;
-       /* we only use CLKCORE */
-       usbpcr1 |= USBPCR1_REFCLKSEL_CORE;
-       writel(usbpcr1, cgu->base + CGU_REG_USBPCR1);
-
-       spin_unlock_irqrestore(&cgu->lock, flags);
-       return 0;
-}
-
 static unsigned long jz4780_otg_phy_recalc_rate(struct clk_hw *hw,
                                                unsigned long parent_rate)
 {
@@ -149,7 +125,6 @@ static unsigned long jz4780_otg_phy_recalc_rate(struct clk_hw *hw,
                return 19200000;
        }
 
-       BUG();
        return parent_rate;
 }
 
@@ -206,13 +181,43 @@ static int jz4780_otg_phy_set_rate(struct clk_hw *hw, unsigned long req_rate,
        return 0;
 }
 
-static const struct clk_ops jz4780_otg_phy_ops = {
-       .get_parent = jz4780_otg_phy_get_parent,
-       .set_parent = jz4780_otg_phy_set_parent,
+static int jz4780_otg_phy_enable(struct clk_hw *hw)
+{
+       void __iomem *reg_opcr          = cgu->base + CGU_REG_OPCR;
+       void __iomem *reg_usbpcr        = cgu->base + CGU_REG_USBPCR;
+
+       writel(readl(reg_opcr) | OPCR_SPENDN0, reg_opcr);
+       writel(readl(reg_usbpcr) & ~USBPCR_OTG_DISABLE & ~USBPCR_SIDDQ, reg_usbpcr);
+       return 0;
+}
 
+static void jz4780_otg_phy_disable(struct clk_hw *hw)
+{
+       void __iomem *reg_opcr          = cgu->base + CGU_REG_OPCR;
+       void __iomem *reg_usbpcr        = cgu->base + CGU_REG_USBPCR;
+
+       writel(readl(reg_opcr) & ~OPCR_SPENDN0, reg_opcr);
+       writel(readl(reg_usbpcr) | USBPCR_OTG_DISABLE | USBPCR_SIDDQ, reg_usbpcr);
+}
+
+static int jz4780_otg_phy_is_enabled(struct clk_hw *hw)
+{
+       void __iomem *reg_opcr          = cgu->base + CGU_REG_OPCR;
+       void __iomem *reg_usbpcr        = cgu->base + CGU_REG_USBPCR;
+
+       return (readl(reg_opcr) & OPCR_SPENDN0) &&
+               !(readl(reg_usbpcr) & USBPCR_SIDDQ) &&
+               !(readl(reg_usbpcr) & USBPCR_OTG_DISABLE);
+}
+
+static const struct clk_ops jz4780_otg_phy_ops = {
        .recalc_rate = jz4780_otg_phy_recalc_rate,
        .round_rate = jz4780_otg_phy_round_rate,
        .set_rate = jz4780_otg_phy_set_rate,
+
+       .enable         = jz4780_otg_phy_enable,
+       .disable        = jz4780_otg_phy_disable,
+       .is_enabled     = jz4780_otg_phy_is_enabled,
 };
 
 static int jz4780_core1_enable(struct clk_hw *hw)
@@ -516,6 +521,18 @@ static const struct ingenic_cgu_clk_info jz4780_cgu_clocks[] = {
                .gate = { CGU_REG_CLKGR0, 1 },
        },
 
+       [JZ4780_CLK_EXCLK_DIV512] = {
+               "exclk_div512", CGU_CLK_FIXDIV,
+               .parents = { JZ4780_CLK_EXCLK },
+               .fixdiv = { 512 },
+       },
+
+       [JZ4780_CLK_RTC] = {
+               "rtc_ercs", CGU_CLK_MUX | CGU_CLK_GATE,
+               .parents = { JZ4780_CLK_EXCLK_DIV512, JZ4780_CLK_RTCLK },
+               .mux = { CGU_REG_OPCR, 2, 1},
+       },
+
        /* Gate-only clocks */
 
        [JZ4780_CLK_NEMC] = {
index 453f332..9aa20b5 100644 (file)
 #define USBPCR_SIDDQ           BIT(21)
 #define USBPCR_OTG_DISABLE     BIT(20)
 
+/* bits within the USBPCR1 register */
+#define USBPCR1_REFCLKSEL_SHIFT        26
+#define USBPCR1_REFCLKSEL_MASK (0x3 << USBPCR1_REFCLKSEL_SHIFT)
+#define USBPCR1_REFCLKSEL_CORE (0x2 << USBPCR1_REFCLKSEL_SHIFT)
+#define USBPCR1_REFCLKDIV_SHIFT        24
+#define USBPCR1_REFCLKDIV_MASK (0x3 << USBPCR1_REFCLKDIV_SHIFT)
+#define USBPCR1_REFCLKDIV_48   (0x2 << USBPCR1_REFCLKDIV_SHIFT)
+#define USBPCR1_REFCLKDIV_24   (0x1 << USBPCR1_REFCLKDIV_SHIFT)
+#define USBPCR1_REFCLKDIV_12   (0x0 << USBPCR1_REFCLKDIV_SHIFT)
+
 static struct ingenic_cgu *cgu;
 
+static unsigned long x1000_otg_phy_recalc_rate(struct clk_hw *hw,
+                                               unsigned long parent_rate)
+{
+       u32 usbpcr1;
+       unsigned refclk_div;
+
+       usbpcr1 = readl(cgu->base + CGU_REG_USBPCR1);
+       refclk_div = usbpcr1 & USBPCR1_REFCLKDIV_MASK;
+
+       switch (refclk_div) {
+       case USBPCR1_REFCLKDIV_12:
+               return 12000000;
+
+       case USBPCR1_REFCLKDIV_24:
+               return 24000000;
+
+       case USBPCR1_REFCLKDIV_48:
+               return 48000000;
+       }
+
+       return parent_rate;
+}
+
+static long x1000_otg_phy_round_rate(struct clk_hw *hw, unsigned long req_rate,
+                                     unsigned long *parent_rate)
+{
+       if (req_rate < 18000000)
+               return 12000000;
+
+       if (req_rate < 36000000)
+               return 24000000;
+
+       return 48000000;
+}
+
+static int x1000_otg_phy_set_rate(struct clk_hw *hw, unsigned long req_rate,
+                                  unsigned long parent_rate)
+{
+       unsigned long flags;
+       u32 usbpcr1, div_bits;
+
+       switch (req_rate) {
+       case 12000000:
+               div_bits = USBPCR1_REFCLKDIV_12;
+               break;
+
+       case 24000000:
+               div_bits = USBPCR1_REFCLKDIV_24;
+               break;
+
+       case 48000000:
+               div_bits = USBPCR1_REFCLKDIV_48;
+               break;
+
+       default:
+               return -EINVAL;
+       }
+
+       spin_lock_irqsave(&cgu->lock, flags);
+
+       usbpcr1 = readl(cgu->base + CGU_REG_USBPCR1);
+       usbpcr1 &= ~USBPCR1_REFCLKDIV_MASK;
+       usbpcr1 |= div_bits;
+       writel(usbpcr1, cgu->base + CGU_REG_USBPCR1);
+
+       spin_unlock_irqrestore(&cgu->lock, flags);
+       return 0;
+}
+
 static int x1000_usb_phy_enable(struct clk_hw *hw)
 {
        void __iomem *reg_opcr          = cgu->base + CGU_REG_OPCR;
@@ -80,6 +159,10 @@ static int x1000_usb_phy_is_enabled(struct clk_hw *hw)
 }
 
 static const struct clk_ops x1000_otg_phy_ops = {
+       .recalc_rate = x1000_otg_phy_recalc_rate,
+       .round_rate = x1000_otg_phy_round_rate,
+       .set_rate = x1000_otg_phy_set_rate,
+
        .enable         = x1000_usb_phy_enable,
        .disable        = x1000_usb_phy_disable,
        .is_enabled     = x1000_usb_phy_is_enabled,
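The new x1000 round_rate() snaps any request onto one of the three supported reference-clock frequencies; a stand-alone check of those thresholds:

#include <stdio.h>

/* Same thresholds as x1000_otg_phy_round_rate() above. */
static long round_refclk(unsigned long req)
{
        if (req < 18000000)
                return 12000000;
        if (req < 36000000)
                return 24000000;
        return 48000000;
}

int main(void)
{
        unsigned long samples[] = { 10000000, 19200000, 24000000, 50000000 };

        for (int i = 0; i < 4; i++)
                printf("%lu -> %ld\n", samples[i], round_refclk(samples[i]));
        return 0;
}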
@@ -144,7 +227,6 @@ static const struct ingenic_cgu_clk_info x1000_cgu_clocks[] = {
                },
        },
 
-
        /* Custom (SoC-specific) OTG PHY */
 
        [X1000_CLK_OTGPHY] = {
@@ -278,6 +360,19 @@ static const struct ingenic_cgu_clk_info x1000_cgu_clocks[] = {
                .mux = { CGU_REG_SSICDR, 30, 1 },
        },
 
+       [X1000_CLK_EXCLK_DIV512] = {
+               "exclk_div512", CGU_CLK_FIXDIV,
+               .parents = { X1000_CLK_EXCLK },
+               .fixdiv = { 512 },
+       },
+
+       [X1000_CLK_RTC] = {
+               "rtc_ercs", CGU_CLK_MUX | CGU_CLK_GATE,
+               .parents = { X1000_CLK_EXCLK_DIV512, X1000_CLK_RTCLK },
+               .mux = { CGU_REG_OPCR, 2, 1},
+               .gate = { CGU_REG_CLKGR, 27 },
+       },
+
        /* Gate-only clocks */
 
        [X1000_CLK_EMC] = {
index a1b2ff0..950aee2 100644 (file)
@@ -329,6 +329,19 @@ static const struct ingenic_cgu_clk_info x1830_cgu_clocks[] = {
                .mux = { CGU_REG_SSICDR, 29, 1 },
        },
 
+       [X1830_CLK_EXCLK_DIV512] = {
+               "exclk_div512", CGU_CLK_FIXDIV,
+               .parents = { X1830_CLK_EXCLK },
+               .fixdiv = { 512 },
+       },
+
+       [X1830_CLK_RTC] = {
+               "rtc_ercs", CGU_CLK_MUX | CGU_CLK_GATE,
+               .parents = { X1830_CLK_EXCLK_DIV512, X1830_CLK_RTCLK },
+               .mux = { CGU_REG_OPCR, 2, 1},
+               .gate = { CGU_REG_CLKGR0, 29 },
+       },
+
        /* Gate-only clocks */
 
        [X1830_CLK_EMC] = {
index 8e2551a..b351039 100644 (file)
@@ -10,6 +10,7 @@
  */
 
 #include <linux/clk.h>
+#include <linux/clk/mmp.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/spinlock.h>
index 7a79651..f254cef 100644 (file)
@@ -10,6 +10,7 @@
  */
 
 #include <linux/clk.h>
+#include <linux/clk/mmp.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/spinlock.h>
index 318c0ad..0583273 100644 (file)
@@ -308,6 +308,15 @@ config SC_GCC_7180
          Say Y if you want to use peripheral devices such as UART, SPI,
          I2C, USB, UFS, SDCC, etc.
 
+config SC_LPASS_CORECC_7180
+       tristate "SC7180 LPASS Core Clock Controller"
+       select SC_GCC_7180
+       help
+         Support for the LPASS (Low Power Audio Subsystem) core clock controller
+         on SC7180 devices.
+         Say Y if you want to use LPASS clocks and power domains of the LPASS
+         core clock controller.
+
 config SC_GPUCC_7180
        tristate "SC7180 Graphics Clock Controller"
        select SC_GCC_7180
@@ -419,6 +428,22 @@ config SM_GCC_8250
          Say Y if you want to use peripheral devices such as UART,
          SPI, I2C, USB, SD/UFS, PCIe etc.
 
+config SM_GPUCC_8150
+       tristate "SM8150 Graphics Clock Controller"
+       select SM_GCC_8150
+       help
+         Support for the graphics clock controller on SM8150 devices.
+         Say Y if you want to support graphics controller devices and
+         functionality such as 3D graphics.
+
+config SM_GPUCC_8250
+       tristate "SM8250 Graphics Clock Controller"
+       select SM_GCC_8250
+       help
+         Support for the graphics clock controller on SM8250 devices.
+         Say Y if you want to support graphics controller devices and
+         functionality such as 3D graphics.
+
 config SPMI_PMIC_CLKDIV
        tristate "SPMI PMIC clkdiv Support"
        depends on SPMI || COMPILE_TEST
index ae0979b..9677e76 100644 (file)
@@ -54,6 +54,7 @@ obj-$(CONFIG_QCS_TURING_404) += turingcc-qcs404.o
 obj-$(CONFIG_SC_DISPCC_7180) += dispcc-sc7180.o
 obj-$(CONFIG_SC_GCC_7180) += gcc-sc7180.o
 obj-$(CONFIG_SC_GPUCC_7180) += gpucc-sc7180.o
+obj-$(CONFIG_SC_LPASS_CORECC_7180) += lpasscorecc-sc7180.o
 obj-$(CONFIG_SC_MSS_7180) += mss-sc7180.o
 obj-$(CONFIG_SC_VIDEOCC_7180) += videocc-sc7180.o
 obj-$(CONFIG_SDM_CAMCC_845) += camcc-sdm845.o
@@ -65,6 +66,8 @@ obj-$(CONFIG_SDM_LPASSCC_845) += lpasscc-sdm845.o
 obj-$(CONFIG_SDM_VIDEOCC_845) += videocc-sdm845.o
 obj-$(CONFIG_SM_GCC_8150) += gcc-sm8150.o
 obj-$(CONFIG_SM_GCC_8250) += gcc-sm8250.o
+obj-$(CONFIG_SM_GPUCC_8150) += gpucc-sm8150.o
+obj-$(CONFIG_SM_GPUCC_8250) += gpucc-sm8250.o
 obj-$(CONFIG_SPMI_PMIC_CLKDIV) += clk-spmi-pmic-div.o
 obj-$(CONFIG_KPSS_XCC) += kpss-xcc.o
 obj-$(CONFIG_QCOM_HFPLL) += hfpll.o
index 9b2dfa0..26139ef 100644 (file)
@@ -56,7 +56,6 @@
 #define PLL_STATUS(p)          ((p)->offset + (p)->regs[PLL_OFF_STATUS])
 #define PLL_OPMODE(p)          ((p)->offset + (p)->regs[PLL_OFF_OPMODE])
 #define PLL_FRAC(p)            ((p)->offset + (p)->regs[PLL_OFF_FRAC])
-#define PLL_CAL_VAL(p)         ((p)->offset + (p)->regs[PLL_OFF_CAL_VAL])
 
 const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
        [CLK_ALPHA_PLL_TYPE_DEFAULT] =  {
@@ -112,22 +111,6 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
                [PLL_OFF_CONFIG_CTL_U1] = 0x20,
                [PLL_OFF_TEST_CTL] = 0x24,
                [PLL_OFF_TEST_CTL_U] = 0x28,
-               [PLL_OFF_STATUS] = 0x30,
-               [PLL_OFF_OPMODE] = 0x38,
-               [PLL_OFF_ALPHA_VAL] = 0x40,
-               [PLL_OFF_CAL_VAL] = 0x44,
-       },
-       [CLK_ALPHA_PLL_TYPE_LUCID] =  {
-               [PLL_OFF_L_VAL] = 0x04,
-               [PLL_OFF_CAL_L_VAL] = 0x08,
-               [PLL_OFF_USER_CTL] = 0x0c,
-               [PLL_OFF_USER_CTL_U] = 0x10,
-               [PLL_OFF_USER_CTL_U1] = 0x14,
-               [PLL_OFF_CONFIG_CTL] = 0x18,
-               [PLL_OFF_CONFIG_CTL_U] = 0x1c,
-               [PLL_OFF_CONFIG_CTL_U1] = 0x20,
-               [PLL_OFF_TEST_CTL] = 0x24,
-               [PLL_OFF_TEST_CTL_U] = 0x28,
                [PLL_OFF_TEST_CTL_U1] = 0x2c,
                [PLL_OFF_STATUS] = 0x30,
                [PLL_OFF_OPMODE] = 0x38,
@@ -156,9 +139,12 @@ EXPORT_SYMBOL_GPL(clk_alpha_pll_regs);
 #define PLL_OUT_MASK           0x7
 #define PLL_RATE_MARGIN                500
 
+/* TRION PLL specific settings and offsets */
+#define TRION_PLL_CAL_VAL      0x44
+#define TRION_PCAL_DONE                BIT(26)
+
 /* LUCID PLL specific settings and offsets */
-#define LUCID_PLL_CAL_VAL      0x44
-#define LUCID_PCAL_DONE                BIT(26)
+#define LUCID_PCAL_DONE                BIT(27)
 
 #define pll_alpha_width(p)                                     \
                ((PLL_ALPHA_VAL_U(p) - PLL_ALPHA_VAL(p) == 4) ? \
@@ -912,14 +898,14 @@ const struct clk_ops clk_alpha_pll_hwfsm_ops = {
 };
 EXPORT_SYMBOL_GPL(clk_alpha_pll_hwfsm_ops);
 
-const struct clk_ops clk_trion_fixed_pll_ops = {
+const struct clk_ops clk_alpha_pll_fixed_trion_ops = {
        .enable = clk_trion_pll_enable,
        .disable = clk_trion_pll_disable,
        .is_enabled = clk_trion_pll_is_enabled,
        .recalc_rate = clk_trion_pll_recalc_rate,
        .round_rate = clk_alpha_pll_round_rate,
 };
-EXPORT_SYMBOL_GPL(clk_trion_fixed_pll_ops);
+EXPORT_SYMBOL_GPL(clk_alpha_pll_fixed_trion_ops);
 
 static unsigned long
 clk_alpha_pll_postdiv_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
@@ -1339,12 +1325,12 @@ clk_trion_pll_postdiv_set_rate(struct clk_hw *hw, unsigned long rate,
                                  val << PLL_POST_DIV_SHIFT);
 }
 
-const struct clk_ops clk_trion_pll_postdiv_ops = {
+const struct clk_ops clk_alpha_pll_postdiv_trion_ops = {
        .recalc_rate = clk_trion_pll_postdiv_recalc_rate,
        .round_rate = clk_trion_pll_postdiv_round_rate,
        .set_rate = clk_trion_pll_postdiv_set_rate,
 };
-EXPORT_SYMBOL_GPL(clk_trion_pll_postdiv_ops);
+EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_trion_ops);
 
 static long clk_alpha_pll_postdiv_fabia_round_rate(struct clk_hw *hw,
                                unsigned long rate, unsigned long *prate)
@@ -1399,13 +1385,13 @@ EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_fabia_ops);
  * @regmap: register map
  * @config: configuration to apply for pll
  */
-void clk_lucid_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+void clk_trion_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
                             const struct alpha_pll_config *config)
 {
        if (config->l)
                regmap_write(regmap, PLL_L_VAL(pll), config->l);
 
-       regmap_write(regmap, PLL_CAL_L_VAL(pll), LUCID_PLL_CAL_VAL);
+       regmap_write(regmap, PLL_CAL_L_VAL(pll), TRION_PLL_CAL_VAL);
 
        if (config->alpha)
                regmap_write(regmap, PLL_ALPHA_VAL(pll), config->alpha);
@@ -1458,13 +1444,13 @@ void clk_lucid_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
        /* Place the PLL in STANDBY mode */
        regmap_update_bits(regmap, PLL_MODE(pll), PLL_RESET_N, PLL_RESET_N);
 }
-EXPORT_SYMBOL_GPL(clk_lucid_pll_configure);
+EXPORT_SYMBOL_GPL(clk_trion_pll_configure);
 
 /*
- * The Lucid PLL requires a power-on self-calibration which happens when the
+ * The TRION PLL requires a power-on self-calibration which happens when the
  * PLL comes out of reset. Calibrate in case it is not completed.
  */
-static int alpha_pll_lucid_prepare(struct clk_hw *hw)
+static int __alpha_pll_trion_prepare(struct clk_hw *hw, u32 pcal_done)
 {
        struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
        u32 regval;
@@ -1472,7 +1458,7 @@ static int alpha_pll_lucid_prepare(struct clk_hw *hw)
 
        /* Return early if calibration is not needed. */
        regmap_read(pll->clkr.regmap, PLL_STATUS(pll), &regval);
-       if (regval & LUCID_PCAL_DONE)
+       if (regval & pcal_done)
                return 0;
 
        /* On/off to calibrate */
@@ -1483,7 +1469,17 @@ static int alpha_pll_lucid_prepare(struct clk_hw *hw)
        return ret;
 }
 
-static int alpha_pll_lucid_set_rate(struct clk_hw *hw, unsigned long rate,
+static int alpha_pll_trion_prepare(struct clk_hw *hw)
+{
+       return __alpha_pll_trion_prepare(hw, TRION_PCAL_DONE);
+}
+
+static int alpha_pll_lucid_prepare(struct clk_hw *hw)
+{
+       return __alpha_pll_trion_prepare(hw, LUCID_PCAL_DONE);
+}
+
+static int alpha_pll_trion_set_rate(struct clk_hw *hw, unsigned long rate,
                                    unsigned long prate)
 {
        struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
@@ -1537,25 +1533,27 @@ static int alpha_pll_lucid_set_rate(struct clk_hw *hw, unsigned long rate,
        return 0;
 }
 
-const struct clk_ops clk_alpha_pll_lucid_ops = {
-       .prepare = alpha_pll_lucid_prepare,
+const struct clk_ops clk_alpha_pll_trion_ops = {
+       .prepare = alpha_pll_trion_prepare,
        .enable = clk_trion_pll_enable,
        .disable = clk_trion_pll_disable,
        .is_enabled = clk_trion_pll_is_enabled,
        .recalc_rate = clk_trion_pll_recalc_rate,
        .round_rate = clk_alpha_pll_round_rate,
-       .set_rate = alpha_pll_lucid_set_rate,
+       .set_rate = alpha_pll_trion_set_rate,
 };
-EXPORT_SYMBOL_GPL(clk_alpha_pll_lucid_ops);
+EXPORT_SYMBOL_GPL(clk_alpha_pll_trion_ops);
 
-const struct clk_ops clk_alpha_pll_fixed_lucid_ops = {
+const struct clk_ops clk_alpha_pll_lucid_ops = {
+       .prepare = alpha_pll_lucid_prepare,
        .enable = clk_trion_pll_enable,
        .disable = clk_trion_pll_disable,
        .is_enabled = clk_trion_pll_is_enabled,
        .recalc_rate = clk_trion_pll_recalc_rate,
        .round_rate = clk_alpha_pll_round_rate,
+       .set_rate = alpha_pll_trion_set_rate,
 };
-EXPORT_SYMBOL_GPL(clk_alpha_pll_fixed_lucid_ops);
+EXPORT_SYMBOL_GPL(clk_alpha_pll_lucid_ops);
 
 const struct clk_ops clk_alpha_pll_postdiv_lucid_ops = {
        .recalc_rate = clk_alpha_pll_postdiv_fabia_recalc_rate,
index 1ba82be..d3201b8 100644 (file)
@@ -14,7 +14,7 @@ enum {
        CLK_ALPHA_PLL_TYPE_BRAMMO,
        CLK_ALPHA_PLL_TYPE_FABIA,
        CLK_ALPHA_PLL_TYPE_TRION,
-       CLK_ALPHA_PLL_TYPE_LUCID,
+       CLK_ALPHA_PLL_TYPE_LUCID = CLK_ALPHA_PLL_TYPE_TRION,
        CLK_ALPHA_PLL_TYPE_MAX,
 };
 
@@ -134,18 +134,23 @@ extern const struct clk_ops clk_alpha_pll_fabia_ops;
 extern const struct clk_ops clk_alpha_pll_fixed_fabia_ops;
 extern const struct clk_ops clk_alpha_pll_postdiv_fabia_ops;
 
+extern const struct clk_ops clk_alpha_pll_trion_ops;
+extern const struct clk_ops clk_alpha_pll_fixed_trion_ops;
+extern const struct clk_ops clk_alpha_pll_postdiv_trion_ops;
+
 extern const struct clk_ops clk_alpha_pll_lucid_ops;
-extern const struct clk_ops clk_alpha_pll_fixed_lucid_ops;
+#define clk_alpha_pll_fixed_lucid_ops clk_alpha_pll_fixed_trion_ops
 extern const struct clk_ops clk_alpha_pll_postdiv_lucid_ops;
 
 void clk_alpha_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
                             const struct alpha_pll_config *config);
 void clk_fabia_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
                                const struct alpha_pll_config *config);
-void clk_lucid_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+void clk_trion_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
                             const struct alpha_pll_config *config);
+#define clk_lucid_pll_configure(pll, regmap, config) \
+       clk_trion_pll_configure(pll, regmap, config)
+
 
-extern const struct clk_ops clk_trion_fixed_pll_ops;
-extern const struct clk_ops clk_trion_pll_postdiv_ops;
 
 #endif
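
With CLK_ALPHA_PLL_TYPE_LUCID now aliased to CLK_ALPHA_PLL_TYPE_TRION and clk_lucid_pll_configure() reduced to a macro over clk_trion_pll_configure(), existing Lucid users keep building unchanged while sharing the Trion implementation. A minimal sketch of what a caller sees after this change; the offset, parent name and config below are illustrative only, not taken from any real driver:

/*
 * Sketch only (not part of the patch): a Lucid PLL keeps using the Lucid
 * names, which now resolve to the shared Trion register layout, while
 * clk_alpha_pll_lucid_ops still supplies the Lucid-specific prepare
 * (LUCID_PCAL_DONE check).
 */
static struct clk_alpha_pll example_lucid_pll = {
        .offset = 0x100,        /* hypothetical register offset */
        .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],  /* == _TRION */
        .clkr = {
                .hw.init = &(struct clk_init_data){
                        .name = "example_lucid_pll",
                        .parent_data = &(const struct clk_parent_data){
                                .fw_name = "bi_tcxo",
                        },
                        .num_parents = 1,
                        .ops = &clk_alpha_pll_lucid_ops,
                },
        },
};

static void example_pll_init(struct regmap *regmap,
                             const struct alpha_pll_config *cfg)
{
        /* Macro alias: expands to clk_trion_pll_configure() */
        clk_lucid_pll_configure(&example_lucid_pll, regmap, cfg);
}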
index 538677b..68d8f7a 100644 (file)
@@ -2251,6 +2251,19 @@ static struct clk_branch gcc_mss_q6_memnoc_axi_clk = {
        },
 };
 
+static struct clk_branch gcc_lpass_cfg_noc_sway_clk = {
+       .halt_reg = 0x47018,
+       .halt_check = BRANCH_HALT_DELAY,
+       .clkr = {
+               .enable_reg = 0x47018,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_lpass_cfg_noc_sway_clk",
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
 static struct gdsc ufs_phy_gdsc = {
        .gdscr = 0x77004,
        .pd = {
@@ -2428,6 +2441,7 @@ static struct clk_regmap *gcc_sc7180_clocks[] = {
        [GCC_MSS_Q6_MEMNOC_AXI_CLK] = &gcc_mss_q6_memnoc_axi_clk.clkr,
        [GCC_MSS_SNOC_AXI_CLK] = &gcc_mss_snoc_axi_clk.clkr,
        [GCC_SEC_CTRL_CLK_SRC] = &gcc_sec_ctrl_clk_src.clkr,
+       [GCC_LPASS_CFG_NOC_SWAY_CLK] = &gcc_lpass_cfg_noc_sway_clk.clkr,
 };
 
 static const struct qcom_reset_map gcc_sc7180_resets[] = {
index bf57308..f0b47b7 100644 (file)
@@ -1715,6 +1715,9 @@ static struct clk_branch gcc_mss_cfg_ahb_clk = {
 
 static struct clk_branch gcc_mss_mnoc_bimc_axi_clk = {
        .halt_reg = 0x8a004,
+       .halt_check = BRANCH_HALT,
+       .hwcg_reg = 0x8a004,
+       .hwcg_bit = 1,
        .clkr = {
                .enable_reg = 0x8a004,
                .enable_mask = BIT(0),
@@ -2402,6 +2405,7 @@ static const struct qcom_reset_map gcc_sdm660_resets[] = {
        [GCC_USB_20_BCR] = { 0x2f000 },
        [GCC_USB_30_BCR] = { 0xf000 },
        [GCC_USB_PHY_CFG_AHB2PHY_BCR] = { 0x6a000 },
+       [GCC_MSS_RESTART] = { 0x79000 },
 };
 
 static const struct regmap_config gcc_sdm660_regmap_config = {
index 72524cf..8e9b5b3 100644 (file)
@@ -34,14 +34,8 @@ enum {
        P_SLEEP_CLK,
 };
 
-static const struct pll_vco trion_vco[] = {
-       { 249600000, 2000000000, 0 },
-};
-
 static struct clk_alpha_pll gpll0 = {
        .offset = 0x0,
-       .vco_table = trion_vco,
-       .num_vco = ARRAY_SIZE(trion_vco),
        .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION],
        .clkr = {
                .enable_reg = 0x52000,
@@ -53,7 +47,7 @@ static struct clk_alpha_pll gpll0 = {
                                .name = "bi_tcxo",
                        },
                        .num_parents = 1,
-                       .ops = &clk_trion_fixed_pll_ops,
+                       .ops = &clk_alpha_pll_fixed_trion_ops,
                },
        },
 };
@@ -79,14 +73,12 @@ static struct clk_alpha_pll_postdiv gpll0_out_even = {
                        .hw = &gpll0.clkr.hw,
                },
                .num_parents = 1,
-               .ops = &clk_trion_pll_postdiv_ops,
+               .ops = &clk_alpha_pll_postdiv_trion_ops,
        },
 };
 
 static struct clk_alpha_pll gpll7 = {
        .offset = 0x1a000,
-       .vco_table = trion_vco,
-       .num_vco = ARRAY_SIZE(trion_vco),
        .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION],
        .clkr = {
                .enable_reg = 0x52000,
@@ -98,15 +90,13 @@ static struct clk_alpha_pll gpll7 = {
                                .name = "bi_tcxo",
                        },
                        .num_parents = 1,
-                       .ops = &clk_trion_fixed_pll_ops,
+                       .ops = &clk_alpha_pll_fixed_trion_ops,
                },
        },
 };
 
 static struct clk_alpha_pll gpll9 = {
        .offset = 0x1c000,
-       .vco_table = trion_vco,
-       .num_vco = ARRAY_SIZE(trion_vco),
        .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION],
        .clkr = {
                .enable_reg = 0x52000,
@@ -118,7 +108,7 @@ static struct clk_alpha_pll gpll9 = {
                                .name = "bi_tcxo",
                        },
                        .num_parents = 1,
-                       .ops = &clk_trion_fixed_pll_ops,
+                       .ops = &clk_alpha_pll_fixed_trion_ops,
                },
        },
 };
@@ -1617,6 +1607,7 @@ static struct clk_branch gcc_gpu_cfg_ahb_clk = {
 };
 
 static struct clk_branch gcc_gpu_gpll0_clk_src = {
+       .halt_check = BRANCH_HALT_SKIP,
        .clkr = {
                .enable_reg = 0x52004,
                .enable_mask = BIT(15),
@@ -1632,13 +1623,14 @@ static struct clk_branch gcc_gpu_gpll0_clk_src = {
 };
 
 static struct clk_branch gcc_gpu_gpll0_div_clk_src = {
+       .halt_check = BRANCH_HALT_SKIP,
        .clkr = {
                .enable_reg = 0x52004,
                .enable_mask = BIT(16),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_gpu_gpll0_div_clk_src",
                        .parent_hws = (const struct clk_hw *[]){
-                               &gcc_gpu_gpll0_clk_src.clkr.hw },
+                               &gpll0_out_even.clkr.hw },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
@@ -1729,6 +1721,7 @@ static struct clk_branch gcc_npu_cfg_ahb_clk = {
 };
 
 static struct clk_branch gcc_npu_gpll0_clk_src = {
+       .halt_check = BRANCH_HALT_SKIP,
        .clkr = {
                .enable_reg = 0x52004,
                .enable_mask = BIT(18),
@@ -1744,13 +1737,14 @@ static struct clk_branch gcc_npu_gpll0_clk_src = {
 };
 
 static struct clk_branch gcc_npu_gpll0_div_clk_src = {
+       .halt_check = BRANCH_HALT_SKIP,
        .clkr = {
                .enable_reg = 0x52004,
                .enable_mask = BIT(19),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_npu_gpll0_div_clk_src",
                        .parent_hws = (const struct clk_hw *[]){
-                               &gcc_npu_gpll0_clk_src.clkr.hw },
+                               &gpll0_out_even.clkr.hw },
                        .num_parents = 1,
                        .flags = CLK_SET_RATE_PARENT,
                        .ops = &clk_branch2_ops,
index 04944f1..bfc4ac0 100644 (file)
@@ -6,6 +6,7 @@
 #include <linux/bitops.h>
 #include <linux/delay.h>
 #include <linux/err.h>
+#include <linux/export.h>
 #include <linux/jiffies.h>
 #include <linux/kernel.h>
 #include <linux/ktime.h>
@@ -29,6 +30,7 @@
 /* CFG_GDSCR */
 #define GDSC_POWER_UP_COMPLETE         BIT(16)
 #define GDSC_POWER_DOWN_COMPLETE       BIT(15)
+#define GDSC_RETAIN_FF_ENABLE          BIT(11)
 #define CFG_GDSCR_OFFSET               0x4
 
 /* Wait 2^n CXO cycles between all states. Here, n=2 (4 cycles). */
@@ -216,6 +218,14 @@ static inline void gdsc_assert_reset_aon(struct gdsc *sc)
        regmap_update_bits(sc->regmap, sc->clamp_io_ctrl,
                           GMEM_RESET_MASK, 0);
 }
+
+static void gdsc_retain_ff_on(struct gdsc *sc)
+{
+       u32 mask = GDSC_RETAIN_FF_ENABLE;
+
+       regmap_update_bits(sc->regmap, sc->gdscr, mask, mask);
+}
+
 static int gdsc_enable(struct generic_pm_domain *domain)
 {
        struct gdsc *sc = domain_to_gdsc(domain);
@@ -268,6 +278,9 @@ static int gdsc_enable(struct generic_pm_domain *domain)
                udelay(1);
        }
 
+       if (sc->flags & RETAIN_FF_ENABLE)
+               gdsc_retain_ff_on(sc);
+
        return 0;
 }
 
@@ -433,3 +446,29 @@ void gdsc_unregister(struct gdsc_desc *desc)
        }
        of_genpd_del_provider(dev->of_node);
 }
+
+/*
+ * On SDM845+ the GPU GX domain is *almost* entirely controlled by the GMU
+ * running in the CX domain so the CPU doesn't need to know anything about the
+ * GX domain EXCEPT....
+ *
+ * Hardware constraints dictate that the GX be powered down before the CX. If
+ * the GMU crashes it could leave the GX on. In order to successfully bring back
+ * the device the CPU needs to disable the GX headswitch. Since there is no
+ * sane way to reach in and touch that register from deep inside the GPU
+ * driver, we need infrastructure that lets the GPU driver guarantee that the
+ * GX is off during this super special case. We do this by
+ * defining a GX gdsc with a dummy enable function and a "default" disable
+ * function.
+ *
+ * This allows us to attach with genpd_dev_pm_attach_by_name() in the GPU
+ * driver. During power up, nothing will happen from the CPU (the GMU will
+ * power up normally), but during power down this will ensure that the GX
+ * domain is *really* off - this gives us a semi-standard way of doing what we
+ * need.
+ */
+int gdsc_gx_do_nothing_enable(struct generic_pm_domain *domain)
+{
+       /* Do nothing but give genpd the impression that we were successful */
+       return 0;
+}
+EXPORT_SYMBOL_GPL(gdsc_gx_do_nothing_enable);
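
On the consumer side, a GPU driver can now attach to a GX domain registered with this helper and force it off when recovering from a GMU crash. A rough sketch of that usage, loosely modelled on the msm/adreno GMU code; the function names and the "gx" domain name are illustrative assumptions, and error handling is trimmed:

/*
 * Consumer-side sketch (illustrative only): attach to the "gx" power domain
 * by name, then drop a runtime PM reference so genpd runs the real power_off
 * when the GX headswitch must be forced off.
 */
#include <linux/err.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>

static struct device *example_attach_gx(struct device *gmu_dev)
{
        /* Binds to the gdsc whose .power_on is gdsc_gx_do_nothing_enable */
        return dev_pm_domain_attach_by_name(gmu_dev, "gx");
}

static void example_force_gx_off(struct device *gxpd)
{
        if (!IS_ERR_OR_NULL(gxpd))
                pm_runtime_put_sync(gxpd);
}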
index c36fc26..bd53743 100644 (file)
@@ -50,6 +50,7 @@ struct gdsc {
 #define AON_RESET      BIT(4)
 #define POLL_CFG_GDSCR BIT(5)
 #define ALWAYS_ON      BIT(6)
+#define RETAIN_FF_ENABLE       BIT(7)
        struct reset_controller_dev     *rcdev;
        unsigned int                    *resets;
        unsigned int                    reset_count;
@@ -68,6 +69,7 @@ struct gdsc_desc {
 int gdsc_register(struct gdsc_desc *desc, struct reset_controller_dev *,
                  struct regmap *);
 void gdsc_unregister(struct gdsc_desc *desc);
+int gdsc_gx_do_nothing_enable(struct generic_pm_domain *domain);
 #else
 static inline int gdsc_register(struct gdsc_desc *desc,
                                struct reset_controller_dev *rcdev,
index 7b656b6..88a739b 100644 (file)
@@ -170,37 +170,12 @@ static struct gdsc cx_gdsc = {
        .flags = VOTABLE,
 };
 
-/*
- * On SC7180 the GPU GX domain is *almost* entirely controlled by the GMU
- * running in the CX domain so the CPU doesn't need to know anything about the
- * GX domain EXCEPT....
- *
- * Hardware constraints dictate that the GX be powered down before the CX. If
- * the GMU crashes it could leave the GX on. In order to successfully bring back
- * the device the CPU needs to disable the GX headswitch. There being no sane
- * way to reach in and touch that register from deep inside the GPU driver we
- * need to set up the infrastructure to be able to ensure that the GPU can
- * ensure that the GX is off during this super special case. We do this by
- * defining a GX gdsc with a dummy enable function and a "default" disable
- * function.
- *
- * This allows us to attach with genpd_dev_pm_attach_by_name() in the GPU
- * driver. During power up, nothing will happen from the CPU (and the GMU will
- * power up normally but during power down this will ensure that the GX domain
- * is *really* off - this gives us a semi standard way of doing what we need.
- */
-static int gx_gdsc_enable(struct generic_pm_domain *domain)
-{
-       /* Do nothing but give genpd the impression that we were successful */
-       return 0;
-}
-
 static struct gdsc gx_gdsc = {
        .gdscr = 0x100c,
        .clamp_io_ctrl = 0x1508,
        .pd = {
                .name = "gx_gdsc",
-               .power_on = gx_gdsc_enable,
+               .power_on = gdsc_gx_do_nothing_enable,
        },
        .pwrsts = PWRSTS_OFF_ON,
        .flags = CLAMP_IO,
index e40efba..5663698 100644 (file)
@@ -131,37 +131,12 @@ static struct gdsc gpu_cx_gdsc = {
        .flags = VOTABLE,
 };
 
-/*
- * On SDM845 the GPU GX domain is *almost* entirely controlled by the GMU
- * running in the CX domain so the CPU doesn't need to know anything about the
- * GX domain EXCEPT....
- *
- * Hardware constraints dictate that the GX be powered down before the CX. If
- * the GMU crashes it could leave the GX on. In order to successfully bring back
- * the device the CPU needs to disable the GX headswitch. There being no sane
- * way to reach in and touch that register from deep inside the GPU driver we
- * need to set up the infrastructure to be able to ensure that the GPU can
- * ensure that the GX is off during this super special case. We do this by
- * defining a GX gdsc with a dummy enable function and a "default" disable
- * function.
- *
- * This allows us to attach with genpd_dev_pm_attach_by_name() in the GPU
- * driver. During power up, nothing will happen from the CPU (and the GMU will
- * power up normally but during power down this will ensure that the GX domain
- * is *really* off - this gives us a semi standard way of doing what we need.
- */
-static int gx_gdsc_enable(struct generic_pm_domain *domain)
-{
-       /* Do nothing but give genpd the impression that we were successful */
-       return 0;
-}
-
 static struct gdsc gpu_gx_gdsc = {
        .gdscr = 0x100c,
        .clamp_io_ctrl = 0x1508,
        .pd = {
                .name = "gpu_gx_gdsc",
-               .power_on = gx_gdsc_enable,
+               .power_on = gdsc_gx_do_nothing_enable,
        },
        .pwrsts = PWRSTS_OFF_ON,
        .flags = CLAMP_IO | AON_RESET | POLL_CFG_GDSCR,
diff --git a/drivers/clk/qcom/gpucc-sm8150.c b/drivers/clk/qcom/gpucc-sm8150.c
new file mode 100644 (file)
index 0000000..27c4075
--- /dev/null
@@ -0,0 +1,320 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,gpucc-sm8150.h>
+
+#include "common.h"
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "reset.h"
+#include "gdsc.h"
+
+enum {
+       P_BI_TCXO,
+       P_CORE_BI_PLL_TEST_SE,
+       P_GPLL0_OUT_MAIN,
+       P_GPLL0_OUT_MAIN_DIV,
+       P_GPU_CC_PLL1_OUT_MAIN,
+};
+
+static const struct pll_vco trion_vco[] = {
+       { 249600000, 2000000000, 0 },
+};
+
+static struct alpha_pll_config gpu_cc_pll1_config = {
+       .l = 0x1a,
+       .alpha = 0xaaa,
+       .config_ctl_val = 0x20485699,
+       .config_ctl_hi_val = 0x00002267,
+       .config_ctl_hi1_val = 0x00000024,
+       .test_ctl_val = 0x00000000,
+       .test_ctl_hi_val = 0x00000002,
+       .test_ctl_hi1_val = 0x00000000,
+       .user_ctl_val = 0x00000000,
+       .user_ctl_hi_val = 0x00000805,
+       .user_ctl_hi1_val = 0x000000d0,
+};
+
+static struct clk_alpha_pll gpu_cc_pll1 = {
+       .offset = 0x100,
+       .vco_table = trion_vco,
+       .num_vco = ARRAY_SIZE(trion_vco),
+       .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION],
+       .clkr = {
+               .hw.init = &(struct clk_init_data){
+                       .name = "gpu_cc_pll1",
+                       .parent_data =  &(const struct clk_parent_data){
+                               .fw_name = "bi_tcxo",
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_alpha_pll_trion_ops,
+               },
+       },
+};
+
+static const struct parent_map gpu_cc_parent_map_0[] = {
+       { P_BI_TCXO, 0 },
+       { P_GPU_CC_PLL1_OUT_MAIN, 3 },
+       { P_GPLL0_OUT_MAIN, 5 },
+       { P_GPLL0_OUT_MAIN_DIV, 6 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_0[] = {
+       { .fw_name = "bi_tcxo" },
+       { .hw = &gpu_cc_pll1.clkr.hw },
+       { .fw_name = "gcc_gpu_gpll0_clk_src" },
+       { .fw_name = "gcc_gpu_gpll0_div_clk_src" },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_gmu_clk_src[] = {
+       F(19200000, P_BI_TCXO, 1, 0, 0),
+       F(200000000, P_GPLL0_OUT_MAIN_DIV, 1.5, 0, 0),
+       F(500000000, P_GPU_CC_PLL1_OUT_MAIN, 1, 0, 0),
+       { }
+};
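/*
 * Aside (not part of the patch), hedged: assuming the common
 * drivers/clk/qcom/clk-rcg.h helper
 *   #define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
 * the half-integer divider in F(200000000, P_GPLL0_OUT_MAIN_DIV, 1.5, 0, 0)
 * encodes as a pre-divider field of 2 * 1.5 - 1 = 2 (divide-by-1.5 in
 * half-step units), while h = 1 in the other rows encodes as 1.
 */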
+
+static struct clk_rcg2 gpu_cc_gmu_clk_src = {
+       .cmd_rcgr = 0x1120,
+       .mnd_width = 0,
+       .hid_width = 5,
+       .parent_map = gpu_cc_parent_map_0,
+       .freq_tbl = ftbl_gpu_cc_gmu_clk_src,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "gpu_cc_gmu_clk_src",
+               .parent_data = gpu_cc_parent_data_0,
+               .num_parents = ARRAY_SIZE(gpu_cc_parent_data_0),
+               .flags = CLK_SET_RATE_PARENT,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_branch gpu_cc_ahb_clk = {
+       .halt_reg = 0x1078,
+       .halt_check = BRANCH_HALT_DELAY,
+       .clkr = {
+               .enable_reg = 0x1078,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gpu_cc_ahb_clk",
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gpu_cc_crc_ahb_clk = {
+       .halt_reg = 0x107c,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x107c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gpu_cc_crc_ahb_clk",
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gpu_cc_cx_apb_clk = {
+       .halt_reg = 0x1088,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x1088,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gpu_cc_cx_apb_clk",
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gpu_cc_cx_gmu_clk = {
+       .halt_reg = 0x1098,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x1098,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gpu_cc_cx_gmu_clk",
+                       .parent_data =  &(const struct clk_parent_data){
+                               .hw = &gpu_cc_gmu_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gpu_cc_cx_snoc_dvm_clk = {
+       .halt_reg = 0x108c,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x108c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gpu_cc_cx_snoc_dvm_clk",
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gpu_cc_cxo_aon_clk = {
+       .halt_reg = 0x1004,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x1004,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gpu_cc_cxo_aon_clk",
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gpu_cc_cxo_clk = {
+       .halt_reg = 0x109c,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x109c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gpu_cc_cxo_clk",
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gpu_cc_gx_gmu_clk = {
+       .halt_reg = 0x1064,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x1064,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gpu_cc_gx_gmu_clk",
+                       .parent_data =  &(const struct clk_parent_data){
+                               .hw = &gpu_cc_gmu_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct gdsc gpu_cx_gdsc = {
+       .gdscr = 0x106c,
+       .gds_hw_ctrl = 0x1540,
+       .pd = {
+               .name = "gpu_cx_gdsc",
+       },
+       .pwrsts = PWRSTS_OFF_ON,
+       .flags = VOTABLE,
+};
+
+static struct gdsc gpu_gx_gdsc = {
+       .gdscr = 0x100c,
+       .clamp_io_ctrl = 0x1508,
+       .pd = {
+               .name = "gpu_gx_gdsc",
+               .power_on = gdsc_gx_do_nothing_enable,
+       },
+       .pwrsts = PWRSTS_OFF_ON,
+       .flags = CLAMP_IO | AON_RESET | POLL_CFG_GDSCR,
+};
+
+static struct clk_regmap *gpu_cc_sm8150_clocks[] = {
+       [GPU_CC_AHB_CLK] = &gpu_cc_ahb_clk.clkr,
+       [GPU_CC_CRC_AHB_CLK] = &gpu_cc_crc_ahb_clk.clkr,
+       [GPU_CC_CX_APB_CLK] = &gpu_cc_cx_apb_clk.clkr,
+       [GPU_CC_CX_GMU_CLK] = &gpu_cc_cx_gmu_clk.clkr,
+       [GPU_CC_CX_SNOC_DVM_CLK] = &gpu_cc_cx_snoc_dvm_clk.clkr,
+       [GPU_CC_CXO_AON_CLK] = &gpu_cc_cxo_aon_clk.clkr,
+       [GPU_CC_CXO_CLK] = &gpu_cc_cxo_clk.clkr,
+       [GPU_CC_GMU_CLK_SRC] = &gpu_cc_gmu_clk_src.clkr,
+       [GPU_CC_GX_GMU_CLK] = &gpu_cc_gx_gmu_clk.clkr,
+       [GPU_CC_PLL1] = &gpu_cc_pll1.clkr,
+};
+
+static const struct qcom_reset_map gpu_cc_sm8150_resets[] = {
+       [GPUCC_GPU_CC_CX_BCR] = { 0x1068 },
+       [GPUCC_GPU_CC_GMU_BCR] = { 0x111c },
+       [GPUCC_GPU_CC_GX_BCR] = { 0x1008 },
+       [GPUCC_GPU_CC_SPDM_BCR] = { 0x1110 },
+       [GPUCC_GPU_CC_XO_BCR] = { 0x1000 },
+};
+
+static struct gdsc *gpu_cc_sm8150_gdscs[] = {
+       [GPU_CX_GDSC] = &gpu_cx_gdsc,
+       [GPU_GX_GDSC] = &gpu_gx_gdsc,
+};
+
+static const struct regmap_config gpu_cc_sm8150_regmap_config = {
+       .reg_bits       = 32,
+       .reg_stride     = 4,
+       .val_bits       = 32,
+       .max_register   = 0x8008,
+       .fast_io        = true,
+};
+
+static const struct qcom_cc_desc gpu_cc_sm8150_desc = {
+       .config = &gpu_cc_sm8150_regmap_config,
+       .clks = gpu_cc_sm8150_clocks,
+       .num_clks = ARRAY_SIZE(gpu_cc_sm8150_clocks),
+       .resets = gpu_cc_sm8150_resets,
+       .num_resets = ARRAY_SIZE(gpu_cc_sm8150_resets),
+       .gdscs = gpu_cc_sm8150_gdscs,
+       .num_gdscs = ARRAY_SIZE(gpu_cc_sm8150_gdscs),
+};
+
+static const struct of_device_id gpu_cc_sm8150_match_table[] = {
+       { .compatible = "qcom,sm8150-gpucc" },
+       { }
+};
+MODULE_DEVICE_TABLE(of, gpu_cc_sm8150_match_table);
+
+static int gpu_cc_sm8150_probe(struct platform_device *pdev)
+{
+       struct regmap *regmap;
+
+       regmap = qcom_cc_map(pdev, &gpu_cc_sm8150_desc);
+       if (IS_ERR(regmap))
+               return PTR_ERR(regmap);
+
+       clk_trion_pll_configure(&gpu_cc_pll1, regmap, &gpu_cc_pll1_config);
+
+       return qcom_cc_really_probe(pdev, &gpu_cc_sm8150_desc, regmap);
+}
+
+static struct platform_driver gpu_cc_sm8150_driver = {
+       .probe = gpu_cc_sm8150_probe,
+       .driver = {
+               .name = "sm8150-gpucc",
+               .of_match_table = gpu_cc_sm8150_match_table,
+       },
+};
+
+static int __init gpu_cc_sm8150_init(void)
+{
+       return platform_driver_register(&gpu_cc_sm8150_driver);
+}
+subsys_initcall(gpu_cc_sm8150_init);
+
+static void __exit gpu_cc_sm8150_exit(void)
+{
+       platform_driver_unregister(&gpu_cc_sm8150_driver);
+}
+module_exit(gpu_cc_sm8150_exit);
+
+MODULE_DESCRIPTION("QTI GPUCC SM8150 Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/gpucc-sm8250.c b/drivers/clk/qcom/gpucc-sm8250.c
new file mode 100644 (file)
index 0000000..3fa7d1f
--- /dev/null
@@ -0,0 +1,348 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,gpucc-sm8250.h>
+
+#include "common.h"
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "reset.h"
+#include "gdsc.h"
+
+#define CX_GMU_CBCR_SLEEP_MASK         0xf
+#define CX_GMU_CBCR_SLEEP_SHIFT                4
+#define CX_GMU_CBCR_WAKE_MASK          0xf
+#define CX_GMU_CBCR_WAKE_SHIFT         8
+
+enum {
+       P_BI_TCXO,
+       P_CORE_BI_PLL_TEST_SE,
+       P_GPLL0_OUT_MAIN,
+       P_GPLL0_OUT_MAIN_DIV,
+       P_GPU_CC_PLL0_OUT_MAIN,
+       P_GPU_CC_PLL1_OUT_MAIN,
+};
+
+static struct pll_vco lucid_vco[] = {
+       { 249600000, 2000000000, 0 },
+};
+
+static const struct alpha_pll_config gpu_cc_pll1_config = {
+       .l = 0x1a,
+       .alpha = 0xaaa,
+       .config_ctl_val = 0x20485699,
+       .config_ctl_hi_val = 0x00002261,
+       .config_ctl_hi1_val = 0x029a699c,
+       .user_ctl_val = 0x00000000,
+       .user_ctl_hi_val = 0x00000805,
+       .user_ctl_hi1_val = 0x00000000,
+};
+
+static struct clk_alpha_pll gpu_cc_pll1 = {
+       .offset = 0x100,
+       .vco_table = lucid_vco,
+       .num_vco = ARRAY_SIZE(lucid_vco),
+       .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+       .clkr = {
+               .hw.init = &(struct clk_init_data){
+                       .name = "gpu_cc_pll1",
+                       .parent_data =  &(const struct clk_parent_data){
+                               .fw_name = "bi_tcxo",
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_alpha_pll_lucid_ops,
+               },
+       },
+};
+
+static const struct parent_map gpu_cc_parent_map_0[] = {
+       { P_BI_TCXO, 0 },
+       { P_GPU_CC_PLL1_OUT_MAIN, 3 },
+       { P_GPLL0_OUT_MAIN, 5 },
+       { P_GPLL0_OUT_MAIN_DIV, 6 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_0[] = {
+       { .fw_name = "bi_tcxo" },
+       { .hw = &gpu_cc_pll1.clkr.hw },
+       { .fw_name = "gcc_gpu_gpll0_clk_src" },
+       { .fw_name = "gcc_gpu_gpll0_div_clk_src" },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_gmu_clk_src[] = {
+       F(19200000, P_BI_TCXO, 1, 0, 0),
+       F(200000000, P_GPLL0_OUT_MAIN_DIV, 1.5, 0, 0),
+       F(500000000, P_GPU_CC_PLL1_OUT_MAIN, 1, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 gpu_cc_gmu_clk_src = {
+       .cmd_rcgr = 0x1120,
+       .mnd_width = 0,
+       .hid_width = 5,
+       .parent_map = gpu_cc_parent_map_0,
+       .freq_tbl = ftbl_gpu_cc_gmu_clk_src,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "gpu_cc_gmu_clk_src",
+               .parent_data = gpu_cc_parent_data_0,
+               .num_parents = ARRAY_SIZE(gpu_cc_parent_data_0),
+               .flags = CLK_SET_RATE_PARENT,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_branch gpu_cc_ahb_clk = {
+       .halt_reg = 0x1078,
+       .halt_check = BRANCH_HALT_DELAY,
+       .clkr = {
+               .enable_reg = 0x1078,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gpu_cc_ahb_clk",
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gpu_cc_crc_ahb_clk = {
+       .halt_reg = 0x107c,
+       .halt_check = BRANCH_HALT_VOTED,
+       .clkr = {
+               .enable_reg = 0x107c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gpu_cc_crc_ahb_clk",
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gpu_cc_cx_apb_clk = {
+       .halt_reg = 0x1088,
+       .halt_check = BRANCH_HALT_VOTED,
+       .clkr = {
+               .enable_reg = 0x1088,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gpu_cc_cx_apb_clk",
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gpu_cc_cx_gmu_clk = {
+       .halt_reg = 0x1098,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x1098,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gpu_cc_cx_gmu_clk",
+                       .parent_data =  &(const struct clk_parent_data){
+                               .hw = &gpu_cc_gmu_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gpu_cc_cx_snoc_dvm_clk = {
+       .halt_reg = 0x108c,
+       .halt_check = BRANCH_HALT_VOTED,
+       .clkr = {
+               .enable_reg = 0x108c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gpu_cc_cx_snoc_dvm_clk",
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gpu_cc_cxo_aon_clk = {
+       .halt_reg = 0x1004,
+       .halt_check = BRANCH_HALT_VOTED,
+       .clkr = {
+               .enable_reg = 0x1004,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gpu_cc_cxo_aon_clk",
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gpu_cc_cxo_clk = {
+       .halt_reg = 0x109c,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x109c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gpu_cc_cxo_clk",
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gpu_cc_gx_gmu_clk = {
+       .halt_reg = 0x1064,
+       .halt_check = BRANCH_HALT,
+       .clkr = {
+               .enable_reg = 0x1064,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gpu_cc_gx_gmu_clk",
+                       .parent_data =  &(const struct clk_parent_data){
+                               .hw = &gpu_cc_gmu_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gpu_cc_hlos1_vote_gpu_smmu_clk = {
+       .halt_reg = 0x5000,
+       .halt_check = BRANCH_VOTED,
+       .clkr = {
+               .enable_reg = 0x5000,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                        .name = "gpu_cc_hlos1_vote_gpu_smmu_clk",
+                        .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct gdsc gpu_cx_gdsc = {
+       .gdscr = 0x106c,
+       .gds_hw_ctrl = 0x1540,
+       .pd = {
+               .name = "gpu_cx_gdsc",
+       },
+       .pwrsts = PWRSTS_OFF_ON,
+       .flags = VOTABLE,
+};
+
+static struct gdsc gpu_gx_gdsc = {
+       .gdscr = 0x100c,
+       .clamp_io_ctrl = 0x1508,
+       .pd = {
+               .name = "gpu_gx_gdsc",
+               .power_on = gdsc_gx_do_nothing_enable,
+       },
+       .pwrsts = PWRSTS_OFF_ON,
+       .flags = CLAMP_IO | AON_RESET | POLL_CFG_GDSCR,
+};
+
+static struct clk_regmap *gpu_cc_sm8250_clocks[] = {
+       [GPU_CC_AHB_CLK] = &gpu_cc_ahb_clk.clkr,
+       [GPU_CC_CRC_AHB_CLK] = &gpu_cc_crc_ahb_clk.clkr,
+       [GPU_CC_CX_APB_CLK] = &gpu_cc_cx_apb_clk.clkr,
+       [GPU_CC_CX_GMU_CLK] = &gpu_cc_cx_gmu_clk.clkr,
+       [GPU_CC_CX_SNOC_DVM_CLK] = &gpu_cc_cx_snoc_dvm_clk.clkr,
+       [GPU_CC_CXO_AON_CLK] = &gpu_cc_cxo_aon_clk.clkr,
+       [GPU_CC_CXO_CLK] = &gpu_cc_cxo_clk.clkr,
+       [GPU_CC_GMU_CLK_SRC] = &gpu_cc_gmu_clk_src.clkr,
+       [GPU_CC_GX_GMU_CLK] = &gpu_cc_gx_gmu_clk.clkr,
+       [GPU_CC_PLL1] = &gpu_cc_pll1.clkr,
+       [GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK] = &gpu_cc_hlos1_vote_gpu_smmu_clk.clkr,
+};
+
+static const struct qcom_reset_map gpu_cc_sm8250_resets[] = {
+       [GPUCC_GPU_CC_ACD_BCR] = { 0x1160 },
+       [GPUCC_GPU_CC_CX_BCR] = { 0x1068 },
+       [GPUCC_GPU_CC_GFX3D_AON_BCR] = { 0x10a0 },
+       [GPUCC_GPU_CC_GMU_BCR] = { 0x111c },
+       [GPUCC_GPU_CC_GX_BCR] = { 0x1008 },
+       [GPUCC_GPU_CC_XO_BCR] = { 0x1000 },
+};
+
+static struct gdsc *gpu_cc_sm8250_gdscs[] = {
+       [GPU_CX_GDSC] = &gpu_cx_gdsc,
+       [GPU_GX_GDSC] = &gpu_gx_gdsc,
+};
+
+static const struct regmap_config gpu_cc_sm8250_regmap_config = {
+       .reg_bits = 32,
+       .reg_stride = 4,
+       .val_bits = 32,
+       .max_register = 0x8008,
+       .fast_io = true,
+};
+
+static const struct qcom_cc_desc gpu_cc_sm8250_desc = {
+       .config = &gpu_cc_sm8250_regmap_config,
+       .clks = gpu_cc_sm8250_clocks,
+       .num_clks = ARRAY_SIZE(gpu_cc_sm8250_clocks),
+       .resets = gpu_cc_sm8250_resets,
+       .num_resets = ARRAY_SIZE(gpu_cc_sm8250_resets),
+       .gdscs = gpu_cc_sm8250_gdscs,
+       .num_gdscs = ARRAY_SIZE(gpu_cc_sm8250_gdscs),
+};
+
+static const struct of_device_id gpu_cc_sm8250_match_table[] = {
+       { .compatible = "qcom,sm8250-gpucc" },
+       { }
+};
+MODULE_DEVICE_TABLE(of, gpu_cc_sm8250_match_table);
+
+static int gpu_cc_sm8250_probe(struct platform_device *pdev)
+{
+       struct regmap *regmap;
+       unsigned int value, mask;
+
+       regmap = qcom_cc_map(pdev, &gpu_cc_sm8250_desc);
+       if (IS_ERR(regmap))
+               return PTR_ERR(regmap);
+
+       clk_lucid_pll_configure(&gpu_cc_pll1, regmap, &gpu_cc_pll1_config);
+
+       /*
+        * Configure gpu_cc_cx_gmu_clk with recommended
+        * wakeup/sleep settings
+        */
+       mask = CX_GMU_CBCR_WAKE_MASK << CX_GMU_CBCR_WAKE_SHIFT;
+       mask |= CX_GMU_CBCR_SLEEP_MASK << CX_GMU_CBCR_SLEEP_SHIFT;
+       value = 0xf << CX_GMU_CBCR_WAKE_SHIFT | 0xf << CX_GMU_CBCR_SLEEP_SHIFT;
+       regmap_update_bits(regmap, 0x1098, mask, value);
+
+       return qcom_cc_really_probe(pdev, &gpu_cc_sm8250_desc, regmap);
+}
+
+static struct platform_driver gpu_cc_sm8250_driver = {
+       .probe = gpu_cc_sm8250_probe,
+       .driver = {
+               .name = "sm8250-gpucc",
+               .of_match_table = gpu_cc_sm8250_match_table,
+       },
+};
+
+static int __init gpu_cc_sm8250_init(void)
+{
+       return platform_driver_register(&gpu_cc_sm8250_driver);
+}
+subsys_initcall(gpu_cc_sm8250_init);
+
+static void __exit gpu_cc_sm8250_exit(void)
+{
+       platform_driver_unregister(&gpu_cc_sm8250_driver);
+}
+module_exit(gpu_cc_sm8250_exit);
+
+MODULE_DESCRIPTION("QTI GPU_CC SM8250 Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/lpasscorecc-sc7180.c b/drivers/clk/qcom/lpasscorecc-sc7180.c
new file mode 100644 (file)
index 0000000..d4c1864
--- /dev/null
@@ -0,0 +1,476 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/pm_clock.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,lpasscorecc-sc7180.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "common.h"
+#include "gdsc.h"
+
+enum {
+       P_BI_TCXO,
+       P_LPASS_LPAAUDIO_DIG_PLL_OUT_ODD,
+       P_SLEEP_CLK,
+};
+
+static struct pll_vco fabia_vco[] = {
+       { 249600000, 2000000000, 0 },
+};
+
+static const struct alpha_pll_config lpass_lpaaudio_dig_pll_config = {
+       .l = 0x20,
+       .alpha = 0x0,
+       .config_ctl_val = 0x20485699,
+       .config_ctl_hi_val = 0x00002067,
+       .test_ctl_val = 0x40000000,
+       .test_ctl_hi_val = 0x00000000,
+       .user_ctl_val = 0x00005105,
+       .user_ctl_hi_val = 0x00004805,
+};
+
+static const u8 clk_alpha_pll_regs_offset[][PLL_OFF_MAX_REGS] = {
+       [CLK_ALPHA_PLL_TYPE_FABIA] =  {
+               [PLL_OFF_L_VAL] = 0x04,
+               [PLL_OFF_CAL_L_VAL] = 0x8,
+               [PLL_OFF_USER_CTL] = 0x0c,
+               [PLL_OFF_USER_CTL_U] = 0x10,
+               [PLL_OFF_USER_CTL_U1] = 0x14,
+               [PLL_OFF_CONFIG_CTL] = 0x18,
+               [PLL_OFF_CONFIG_CTL_U] = 0x1C,
+               [PLL_OFF_CONFIG_CTL_U1] = 0x20,
+               [PLL_OFF_TEST_CTL] = 0x24,
+               [PLL_OFF_TEST_CTL_U] = 0x28,
+               [PLL_OFF_STATUS] = 0x30,
+               [PLL_OFF_OPMODE] = 0x38,
+               [PLL_OFF_FRAC] = 0x40,
+       },
+};
+
+static struct clk_alpha_pll lpass_lpaaudio_dig_pll = {
+       .offset = 0x1000,
+       .vco_table = fabia_vco,
+       .num_vco = ARRAY_SIZE(fabia_vco),
+       .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_FABIA],
+       .clkr = {
+               .hw.init = &(struct clk_init_data){
+                       .name = "lpass_lpaaudio_dig_pll",
+                       .parent_data = &(const struct clk_parent_data){
+                               .fw_name = "bi_tcxo",
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_alpha_pll_fabia_ops,
+               },
+       },
+};
+
+static const struct clk_div_table
+                       post_div_table_lpass_lpaaudio_dig_pll_out_odd[] = {
+       { 0x5, 5 },
+       { }
+};
+
+static struct clk_alpha_pll_postdiv lpass_lpaaudio_dig_pll_out_odd = {
+       .offset = 0x1000,
+       .post_div_shift = 12,
+       .post_div_table = post_div_table_lpass_lpaaudio_dig_pll_out_odd,
+       .num_post_div =
+               ARRAY_SIZE(post_div_table_lpass_lpaaudio_dig_pll_out_odd),
+       .width = 4,
+       .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "lpass_lpaaudio_dig_pll_out_odd",
+               .parent_data = &(const struct clk_parent_data){
+                       .hw = &lpass_lpaaudio_dig_pll.clkr.hw,
+               },
+               .num_parents = 1,
+               .flags = CLK_SET_RATE_PARENT,
+               .ops = &clk_alpha_pll_postdiv_fabia_ops,
+       },
+};
+
+static const struct parent_map lpass_core_cc_parent_map_0[] = {
+       { P_BI_TCXO, 0 },
+       { P_LPASS_LPAAUDIO_DIG_PLL_OUT_ODD, 5 },
+};
+
+static const struct clk_parent_data lpass_core_cc_parent_data_0[] = {
+       { .fw_name = "bi_tcxo" },
+       { .hw = &lpass_lpaaudio_dig_pll_out_odd.clkr.hw },
+};
+
+static const struct parent_map lpass_core_cc_parent_map_2[] = {
+       { P_BI_TCXO, 0 },
+};
+
+static struct clk_rcg2 core_clk_src = {
+       .cmd_rcgr = 0x1d000,
+       .mnd_width = 8,
+       .hid_width = 5,
+       .parent_map = lpass_core_cc_parent_map_2,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "core_clk_src",
+               .parent_data = &(const struct clk_parent_data){
+                       .fw_name = "bi_tcxo",
+               },
+               .num_parents = 1,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_ext_mclk0_clk_src[] = {
+       F(9600000, P_BI_TCXO, 2, 0, 0),
+       F(19200000, P_BI_TCXO, 1, 0, 0),
+       { }
+};
+
+static const struct freq_tbl ftbl_ext_lpaif_clk_src[] = {
+       F(256000, P_LPASS_LPAAUDIO_DIG_PLL_OUT_ODD, 15, 1, 32),
+       F(512000, P_LPASS_LPAAUDIO_DIG_PLL_OUT_ODD, 15, 1, 16),
+       F(768000, P_LPASS_LPAAUDIO_DIG_PLL_OUT_ODD, 10, 1, 16),
+       F(1024000, P_LPASS_LPAAUDIO_DIG_PLL_OUT_ODD, 15, 1, 8),
+       F(1536000, P_LPASS_LPAAUDIO_DIG_PLL_OUT_ODD, 10, 1, 8),
+       F(2048000, P_LPASS_LPAAUDIO_DIG_PLL_OUT_ODD, 15, 1, 4),
+       F(3072000, P_LPASS_LPAAUDIO_DIG_PLL_OUT_ODD, 10, 1, 4),
+       F(4096000, P_LPASS_LPAAUDIO_DIG_PLL_OUT_ODD, 15, 1, 2),
+       F(6144000, P_LPASS_LPAAUDIO_DIG_PLL_OUT_ODD, 10, 1, 2),
+       F(8192000, P_LPASS_LPAAUDIO_DIG_PLL_OUT_ODD, 15, 0, 0),
+       F(9600000, P_BI_TCXO, 2, 0, 0),
+       F(12288000, P_LPASS_LPAAUDIO_DIG_PLL_OUT_ODD, 10, 0, 0),
+       F(19200000, P_BI_TCXO, 1, 0, 0),
+       F(24576000, P_LPASS_LPAAUDIO_DIG_PLL_OUT_ODD, 5, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 ext_mclk0_clk_src = {
+       .cmd_rcgr = 0x20000,
+       .mnd_width = 8,
+       .hid_width = 5,
+       .parent_map = lpass_core_cc_parent_map_0,
+       .freq_tbl = ftbl_ext_mclk0_clk_src,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "ext_mclk0_clk_src",
+               .parent_data = lpass_core_cc_parent_data_0,
+               .num_parents = 2,
+               .flags = CLK_SET_RATE_PARENT,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 lpaif_pri_clk_src = {
+       .cmd_rcgr = 0x10000,
+       .mnd_width = 16,
+       .hid_width = 5,
+       .parent_map = lpass_core_cc_parent_map_0,
+       .freq_tbl = ftbl_ext_lpaif_clk_src,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "lpaif_pri_clk_src",
+               .parent_data = lpass_core_cc_parent_data_0,
+               .num_parents = 2,
+               .flags = CLK_SET_RATE_PARENT,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 lpaif_sec_clk_src = {
+       .cmd_rcgr = 0x11000,
+       .mnd_width = 16,
+       .hid_width = 5,
+       .parent_map = lpass_core_cc_parent_map_0,
+       .freq_tbl = ftbl_ext_lpaif_clk_src,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "lpaif_sec_clk_src",
+               .parent_data = lpass_core_cc_parent_data_0,
+               .num_parents = 2,
+               .flags = CLK_SET_RATE_PARENT,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_branch lpass_audio_core_ext_mclk0_clk = {
+       .halt_reg = 0x20014,
+       .halt_check = BRANCH_HALT,
+       .hwcg_reg = 0x20014,
+       .hwcg_bit = 1,
+       .clkr = {
+               .enable_reg = 0x20014,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "lpass_audio_core_ext_mclk0_clk",
+                       .parent_data = &(const struct clk_parent_data){
+                               .hw = &ext_mclk0_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch lpass_audio_core_lpaif_pri_ibit_clk = {
+       .halt_reg = 0x10018,
+       .halt_check = BRANCH_HALT,
+       .hwcg_reg = 0x10018,
+       .hwcg_bit = 1,
+       .clkr = {
+               .enable_reg = 0x10018,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "lpass_audio_core_lpaif_pri_ibit_clk",
+                       .parent_data = &(const struct clk_parent_data){
+                               .hw = &lpaif_pri_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch lpass_audio_core_lpaif_sec_ibit_clk = {
+       .halt_reg = 0x11018,
+       .halt_check = BRANCH_HALT,
+       .hwcg_reg = 0x11018,
+       .hwcg_bit = 1,
+       .clkr = {
+               .enable_reg = 0x11018,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "lpass_audio_core_lpaif_sec_ibit_clk",
+                       .parent_data = &(const struct clk_parent_data){
+                               .hw = &lpaif_sec_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch lpass_audio_core_sysnoc_mport_core_clk = {
+       .halt_reg = 0x23000,
+       .halt_check = BRANCH_HALT,
+       .hwcg_reg = 0x23000,
+       .hwcg_bit = 1,
+       .clkr = {
+               .enable_reg = 0x23000,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "lpass_audio_core_sysnoc_mport_core_clk",
+                       .parent_data = &(const struct clk_parent_data){
+                               .hw = &core_clk_src.clkr.hw,
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_regmap *lpass_core_cc_sc7180_clocks[] = {
+       [EXT_MCLK0_CLK_SRC] = &ext_mclk0_clk_src.clkr,
+       [LPAIF_PRI_CLK_SRC] = &lpaif_pri_clk_src.clkr,
+       [LPAIF_SEC_CLK_SRC] = &lpaif_sec_clk_src.clkr,
+       [CORE_CLK_SRC] = &core_clk_src.clkr,
+       [LPASS_AUDIO_CORE_EXT_MCLK0_CLK] = &lpass_audio_core_ext_mclk0_clk.clkr,
+       [LPASS_AUDIO_CORE_LPAIF_PRI_IBIT_CLK] =
+               &lpass_audio_core_lpaif_pri_ibit_clk.clkr,
+       [LPASS_AUDIO_CORE_LPAIF_SEC_IBIT_CLK] =
+               &lpass_audio_core_lpaif_sec_ibit_clk.clkr,
+       [LPASS_AUDIO_CORE_SYSNOC_MPORT_CORE_CLK] =
+               &lpass_audio_core_sysnoc_mport_core_clk.clkr,
+       [LPASS_LPAAUDIO_DIG_PLL] = &lpass_lpaaudio_dig_pll.clkr,
+       [LPASS_LPAAUDIO_DIG_PLL_OUT_ODD] = &lpass_lpaaudio_dig_pll_out_odd.clkr,
+};
+
+static struct gdsc lpass_pdc_hm_gdsc = {
+       .gdscr = 0x3090,
+       .pd = {
+               .name = "lpass_pdc_hm_gdsc",
+       },
+       .pwrsts = PWRSTS_OFF_ON,
+       .flags = VOTABLE,
+};
+
+static struct gdsc lpass_audio_hm_gdsc = {
+       .gdscr = 0x9090,
+       .pd = {
+               .name = "lpass_audio_hm_gdsc",
+       },
+       .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc lpass_core_hm_gdsc = {
+       .gdscr = 0x0,
+       .pd = {
+               .name = "lpass_core_hm_gdsc",
+       },
+       .pwrsts = PWRSTS_OFF_ON,
+       .flags = RETAIN_FF_ENABLE,
+};
+
+static struct gdsc *lpass_core_hm_sc7180_gdscs[] = {
+       [LPASS_CORE_HM_GDSCR] = &lpass_core_hm_gdsc,
+};
+
+static struct gdsc *lpass_audio_hm_sc7180_gdscs[] = {
+       [LPASS_PDC_HM_GDSCR] = &lpass_pdc_hm_gdsc,
+       [LPASS_AUDIO_HM_GDSCR] = &lpass_audio_hm_gdsc,
+};
+
+static struct regmap_config lpass_core_cc_sc7180_regmap_config = {
+       .reg_bits = 32,
+       .reg_stride = 4,
+       .val_bits = 32,
+       .fast_io = true,
+};
+
+static const struct qcom_cc_desc lpass_core_hm_sc7180_desc = {
+       .config = &lpass_core_cc_sc7180_regmap_config,
+       .gdscs = lpass_core_hm_sc7180_gdscs,
+       .num_gdscs = ARRAY_SIZE(lpass_core_hm_sc7180_gdscs),
+};
+
+static const struct qcom_cc_desc lpass_core_cc_sc7180_desc = {
+       .config = &lpass_core_cc_sc7180_regmap_config,
+       .clks = lpass_core_cc_sc7180_clocks,
+       .num_clks = ARRAY_SIZE(lpass_core_cc_sc7180_clocks),
+};
+
+static const struct qcom_cc_desc lpass_audio_hm_sc7180_desc = {
+       .config = &lpass_core_cc_sc7180_regmap_config,
+       .gdscs = lpass_audio_hm_sc7180_gdscs,
+       .num_gdscs = ARRAY_SIZE(lpass_audio_hm_sc7180_gdscs),
+};
+
+static int lpass_core_cc_sc7180_probe(struct platform_device *pdev)
+{
+       const struct qcom_cc_desc *desc;
+       struct regmap *regmap;
+       int ret;
+
+       lpass_core_cc_sc7180_regmap_config.name = "lpass_audio_cc";
+       desc = &lpass_audio_hm_sc7180_desc;
+       ret = qcom_cc_probe_by_index(pdev, 1, desc);
+       if (ret)
+               return ret;
+
+       lpass_core_cc_sc7180_regmap_config.name = "lpass_core_cc";
+       regmap = qcom_cc_map(pdev, &lpass_core_cc_sc7180_desc);
+       if (IS_ERR(regmap))
+               return PTR_ERR(regmap);
+
+       /*
+        * Keep the CLK always-ON
+        * LPASS_AUDIO_CORE_SYSNOC_SWAY_CORE_CLK
+        */
+       regmap_update_bits(regmap, 0x24000, BIT(0), BIT(0));
+
+       /* PLL settings */
+       regmap_write(regmap, 0x1008, 0x20);
+       regmap_update_bits(regmap, 0x1014, BIT(0), BIT(0));
+
+       clk_fabia_pll_configure(&lpass_lpaaudio_dig_pll, regmap,
+                               &lpass_lpaaudio_dig_pll_config);
+
+       return qcom_cc_really_probe(pdev, &lpass_core_cc_sc7180_desc, regmap);
+}
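/*
 * Aside (not part of the patch), a hedged decoding of the two raw writes
 * above using the clk_alpha_pll_regs_offset[] table earlier in this file:
 *   0x1008 = 0x1000 + PLL_OFF_CAL_L_VAL (0x8)    -> CAL_L set to 0x20, which
 *            matches .l in lpass_lpaaudio_dig_pll_config;
 *   0x1014 = 0x1000 + PLL_OFF_USER_CTL_U1 (0x14) -> bit 0 set before
 *            clk_fabia_pll_configure() runs.
 */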
+
+static int lpass_hm_core_probe(struct platform_device *pdev)
+{
+       const struct qcom_cc_desc *desc;
+
+       lpass_core_cc_sc7180_regmap_config.name = "lpass_hm_core";
+       desc = &lpass_core_hm_sc7180_desc;
+
+       return qcom_cc_probe_by_index(pdev, 0, desc);
+}
+
+static const struct of_device_id lpass_core_cc_sc7180_match_table[] = {
+       {
+               .compatible = "qcom,sc7180-lpasshm",
+               .data = lpass_hm_core_probe,
+       },
+       {
+               .compatible = "qcom,sc7180-lpasscorecc",
+               .data = lpass_core_cc_sc7180_probe,
+       },
+       { }
+};
+MODULE_DEVICE_TABLE(of, lpass_core_cc_sc7180_match_table);
+
+static int lpass_core_sc7180_probe(struct platform_device *pdev)
+{
+       int (*clk_probe)(struct platform_device *p);
+       int ret;
+
+       pm_runtime_enable(&pdev->dev);
+       ret = pm_clk_create(&pdev->dev);
+       if (ret)
+               return ret;
+
+       ret = pm_clk_add(&pdev->dev, "iface");
+       if (ret < 0) {
+               dev_err(&pdev->dev, "failed to acquire iface clock\n");
+               goto disable_pm_runtime;
+       }
+
+       clk_probe = of_device_get_match_data(&pdev->dev);
+       if (!clk_probe)
+               return -EINVAL;
+
+       ret = clk_probe(pdev);
+       if (ret)
+               goto destroy_pm_clk;
+
+       return 0;
+
+destroy_pm_clk:
+       pm_clk_destroy(&pdev->dev);
+
+disable_pm_runtime:
+       pm_runtime_disable(&pdev->dev);
+
+       return ret;
+}
+
+static const struct dev_pm_ops lpass_core_cc_pm_ops = {
+       SET_RUNTIME_PM_OPS(pm_clk_suspend, pm_clk_resume, NULL)
+};
+
+static struct platform_driver lpass_core_cc_sc7180_driver = {
+       .probe = lpass_core_sc7180_probe,
+       .driver = {
+               .name = "lpass_core_cc-sc7180",
+               .of_match_table = lpass_core_cc_sc7180_match_table,
+               .pm = &lpass_core_cc_pm_ops,
+       },
+};
+
+static int __init lpass_core_cc_sc7180_init(void)
+{
+       return platform_driver_register(&lpass_core_cc_sc7180_driver);
+}
+subsys_initcall(lpass_core_cc_sc7180_init);
+
+static void __exit lpass_core_cc_sc7180_exit(void)
+{
+       platform_driver_unregister(&lpass_core_cc_sc7180_driver);
+}
+module_exit(lpass_core_cc_sc7180_exit);
+
+MODULE_DESCRIPTION("QTI LPASS_CORE_CC SC7180 Driver");
+MODULE_LICENSE("GPL v2");
index 10560d9..4c6c916 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/io.h>
 #include <linux/delay.h>
 #include <linux/clk-provider.h>
+#include <linux/iopoll.h>
 #include <linux/regmap.h>
 #include <linux/clk.h>
 #include "clk.h"
@@ -86,23 +87,14 @@ static int rockchip_pll_wait_lock(struct rockchip_clk_pll *pll)
 {
        struct regmap *grf = pll->ctx->grf;
        unsigned int val;
-       int delay = 24000000, ret;
-
-       while (delay > 0) {
-               ret = regmap_read(grf, pll->lock_offset, &val);
-               if (ret) {
-                       pr_err("%s: failed to read pll lock status: %d\n",
-                              __func__, ret);
-                       return ret;
-               }
+       int ret;
 
-               if (val & BIT(pll->lock_shift))
-                       return 0;
-               delay--;
-       }
+       ret = regmap_read_poll_timeout(grf, pll->lock_offset, val,
+                                      val & BIT(pll->lock_shift), 0, 1000);
+       if (ret)
+               pr_err("%s: timeout waiting for pll to lock\n", __func__);
 
-       pr_err("%s: timeout waiting for pll to lock\n", __func__);
-       return -ETIMEDOUT;
+       return ret;
 }
 
 /**
@@ -118,12 +110,31 @@ static int rockchip_pll_wait_lock(struct rockchip_clk_pll *pll)
 #define RK3036_PLLCON1_REFDIV_SHIFT            0
 #define RK3036_PLLCON1_POSTDIV2_MASK           0x7
 #define RK3036_PLLCON1_POSTDIV2_SHIFT          6
+#define RK3036_PLLCON1_LOCK_STATUS             BIT(10)
 #define RK3036_PLLCON1_DSMPD_MASK              0x1
 #define RK3036_PLLCON1_DSMPD_SHIFT             12
+#define RK3036_PLLCON1_PWRDOWN                 BIT(13)
 #define RK3036_PLLCON2_FRAC_MASK               0xffffff
 #define RK3036_PLLCON2_FRAC_SHIFT              0
 
-#define RK3036_PLLCON1_PWRDOWN                 (1 << 13)
+static int rockchip_rk3036_pll_wait_lock(struct rockchip_clk_pll *pll)
+{
+       u32 pllcon;
+       int ret;
+
+       /*
+        * Lock time typical 250, max 500 input clock cycles @24MHz
+        * So define a very safe maximum of 1000us, meaning 24000 cycles.
+        */
+       ret = readl_relaxed_poll_timeout(pll->reg_base + RK3036_PLLCON(1),
+                                        pllcon,
+                                        pllcon & RK3036_PLLCON1_LOCK_STATUS,
+                                        0, 1000);
+       if (ret)
+               pr_err("%s: timeout waiting for pll to lock\n", __func__);
+
+       return ret;
+}
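/*
 * Aside (not part of the patch): sanity-checking the 1000 us bound - at a
 * 24 MHz reference, 1000 us is 1000e-6 * 24e6 = 24000 cycles, about 48x the
 * documented 500-cycle worst-case lock time, so this poll (delay_us = 0,
 * timeout_us = 1000) should never expire on a healthy PLL.
 */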
 
 static void rockchip_rk3036_pll_get_params(struct rockchip_clk_pll *pll,
                                        struct rockchip_pll_rate_table *rate)
@@ -221,7 +232,7 @@ static int rockchip_rk3036_pll_set_params(struct rockchip_clk_pll *pll,
        writel_relaxed(pllcon, pll->reg_base + RK3036_PLLCON(2));
 
        /* wait for the pll to lock */
-       ret = rockchip_pll_wait_lock(pll);
+       ret = rockchip_rk3036_pll_wait_lock(pll);
        if (ret) {
                pr_warn("%s: pll update unsuccessful, trying to restore old params\n",
                        __func__);
@@ -260,7 +271,7 @@ static int rockchip_rk3036_pll_enable(struct clk_hw *hw)
 
        writel(HIWORD_UPDATE(0, RK3036_PLLCON1_PWRDOWN, 0),
               pll->reg_base + RK3036_PLLCON(1));
-       rockchip_pll_wait_lock(pll);
+       rockchip_rk3036_pll_wait_lock(pll);
 
        return 0;
 }
@@ -589,19 +600,20 @@ static const struct clk_ops rockchip_rk3066_pll_clk_ops = {
 static int rockchip_rk3399_pll_wait_lock(struct rockchip_clk_pll *pll)
 {
        u32 pllcon;
-       int delay = 24000000;
-
-       /* poll check the lock status in rk3399 xPLLCON2 */
-       while (delay > 0) {
-               pllcon = readl_relaxed(pll->reg_base + RK3399_PLLCON(2));
-               if (pllcon & RK3399_PLLCON2_LOCK_STATUS)
-                       return 0;
+       int ret;
 
-               delay--;
-       }
+       /*
+        * Lock time typical 250, max 500 input clock cycles @24MHz
+        * So define a very safe maximum of 1000us, meaning 24000 cycles.
+        */
+       ret = readl_relaxed_poll_timeout(pll->reg_base + RK3399_PLLCON(2),
+                                        pllcon,
+                                        pllcon & RK3399_PLLCON2_LOCK_STATUS,
+                                        0, 1000);
+       if (ret)
+               pr_err("%s: timeout waiting for pll to lock\n", __func__);
 
-       pr_err("%s: timeout waiting for pll to lock\n", __func__);
-       return -ETIMEDOUT;
+       return ret;
 }
 
 static void rockchip_rk3399_pll_get_params(struct rockchip_clk_pll *pll,
index 77aebfb..730020f 100644 (file)
@@ -751,6 +751,7 @@ static const char *const rk3188_critical_clocks[] __initconst = {
        "pclk_peri",
        "hclk_cpubus",
        "hclk_vio_bus",
+       "sclk_mac_lbtest",
 };
 
 static struct rockchip_clk_provider *__init rk3188_common_clk_init(struct device_node *np)
index cc2a177..93c7946 100644 (file)
 #define RK3288_GRF_SOC_CON(x)  (0x244 + x * 4)
 #define RK3288_GRF_SOC_STATUS1 0x284
 
+enum rk3288_variant {
+       RK3288_CRU,
+       RK3288W_CRU,
+};
+
 enum rk3288_plls {
        apll, dpll, cpll, gpll, npll,
 };
@@ -425,8 +430,6 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
        COMPOSITE(0, "aclk_vio0", mux_pll_src_cpll_gpll_usb480m_p, CLK_IGNORE_UNUSED,
                        RK3288_CLKSEL_CON(31), 6, 2, MFLAGS, 0, 5, DFLAGS,
                        RK3288_CLKGATE_CON(3), 0, GFLAGS),
-       DIV(0, "hclk_vio", "aclk_vio0", 0,
-                       RK3288_CLKSEL_CON(28), 8, 5, DFLAGS),
        COMPOSITE(0, "aclk_vio1", mux_pll_src_cpll_gpll_usb480m_p, CLK_IGNORE_UNUSED,
                        RK3288_CLKSEL_CON(31), 14, 2, MFLAGS, 8, 5, DFLAGS,
                        RK3288_CLKGATE_CON(3), 2, GFLAGS),
@@ -819,6 +822,16 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
        INVERTER(0, "pclk_isp", "pclk_isp_in", RK3288_CLKSEL_CON(29), 3, IFLAGS),
 };
 
+static struct rockchip_clk_branch rk3288w_hclkvio_branch[] __initdata = {
+       DIV(0, "hclk_vio", "aclk_vio1", 0,
+                       RK3288_CLKSEL_CON(28), 8, 5, DFLAGS),
+};
+
+static struct rockchip_clk_branch rk3288_hclkvio_branch[] __initdata = {
+       DIV(0, "hclk_vio", "aclk_vio0", 0,
+                       RK3288_CLKSEL_CON(28), 8, 5, DFLAGS),
+};
+
 static const char *const rk3288_critical_clocks[] __initconst = {
        "aclk_cpu",
        "aclk_peri",
@@ -914,7 +927,8 @@ static struct syscore_ops rk3288_clk_syscore_ops = {
        .resume = rk3288_clk_resume,
 };
 
-static void __init rk3288_clk_init(struct device_node *np)
+static void __init rk3288_common_init(struct device_node *np,
+                                     enum rk3288_variant soc)
 {
        struct rockchip_clk_provider *ctx;
 
@@ -936,6 +950,14 @@ static void __init rk3288_clk_init(struct device_node *np)
                                   RK3288_GRF_SOC_STATUS1);
        rockchip_clk_register_branches(ctx, rk3288_clk_branches,
                                  ARRAY_SIZE(rk3288_clk_branches));
+
+       if (soc == RK3288W_CRU)
+               rockchip_clk_register_branches(ctx, rk3288w_hclkvio_branch,
+                                              ARRAY_SIZE(rk3288w_hclkvio_branch));
+       else
+               rockchip_clk_register_branches(ctx, rk3288_hclkvio_branch,
+                                              ARRAY_SIZE(rk3288_hclkvio_branch));
+
        rockchip_clk_protect_critical(rk3288_critical_clocks,
                                      ARRAY_SIZE(rk3288_critical_clocks));
 
@@ -954,4 +976,15 @@ static void __init rk3288_clk_init(struct device_node *np)
 
        rockchip_clk_of_add_provider(np, ctx);
 }
+
+static void __init rk3288_clk_init(struct device_node *np)
+{
+       rk3288_common_init(np, RK3288_CRU);
+}
 CLK_OF_DECLARE(rk3288_cru, "rockchip,rk3288-cru", rk3288_clk_init);
+
+static void __init rk3288w_clk_init(struct device_node *np)
+{
+       rk3288_common_init(np, RK3288W_CRU);
+}
+CLK_OF_DECLARE(rk3288w_cru, "rockchip,rk3288w-cru", rk3288w_clk_init);
index c186a19..2429b7c 100644 (file)
@@ -808,22 +808,22 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = {
        MMC(SCLK_SDMMC_DRV, "sdmmc_drv", "clk_sdmmc",
            RK3328_SDMMC_CON0, 1),
        MMC(SCLK_SDMMC_SAMPLE, "sdmmc_sample", "clk_sdmmc",
-           RK3328_SDMMC_CON1, 0),
+           RK3328_SDMMC_CON1, 1),
 
        MMC(SCLK_SDIO_DRV, "sdio_drv", "clk_sdio",
            RK3328_SDIO_CON0, 1),
        MMC(SCLK_SDIO_SAMPLE, "sdio_sample", "clk_sdio",
-           RK3328_SDIO_CON1, 0),
+           RK3328_SDIO_CON1, 1),
 
        MMC(SCLK_EMMC_DRV, "emmc_drv", "clk_emmc",
            RK3328_EMMC_CON0, 1),
        MMC(SCLK_EMMC_SAMPLE, "emmc_sample", "clk_emmc",
-           RK3328_EMMC_CON1, 0),
+           RK3328_EMMC_CON1, 1),
 
        MMC(SCLK_SDMMC_EXT_DRV, "sdmmc_ext_drv", "clk_sdmmc_ext",
            RK3328_SDMMC_EXT_CON0, 1),
        MMC(SCLK_SDMMC_EXT_SAMPLE, "sdmmc_ext_sample", "clk_sdmmc_ext",
-           RK3328_SDMMC_EXT_CON1, 0),
+           RK3328_SDMMC_EXT_CON1, 1),
 };
 
 static const char *const rk3328_critical_clocks[] __initconst = {
index c84d5ba..b95483b 100644 (file)
@@ -135,7 +135,7 @@ static void __init atlas6_clk_init(struct device_node *np)
 
        for (i = pll1; i < maxclk; i++) {
                atlas6_clks[i] = clk_register(NULL, atlas6_clk_hw_array[i]);
-               BUG_ON(!atlas6_clks[i]);
+               BUG_ON(IS_ERR(atlas6_clks[i]));
        }
        clk_register_clkdev(atlas6_clks[cpu], NULL, "cpu");
        clk_register_clkdev(atlas6_clks[io],  NULL, "io");
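Editor's note on the atlas6 fix above: clk_register() reports failure by returning an ERR_PTR()-encoded error, never NULL, so the old NULL check could not trigger. A minimal, hypothetical sketch of the correct pattern:

#include <linux/clk-provider.h>
#include <linux/err.h>

/* Hypothetical example: register one clock and propagate failures. */
static int register_one_clk_sketch(struct clk_hw *hw)
{
	struct clk *clk = clk_register(NULL, hw);

	if (IS_ERR(clk))	/* e.g. ERR_PTR(-ENOMEM) */
		return PTR_ERR(clk);

	return 0;
}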
index 0b212cf..f180c05 100644 (file)
@@ -327,16 +327,26 @@ int tegra_pll_wait_for_lock(struct tegra_clk_pll *pll)
        return clk_pll_wait_for_lock(pll);
 }
 
+static bool pllm_clk_is_gated_by_pmc(struct tegra_clk_pll *pll)
+{
+       u32 val = readl_relaxed(pll->pmc + PMC_PLLP_WB0_OVERRIDE);
+
+       return (val & PMC_PLLP_WB0_OVERRIDE_PLLM_OVERRIDE) &&
+             !(val & PMC_PLLP_WB0_OVERRIDE_PLLM_ENABLE);
+}
+
 static int clk_pll_is_enabled(struct clk_hw *hw)
 {
        struct tegra_clk_pll *pll = to_clk_pll(hw);
        u32 val;
 
-       if (pll->params->flags & TEGRA_PLLM) {
-               val = readl_relaxed(pll->pmc + PMC_PLLP_WB0_OVERRIDE);
-               if (val & PMC_PLLP_WB0_OVERRIDE_PLLM_OVERRIDE)
-                       return val & PMC_PLLP_WB0_OVERRIDE_PLLM_ENABLE ? 1 : 0;
-       }
+       /*
+        * Power Management Controller (PMC) can override the PLLM clock
+        * settings, including the enable-state. The PLLM is enabled when
+        * PLLM's CaR state is ON and when PLLM isn't gated by PMC.
+        */
+       if ((pll->params->flags & TEGRA_PLLM) && pllm_clk_is_gated_by_pmc(pll))
+               return 0;
 
        val = pll_readl_base(pll);
 
index 7c774ea..18564ef 100644 (file)
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0-only
 obj-$(CONFIG_PMC_ATOM)         += clk-pmc-atom.o
-obj-$(CONFIG_X86_AMD_PLATFORM_DEVICE)  += clk-st.o
+obj-$(CONFIG_X86_AMD_PLATFORM_DEVICE)  += clk-fch.o
 clk-x86-lpss-objs              := clk-lpt.o
 obj-$(CONFIG_X86_INTEL_LPSS)   += clk-x86-lpss.o
 obj-$(CONFIG_CLK_LGM_CGU)      += clk-cgu.o clk-cgu-pll.o clk-lgm.o
index c03cc6b..3179557 100644 (file)
@@ -128,7 +128,7 @@ lgm_clk_register_pll(struct lgm_clk_provider *ctx,
        pll->hw.init = &init;
 
        hw = &pll->hw;
-       ret = clk_hw_register(dev, hw);
+       ret = devm_clk_hw_register(dev, hw);
        if (ret)
                return ERR_PTR(ret);
 
index 56af0e0..33de600 100644 (file)
@@ -119,7 +119,7 @@ lgm_clk_register_mux(struct lgm_clk_provider *ctx,
        mux->hw.init = &init;
 
        hw = &mux->hw;
-       ret = clk_hw_register(dev, hw);
+       ret = devm_clk_hw_register(dev, hw);
        if (ret)
                return ERR_PTR(ret);
 
@@ -247,7 +247,7 @@ lgm_clk_register_divider(struct lgm_clk_provider *ctx,
        div->hw.init = &init;
 
        hw = &div->hw;
-       ret = clk_hw_register(dev, hw);
+       ret = devm_clk_hw_register(dev, hw);
        if (ret)
                return ERR_PTR(ret);
 
@@ -361,7 +361,7 @@ lgm_clk_register_gate(struct lgm_clk_provider *ctx,
        gate->hw.init = &init;
 
        hw = &gate->hw;
-       ret = clk_hw_register(dev, hw);
+       ret = devm_clk_hw_register(dev, hw);
        if (ret)
                return ERR_PTR(ret);
 
@@ -420,18 +420,14 @@ lgm_clk_ddiv_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
 {
        struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
        unsigned int div0, div1, exdiv;
-       unsigned long flags;
        u64 prate;
 
-       spin_lock_irqsave(&ddiv->lock, flags);
        div0 = lgm_get_clk_val(ddiv->membase, ddiv->reg,
                               ddiv->shift0, ddiv->width0) + 1;
        div1 = lgm_get_clk_val(ddiv->membase, ddiv->reg,
                               ddiv->shift1, ddiv->width1) + 1;
        exdiv = lgm_get_clk_val(ddiv->membase, ddiv->reg,
                                ddiv->shift2, ddiv->width2);
-       spin_unlock_irqrestore(&ddiv->lock, flags);
-
        prate = (u64)parent_rate;
        do_div(prate, div0);
        do_div(prate, div1);
@@ -548,24 +544,21 @@ lgm_clk_ddiv_round_rate(struct clk_hw *hw, unsigned long rate,
                div = div * 2;
                div = DIV_ROUND_CLOSEST_ULL((u64)div, 5);
        }
+       spin_unlock_irqrestore(&ddiv->lock, flags);
 
-       if (div <= 0) {
-               spin_unlock_irqrestore(&ddiv->lock, flags);
+       if (div <= 0)
                return *prate;
-       }
 
-       if (lgm_clk_get_ddiv_val(div, &ddiv1, &ddiv2) != 0) {
-               if (lgm_clk_get_ddiv_val(div + 1, &ddiv1, &ddiv2) != 0) {
-                       spin_unlock_irqrestore(&ddiv->lock, flags);
+       if (lgm_clk_get_ddiv_val(div, &ddiv1, &ddiv2) != 0)
+               if (lgm_clk_get_ddiv_val(div + 1, &ddiv1, &ddiv2) != 0)
                        return -EINVAL;
-               }
-       }
 
        rate64 = *prate;
        do_div(rate64, ddiv1);
        do_div(rate64, ddiv2);
 
        /* if predivide bit is enabled, modify rounded rate by factor of 2.5 */
+       spin_lock_irqsave(&ddiv->lock, flags);
        if (lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, 1)) {
                rate64 = rate64 * 2;
                rate64 = DIV_ROUND_CLOSEST_ULL(rate64, 5);
@@ -588,19 +581,18 @@ int lgm_clk_register_ddiv(struct lgm_clk_provider *ctx,
                          unsigned int nr_clk)
 {
        struct device *dev = ctx->dev;
-       struct clk_init_data init = {};
-       struct lgm_clk_ddiv *ddiv;
        struct clk_hw *hw;
        unsigned int idx;
        int ret;
 
        for (idx = 0; idx < nr_clk; idx++, list++) {
-               ddiv = NULL;
+               struct clk_init_data init = {};
+               struct lgm_clk_ddiv *ddiv;
+
                ddiv = devm_kzalloc(dev, sizeof(*ddiv), GFP_KERNEL);
                if (!ddiv)
                        return -ENOMEM;
 
-               memset(&init, 0, sizeof(init));
                init.name = list->name;
                init.ops = &lgm_clk_ddiv_ops;
                init.flags = list->flags;
@@ -624,7 +616,7 @@ int lgm_clk_register_ddiv(struct lgm_clk_provider *ctx,
                ddiv->hw.init = &init;
 
                hw = &ddiv->hw;
-               ret = clk_hw_register(dev, hw);
+               ret = devm_clk_hw_register(dev, hw);
                if (ret) {
                        dev_err(dev, "register clk: %s failed!\n", list->name);
                        return ret;
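Editor's note: the clk_hw_register() to devm_clk_hw_register() conversions above matter mostly for error handling: the devm_ variant ties the clock's lifetime to the providing device, so neither the error paths nor a remove path has to call clk_hw_unregister(). A small, hypothetical probe sketch of that pattern:

#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

/* Hypothetical clock wrapper; a real driver fills hw.init with name/ops/parents. */
struct sketch_clk {
	struct clk_hw hw;
};

static int sketch_probe(struct platform_device *pdev)
{
	struct sketch_clk *sc;

	sc = devm_kzalloc(&pdev->dev, sizeof(*sc), GFP_KERNEL);
	if (!sc)
		return -ENOMEM;

	/* sc->hw.init = &init;  -- elided: name, ops, parent data */

	/*
	 * devm_clk_hw_register() unregisters the clock automatically if probe
	 * fails later or when the device is unbound, so no explicit
	 * clk_hw_unregister() calls are needed.
	 */
	return devm_clk_hw_register(&pdev->dev, &sc->hw);
}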
diff --git a/drivers/clk/x86/clk-fch.c b/drivers/clk/x86/clk-fch.c
new file mode 100644 (file)
index 0000000..8f7c514
--- /dev/null
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: MIT
+/*
+ * clock framework for AMD Stoney based clocks
+ *
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/platform_data/clk-fch.h>
+#include <linux/platform_device.h>
+
+/* Clock Driving Strength 2 register */
+#define CLKDRVSTR2     0x28
+/* Clock Control 1 register */
+#define MISCCLKCNTL1   0x40
+/* Auxiliary clock1 enable bit */
+#define OSCCLKENB      2
+/* 25MHz auxiliary output clock frequency bit */
+#define OSCOUT1CLK25MHZ        16
+
+#define ST_CLK_48M     0
+#define ST_CLK_25M     1
+#define ST_CLK_MUX     2
+#define ST_CLK_GATE    3
+#define ST_MAX_CLKS    4
+
+#define RV_CLK_48M     0
+#define RV_CLK_GATE    1
+#define RV_MAX_CLKS    2
+
+static const char * const clk_oscout1_parents[] = { "clk48MHz", "clk25MHz" };
+static struct clk_hw *hws[ST_MAX_CLKS];
+
+static int fch_clk_probe(struct platform_device *pdev)
+{
+       struct fch_clk_data *fch_data;
+
+       fch_data = dev_get_platdata(&pdev->dev);
+       if (!fch_data || !fch_data->base)
+               return -EINVAL;
+
+       if (!fch_data->is_rv) {
+               hws[ST_CLK_48M] = clk_hw_register_fixed_rate(NULL, "clk48MHz",
+                       NULL, 0, 48000000);
+               hws[ST_CLK_25M] = clk_hw_register_fixed_rate(NULL, "clk25MHz",
+                       NULL, 0, 25000000);
+
+               hws[ST_CLK_MUX] = clk_hw_register_mux(NULL, "oscout1_mux",
+                       clk_oscout1_parents, ARRAY_SIZE(clk_oscout1_parents),
+                       0, fch_data->base + CLKDRVSTR2, OSCOUT1CLK25MHZ, 3, 0,
+                       NULL);
+
+               clk_set_parent(hws[ST_CLK_MUX]->clk, hws[ST_CLK_48M]->clk);
+
+               hws[ST_CLK_GATE] = clk_hw_register_gate(NULL, "oscout1",
+                       "oscout1_mux", 0, fch_data->base + MISCCLKCNTL1,
+                       OSCCLKENB, CLK_GATE_SET_TO_DISABLE, NULL);
+
+               devm_clk_hw_register_clkdev(&pdev->dev, hws[ST_CLK_GATE],
+                       "oscout1", NULL);
+       } else {
+               hws[RV_CLK_48M] = clk_hw_register_fixed_rate(NULL, "clk48MHz",
+                       NULL, 0, 48000000);
+
+               hws[RV_CLK_GATE] = clk_hw_register_gate(NULL, "oscout1",
+                       "clk48MHz", 0, fch_data->base + MISCCLKCNTL1,
+                       OSCCLKENB, CLK_GATE_SET_TO_DISABLE, NULL);
+
+               devm_clk_hw_register_clkdev(&pdev->dev, hws[RV_CLK_GATE],
+                       "oscout1", NULL);
+       }
+
+       return 0;
+}
+
+static int fch_clk_remove(struct platform_device *pdev)
+{
+       int i, clks;
+       struct fch_clk_data *fch_data;
+
+       fch_data = dev_get_platdata(&pdev->dev);
+
+       clks = fch_data->is_rv ? RV_MAX_CLKS : ST_MAX_CLKS;
+
+       for (i = 0; i < clks; i++)
+               clk_hw_unregister(hws[i]);
+
+       return 0;
+}
+
+static struct platform_driver fch_clk_driver = {
+       .driver = {
+               .name = "clk-fch",
+               .suppress_bind_attrs = true,
+       },
+       .probe = fch_clk_probe,
+       .remove = fch_clk_remove,
+};
+builtin_platform_driver(fch_clk_driver);
diff --git a/drivers/clk/x86/clk-st.c b/drivers/clk/x86/clk-st.c
deleted file mode 100644 (file)
index 25d4b97..0000000
+++ /dev/null
@@ -1,78 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * clock framework for AMD Stoney based clocks
- *
- * Copyright 2018 Advanced Micro Devices, Inc.
- */
-
-#include <linux/clk.h>
-#include <linux/clkdev.h>
-#include <linux/clk-provider.h>
-#include <linux/platform_data/clk-st.h>
-#include <linux/platform_device.h>
-
-/* Clock Driving Strength 2 register */
-#define CLKDRVSTR2     0x28
-/* Clock Control 1 register */
-#define MISCCLKCNTL1   0x40
-/* Auxiliary clock1 enable bit */
-#define OSCCLKENB      2
-/* 25Mhz auxiliary output clock freq bit */
-#define OSCOUT1CLK25MHZ        16
-
-#define ST_CLK_48M     0
-#define ST_CLK_25M     1
-#define ST_CLK_MUX     2
-#define ST_CLK_GATE    3
-#define ST_MAX_CLKS    4
-
-static const char * const clk_oscout1_parents[] = { "clk48MHz", "clk25MHz" };
-static struct clk_hw *hws[ST_MAX_CLKS];
-
-static int st_clk_probe(struct platform_device *pdev)
-{
-       struct st_clk_data *st_data;
-
-       st_data = dev_get_platdata(&pdev->dev);
-       if (!st_data || !st_data->base)
-               return -EINVAL;
-
-       hws[ST_CLK_48M] = clk_hw_register_fixed_rate(NULL, "clk48MHz", NULL, 0,
-                                                    48000000);
-       hws[ST_CLK_25M] = clk_hw_register_fixed_rate(NULL, "clk25MHz", NULL, 0,
-                                                    25000000);
-
-       hws[ST_CLK_MUX] = clk_hw_register_mux(NULL, "oscout1_mux",
-               clk_oscout1_parents, ARRAY_SIZE(clk_oscout1_parents),
-               0, st_data->base + CLKDRVSTR2, OSCOUT1CLK25MHZ, 3, 0, NULL);
-
-       clk_set_parent(hws[ST_CLK_MUX]->clk, hws[ST_CLK_48M]->clk);
-
-       hws[ST_CLK_GATE] = clk_hw_register_gate(NULL, "oscout1", "oscout1_mux",
-               0, st_data->base + MISCCLKCNTL1, OSCCLKENB,
-               CLK_GATE_SET_TO_DISABLE, NULL);
-
-       devm_clk_hw_register_clkdev(&pdev->dev, hws[ST_CLK_GATE], "oscout1",
-                                   NULL);
-
-       return 0;
-}
-
-static int st_clk_remove(struct platform_device *pdev)
-{
-       int i;
-
-       for (i = 0; i < ST_MAX_CLKS; i++)
-               clk_hw_unregister(hws[i]);
-       return 0;
-}
-
-static struct platform_driver st_clk_driver = {
-       .driver = {
-               .name = "clk-st",
-               .suppress_bind_attrs = true,
-       },
-       .probe = st_clk_probe,
-       .remove = st_clk_remove,
-};
-builtin_platform_driver(st_clk_driver);
index 2ed8b43..68b087b 100644 (file)
@@ -291,6 +291,10 @@ config CLKSRC_STM32
        select CLKSRC_MMIO
        select TIMER_OF
 
+config CLKSRC_STM32_LP
+       bool "Low power clocksource for STM32 SoCs"
+       depends on MFD_STM32_LPTIMER || COMPILE_TEST
+
 config CLKSRC_MPS2
        bool "Clocksource for MPS2 SoCs" if COMPILE_TEST
        depends on GENERIC_SCHED_CLOCK
@@ -649,9 +653,8 @@ config ATCPIT100_TIMER
          This option enables support for the Andestech ATCPIT100 timers.
 
 config RISCV_TIMER
-       bool "Timer for the RISC-V platform"
+       bool "Timer for the RISC-V platform" if COMPILE_TEST
        depends on GENERIC_SCHED_CLOCK && RISCV
-       default y
        select TIMER_PROBE
        select TIMER_OF
        help
@@ -659,6 +662,15 @@ config RISCV_TIMER
          is accessed via both the SBI and the rdcycle instruction.  This is
          required for all RISC-V systems.
 
+config CLINT_TIMER
+       bool "CLINT Timer for the RISC-V platform" if COMPILE_TEST
+       depends on GENERIC_SCHED_CLOCK && RISCV
+       select TIMER_PROBE
+       select TIMER_OF
+       help
+         This option enables the CLINT timer for RISC-V systems.  The CLINT
+         driver is usually used for NoMMU RISC-V systems.
+
 config CSKY_MP_TIMER
        bool "SMP Timer for the C-SKY platform" if COMPILE_TEST
        depends on CSKY
index 3994e22..1c444cc 100644 (file)
@@ -45,6 +45,7 @@ obj-$(CONFIG_BCM_KONA_TIMER)  += bcm_kona_timer.o
 obj-$(CONFIG_CADENCE_TTC_TIMER)        += timer-cadence-ttc.o
 obj-$(CONFIG_CLKSRC_EFM32)     += timer-efm32.o
 obj-$(CONFIG_CLKSRC_STM32)     += timer-stm32.o
+obj-$(CONFIG_CLKSRC_STM32_LP)  += timer-stm32-lp.o
 obj-$(CONFIG_CLKSRC_EXYNOS_MCT)        += exynos_mct.o
 obj-$(CONFIG_CLKSRC_LPC32XX)   += timer-lpc32xx.o
 obj-$(CONFIG_CLKSRC_MPS2)      += mps2-timer.o
@@ -88,6 +89,7 @@ obj-$(CONFIG_CLKSRC_ST_LPC)           += clksrc_st_lpc.o
 obj-$(CONFIG_X86_NUMACHIP)             += numachip.o
 obj-$(CONFIG_ATCPIT100_TIMER)          += timer-atcpit100.o
 obj-$(CONFIG_RISCV_TIMER)              += timer-riscv.o
+obj-$(CONFIG_CLINT_TIMER)              += timer-clint.o
 obj-$(CONFIG_CSKY_MP_TIMER)            += timer-mp-csky.o
 obj-$(CONFIG_GX6605S_TIMER)            += timer-gx6605s.o
 obj-$(CONFIG_HYPERV_TIMER)             += hyperv_timer.o
diff --git a/drivers/clocksource/timer-clint.c b/drivers/clocksource/timer-clint.c
new file mode 100644 (file)
index 0000000..8eeafa8
--- /dev/null
@@ -0,0 +1,226 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Western Digital Corporation or its affiliates.
+ *
+ * Most M-mode (i.e. NoMMU) RISC-V systems have a CLINT MMIO timer
+ * device.
+ */
+
+#define pr_fmt(fmt) "clint: " fmt
+#include <linux/bitops.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <linux/cpu.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/sched_clock.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/interrupt.h>
+#include <linux/of_irq.h>
+#include <linux/smp.h>
+
+#define CLINT_IPI_OFF          0
+#define CLINT_TIMER_CMP_OFF    0x4000
+#define CLINT_TIMER_VAL_OFF    0xbff8
+
+/* CLINT manages IPI and Timer for RISC-V M-mode  */
+static u32 __iomem *clint_ipi_base;
+static u64 __iomem *clint_timer_cmp;
+static u64 __iomem *clint_timer_val;
+static unsigned long clint_timer_freq;
+static unsigned int clint_timer_irq;
+
+static void clint_send_ipi(const struct cpumask *target)
+{
+       unsigned int cpu;
+
+       for_each_cpu(cpu, target)
+               writel(1, clint_ipi_base + cpuid_to_hartid_map(cpu));
+}
+
+static void clint_clear_ipi(void)
+{
+       writel(0, clint_ipi_base + cpuid_to_hartid_map(smp_processor_id()));
+}
+
+static struct riscv_ipi_ops clint_ipi_ops = {
+       .ipi_inject = clint_send_ipi,
+       .ipi_clear = clint_clear_ipi,
+};
+
+#ifdef CONFIG_64BIT
+#define clint_get_cycles()     readq_relaxed(clint_timer_val)
+#else
+#define clint_get_cycles()     readl_relaxed(clint_timer_val)
+#define clint_get_cycles_hi()  readl_relaxed(((u32 *)clint_timer_val) + 1)
+#endif
+
+#ifdef CONFIG_64BIT
+static u64 notrace clint_get_cycles64(void)
+{
+       return clint_get_cycles();
+}
+#else /* CONFIG_64BIT */
+static u64 notrace clint_get_cycles64(void)
+{
+       u32 hi, lo;
+
+       do {
+               hi = clint_get_cycles_hi();
+               lo = clint_get_cycles();
+       } while (hi != clint_get_cycles_hi());
+
+       return ((u64)hi << 32) | lo;
+}
+#endif /* CONFIG_64BIT */
+
+static u64 clint_rdtime(struct clocksource *cs)
+{
+       return clint_get_cycles64();
+}
+
+static struct clocksource clint_clocksource = {
+       .name           = "clint_clocksource",
+       .rating         = 300,
+       .mask           = CLOCKSOURCE_MASK(64),
+       .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
+       .read           = clint_rdtime,
+};
+
+static int clint_clock_next_event(unsigned long delta,
+                                  struct clock_event_device *ce)
+{
+       void __iomem *r = clint_timer_cmp +
+                         cpuid_to_hartid_map(smp_processor_id());
+
+       csr_set(CSR_IE, IE_TIE);
+       writeq_relaxed(clint_get_cycles64() + delta, r);
+       return 0;
+}
+
+static DEFINE_PER_CPU(struct clock_event_device, clint_clock_event) = {
+       .name           = "clint_clockevent",
+       .features       = CLOCK_EVT_FEAT_ONESHOT,
+       .rating         = 100,
+       .set_next_event = clint_clock_next_event,
+};
+
+static int clint_timer_starting_cpu(unsigned int cpu)
+{
+       struct clock_event_device *ce = per_cpu_ptr(&clint_clock_event, cpu);
+
+       ce->cpumask = cpumask_of(cpu);
+       clockevents_config_and_register(ce, clint_timer_freq, 100, 0x7fffffff);
+
+       enable_percpu_irq(clint_timer_irq,
+                         irq_get_trigger_type(clint_timer_irq));
+       return 0;
+}
+
+static int clint_timer_dying_cpu(unsigned int cpu)
+{
+       disable_percpu_irq(clint_timer_irq);
+       return 0;
+}
+
+static irqreturn_t clint_timer_interrupt(int irq, void *dev_id)
+{
+       struct clock_event_device *evdev = this_cpu_ptr(&clint_clock_event);
+
+       csr_clear(CSR_IE, IE_TIE);
+       evdev->event_handler(evdev);
+
+       return IRQ_HANDLED;
+}
+
+static int __init clint_timer_init_dt(struct device_node *np)
+{
+       int rc;
+       u32 i, nr_irqs;
+       void __iomem *base;
+       struct of_phandle_args oirq;
+
+       /*
+        * Ensure that CLINT device interrupts are either RV_IRQ_TIMER or
+        * RV_IRQ_SOFT. If it's anything else then we ignore the device.
+        */
+       nr_irqs = of_irq_count(np);
+       for (i = 0; i < nr_irqs; i++) {
+               if (of_irq_parse_one(np, i, &oirq)) {
+                       pr_err("%pOFP: failed to parse irq %d.\n", np, i);
+                       continue;
+               }
+
+               if ((oirq.args_count != 1) ||
+                   (oirq.args[0] != RV_IRQ_TIMER &&
+                    oirq.args[0] != RV_IRQ_SOFT)) {
+                       pr_err("%pOFP: invalid irq %d (hwirq %d)\n",
+                              np, i, oirq.args[0]);
+                       return -ENODEV;
+               }
+
+               /* Find parent irq domain and map timer irq */
+               if (!clint_timer_irq &&
+                   oirq.args[0] == RV_IRQ_TIMER &&
+                   irq_find_host(oirq.np))
+                       clint_timer_irq = irq_of_parse_and_map(np, i);
+       }
+
+       /* If CLINT timer irq not found then fail */
+       if (!clint_timer_irq) {
+               pr_err("%pOFP: timer irq not found\n", np);
+               return -ENODEV;
+       }
+
+       base = of_iomap(np, 0);
+       if (!base) {
+               pr_err("%pOFP: could not map registers\n", np);
+               return -ENODEV;
+       }
+
+       clint_ipi_base = base + CLINT_IPI_OFF;
+       clint_timer_cmp = base + CLINT_TIMER_CMP_OFF;
+       clint_timer_val = base + CLINT_TIMER_VAL_OFF;
+       clint_timer_freq = riscv_timebase;
+
+       pr_info("%pOFP: timer running at %ld Hz\n", np, clint_timer_freq);
+
+       rc = clocksource_register_hz(&clint_clocksource, clint_timer_freq);
+       if (rc) {
+               pr_err("%pOFP: clocksource register failed [%d]\n", np, rc);
+               goto fail_iounmap;
+       }
+
+       sched_clock_register(clint_get_cycles64, 64, clint_timer_freq);
+
+       rc = request_percpu_irq(clint_timer_irq, clint_timer_interrupt,
+                                "clint-timer", &clint_clock_event);
+       if (rc) {
+               pr_err("registering percpu irq failed [%d]\n", rc);
+               goto fail_iounmap;
+       }
+
+       rc = cpuhp_setup_state(CPUHP_AP_CLINT_TIMER_STARTING,
+                               "clockevents/clint/timer:starting",
+                               clint_timer_starting_cpu,
+                               clint_timer_dying_cpu);
+       if (rc) {
+               pr_err("%pOFP: cpuhp setup state failed [%d]\n", np, rc);
+               goto fail_free_irq;
+       }
+
+       riscv_set_ipi_ops(&clint_ipi_ops);
+       clint_clear_ipi();
+
+       return 0;
+
+fail_free_irq:
+       free_irq(clint_timer_irq, &clint_clock_event);
+fail_iounmap:
+       iounmap(base);
+       return rc;
+}
+
+TIMER_OF_DECLARE(clint_timer, "riscv,clint0", clint_timer_init_dt);
+TIMER_OF_DECLARE(clint_timer1, "sifive,clint0", clint_timer_init_dt);
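Editor's note: on 32-bit RISC-V the driver above cannot read the 64-bit CLINT counter atomically, so clint_get_cycles64() re-reads the high word until it is stable across the low-word read. A generic, hypothetical sketch of that lo/hi sampling loop:

#include <linux/io.h>
#include <linux/types.h>

/* Hypothetical 32-bit read of a 64-bit, monotonically increasing MMIO counter. */
static u64 read_counter64_sketch(void __iomem *lo, void __iomem *hi)
{
	u32 high, low;

	do {
		high = readl_relaxed(hi);
		low = readl_relaxed(lo);
	} while (high != readl_relaxed(hi));	/* retry if the low word wrapped */

	return ((u64)high << 32) | low;
}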
index 9de1dab..c51c5ed 100644 (file)
 #include <linux/of_irq.h>
 #include <asm/smp.h>
 #include <asm/sbi.h>
-
-u64 __iomem *riscv_time_cmp;
-u64 __iomem *riscv_time_val;
-
-static inline void mmio_set_timer(u64 val)
-{
-       void __iomem *r;
-
-       r = riscv_time_cmp + cpuid_to_hartid_map(smp_processor_id());
-       writeq_relaxed(val, r);
-}
+#include <asm/timex.h>
 
 static int riscv_clock_next_event(unsigned long delta,
                struct clock_event_device *ce)
 {
        csr_set(CSR_IE, IE_TIE);
-       if (IS_ENABLED(CONFIG_RISCV_SBI))
-               sbi_set_timer(get_cycles64() + delta);
-       else
-               mmio_set_timer(get_cycles64() + delta);
+       sbi_set_timer(get_cycles64() + delta);
        return 0;
 }
 
diff --git a/drivers/clocksource/timer-stm32-lp.c b/drivers/clocksource/timer-stm32-lp.c
new file mode 100644 (file)
index 0000000..db2841d
--- /dev/null
@@ -0,0 +1,221 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) STMicroelectronics 2019 - All Rights Reserved
+ * Authors: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
+ *         Pascal Paillet <p.paillet@st.com> for STMicroelectronics.
+ */
+
+#include <linux/clk.h>
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/stm32-lptimer.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/pm_wakeirq.h>
+
+#define CFGR_PSC_OFFSET                9
+#define STM32_LP_RATING                1000
+#define STM32_TARGET_CLKRATE   (32000 * HZ)
+#define STM32_LP_MAX_PSC       7
+
+struct stm32_lp_private {
+       struct regmap *reg;
+       struct clock_event_device clkevt;
+       unsigned long period;
+       struct device *dev;
+};
+
+static struct stm32_lp_private*
+to_priv(struct clock_event_device *clkevt)
+{
+       return container_of(clkevt, struct stm32_lp_private, clkevt);
+}
+
+static int stm32_clkevent_lp_shutdown(struct clock_event_device *clkevt)
+{
+       struct stm32_lp_private *priv = to_priv(clkevt);
+
+       regmap_write(priv->reg, STM32_LPTIM_CR, 0);
+       regmap_write(priv->reg, STM32_LPTIM_IER, 0);
+       /* clear pending flags */
+       regmap_write(priv->reg, STM32_LPTIM_ICR, STM32_LPTIM_ARRMCF);
+
+       return 0;
+}
+
+static int stm32_clkevent_lp_set_timer(unsigned long evt,
+                                      struct clock_event_device *clkevt,
+                                      int is_periodic)
+{
+       struct stm32_lp_private *priv = to_priv(clkevt);
+
+       /* disable LPTIMER to be able to write into IER register */
+       regmap_write(priv->reg, STM32_LPTIM_CR, 0);
+       /* enable ARR interrupt */
+       regmap_write(priv->reg, STM32_LPTIM_IER, STM32_LPTIM_ARRMIE);
+       /* enable LPTIMER to be able to write into ARR register */
+       regmap_write(priv->reg, STM32_LPTIM_CR, STM32_LPTIM_ENABLE);
+       /* set next event counter */
+       regmap_write(priv->reg, STM32_LPTIM_ARR, evt);
+
+       /* start counter */
+       if (is_periodic)
+               regmap_write(priv->reg, STM32_LPTIM_CR,
+                            STM32_LPTIM_CNTSTRT | STM32_LPTIM_ENABLE);
+       else
+               regmap_write(priv->reg, STM32_LPTIM_CR,
+                            STM32_LPTIM_SNGSTRT | STM32_LPTIM_ENABLE);
+
+       return 0;
+}
+
+static int stm32_clkevent_lp_set_next_event(unsigned long evt,
+                                           struct clock_event_device *clkevt)
+{
+       return stm32_clkevent_lp_set_timer(evt, clkevt,
+                                          clockevent_state_periodic(clkevt));
+}
+
+static int stm32_clkevent_lp_set_periodic(struct clock_event_device *clkevt)
+{
+       struct stm32_lp_private *priv = to_priv(clkevt);
+
+       return stm32_clkevent_lp_set_timer(priv->period, clkevt, true);
+}
+
+static int stm32_clkevent_lp_set_oneshot(struct clock_event_device *clkevt)
+{
+       struct stm32_lp_private *priv = to_priv(clkevt);
+
+       return stm32_clkevent_lp_set_timer(priv->period, clkevt, false);
+}
+
+static irqreturn_t stm32_clkevent_lp_irq_handler(int irq, void *dev_id)
+{
+       struct clock_event_device *clkevt = (struct clock_event_device *)dev_id;
+       struct stm32_lp_private *priv = to_priv(clkevt);
+
+       regmap_write(priv->reg, STM32_LPTIM_ICR, STM32_LPTIM_ARRMCF);
+
+       if (clkevt->event_handler)
+               clkevt->event_handler(clkevt);
+
+       return IRQ_HANDLED;
+}
+
+static void stm32_clkevent_lp_set_prescaler(struct stm32_lp_private *priv,
+                                           unsigned long *rate)
+{
+       int i;
+
+       for (i = 0; i <= STM32_LP_MAX_PSC; i++) {
+               if (DIV_ROUND_CLOSEST(*rate, 1 << i) < STM32_TARGET_CLKRATE)
+                       break;
+       }
+
+       regmap_write(priv->reg, STM32_LPTIM_CFGR, i << CFGR_PSC_OFFSET);
+
+       /* Adjust rate and period given the prescaler value */
+       *rate = DIV_ROUND_CLOSEST(*rate, (1 << i));
+       priv->period = DIV_ROUND_UP(*rate, HZ);
+}
+
+static void stm32_clkevent_lp_init(struct stm32_lp_private *priv,
+                                 struct device_node *np, unsigned long rate)
+{
+       priv->clkevt.name = np->full_name;
+       priv->clkevt.cpumask = cpu_possible_mask;
+       priv->clkevt.features = CLOCK_EVT_FEAT_PERIODIC |
+                               CLOCK_EVT_FEAT_ONESHOT;
+       priv->clkevt.set_state_shutdown = stm32_clkevent_lp_shutdown;
+       priv->clkevt.set_state_periodic = stm32_clkevent_lp_set_periodic;
+       priv->clkevt.set_state_oneshot = stm32_clkevent_lp_set_oneshot;
+       priv->clkevt.set_next_event = stm32_clkevent_lp_set_next_event;
+       priv->clkevt.rating = STM32_LP_RATING;
+
+       clockevents_config_and_register(&priv->clkevt, rate, 0x1,
+                                       STM32_LPTIM_MAX_ARR);
+}
+
+static int stm32_clkevent_lp_probe(struct platform_device *pdev)
+{
+       struct stm32_lptimer *ddata = dev_get_drvdata(pdev->dev.parent);
+       struct stm32_lp_private *priv;
+       unsigned long rate;
+       int ret, irq;
+
+       priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+
+       priv->reg = ddata->regmap;
+       ret = clk_prepare_enable(ddata->clk);
+       if (ret)
+               return -EINVAL;
+
+       rate = clk_get_rate(ddata->clk);
+       if (!rate) {
+               ret = -EINVAL;
+               goto out_clk_disable;
+       }
+
+       irq = platform_get_irq(to_platform_device(pdev->dev.parent), 0);
+       if (irq <= 0) {
+               ret = irq;
+               goto out_clk_disable;
+       }
+
+       if (of_property_read_bool(pdev->dev.parent->of_node, "wakeup-source")) {
+               ret = device_init_wakeup(&pdev->dev, true);
+               if (ret)
+                       goto out_clk_disable;
+
+               ret = dev_pm_set_wake_irq(&pdev->dev, irq);
+               if (ret)
+                       goto out_clk_disable;
+       }
+
+       ret = devm_request_irq(&pdev->dev, irq, stm32_clkevent_lp_irq_handler,
+                              IRQF_TIMER, pdev->name, &priv->clkevt);
+       if (ret)
+               goto out_clk_disable;
+
+       stm32_clkevent_lp_set_prescaler(priv, &rate);
+
+       stm32_clkevent_lp_init(priv, pdev->dev.parent->of_node, rate);
+
+       priv->dev = &pdev->dev;
+
+       return 0;
+
+out_clk_disable:
+       clk_disable_unprepare(ddata->clk);
+       return ret;
+}
+
+static int stm32_clkevent_lp_remove(struct platform_device *pdev)
+{
+       return -EBUSY; /* cannot unregister clockevent */
+}
+
+static const struct of_device_id stm32_clkevent_lp_of_match[] = {
+       { .compatible = "st,stm32-lptimer-timer", },
+       {},
+};
+MODULE_DEVICE_TABLE(of, stm32_clkevent_lp_of_match);
+
+static struct platform_driver stm32_clkevent_lp_driver = {
+       .probe  = stm32_clkevent_lp_probe,
+       .remove = stm32_clkevent_lp_remove,
+       .driver = {
+               .name = "stm32-lptimer-timer",
+               .of_match_table = of_match_ptr(stm32_clkevent_lp_of_match),
+       },
+};
+module_platform_driver(stm32_clkevent_lp_driver);
+
+MODULE_ALIAS("platform:stm32-lptimer-timer");
+MODULE_DESCRIPTION("STMicroelectronics STM32 clockevent low power driver");
+MODULE_LICENSE("GPL v2");
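Editor's note: to make the prescaler search in stm32_clkevent_lp_set_prescaler() concrete, assume CONFIG_HZ=100 and a hypothetical 64MHz LPTIM input. The target rate is 32000 * HZ = 3.2MHz, the first power-of-two divider that gets below it is 2^5 = 32, giving a 2MHz timer clock and a periodic reload of DIV_ROUND_UP(2000000, 100) = 20000. A small userspace re-run of the same arithmetic (assumed values, outside the driver):

#include <stdio.h>

int main(void)
{
	const unsigned long hz = 100;			/* assumed CONFIG_HZ */
	const unsigned long target = 32000 * hz;	/* STM32_TARGET_CLKRATE */
	unsigned long rate = 64000000;			/* assumed LPTIM input clock */
	int i;

	for (i = 0; i <= 7; i++)			/* STM32_LP_MAX_PSC */
		if ((rate + (1UL << i) / 2) / (1UL << i) < target)
			break;

	rate = (rate + (1UL << i) / 2) / (1UL << i);	/* DIV_ROUND_CLOSEST */
	printf("psc=%d rate=%lu period=%lu\n",
	       i, rate, (rate + hz - 1) / hz);		/* DIV_ROUND_UP(rate, HZ) */
	return 0;
}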
index afad06b..02ab56b 100644 (file)
@@ -73,8 +73,6 @@ static inline bool has_target(void)
 static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
 static int cpufreq_init_governor(struct cpufreq_policy *policy);
 static void cpufreq_exit_governor(struct cpufreq_policy *policy);
-static int cpufreq_start_governor(struct cpufreq_policy *policy);
-static void cpufreq_stop_governor(struct cpufreq_policy *policy);
 static void cpufreq_governor_limits(struct cpufreq_policy *policy);
 static int cpufreq_set_policy(struct cpufreq_policy *policy,
                              struct cpufreq_governor *new_gov,
@@ -2266,7 +2264,7 @@ static void cpufreq_exit_governor(struct cpufreq_policy *policy)
        module_put(policy->governor->owner);
 }
 
-static int cpufreq_start_governor(struct cpufreq_policy *policy)
+int cpufreq_start_governor(struct cpufreq_policy *policy)
 {
        int ret;
 
@@ -2293,7 +2291,7 @@ static int cpufreq_start_governor(struct cpufreq_policy *policy)
        return 0;
 }
 
-static void cpufreq_stop_governor(struct cpufreq_policy *policy)
+void cpufreq_stop_governor(struct cpufreq_policy *policy)
 {
        if (cpufreq_suspended || !policy->governor)
                return;
index fc459c9..e0220a6 100644 (file)
@@ -36,6 +36,7 @@
 #define INTEL_PSTATE_SAMPLING_INTERVAL (10 * NSEC_PER_MSEC)
 
 #define INTEL_CPUFREQ_TRANSITION_LATENCY       20000
+#define INTEL_CPUFREQ_TRANSITION_DELAY_HWP     5000
 #define INTEL_CPUFREQ_TRANSITION_DELAY         500
 
 #ifdef CONFIG_ACPI
@@ -220,6 +221,7 @@ struct global_params {
  *                     preference/bias
  * @epp_saved:         Saved EPP/EPB during system suspend or CPU offline
  *                     operation
+ * @epp_cached:         Cached HWP energy-performance preference value
  * @hwp_req_cached:    Cached value of the last HWP Request MSR
  * @hwp_cap_cached:    Cached value of the last HWP Capabilities MSR
  * @last_io_update:    Last time when IO wake flag was set
@@ -257,6 +259,7 @@ struct cpudata {
        s16 epp_policy;
        s16 epp_default;
        s16 epp_saved;
+       s16 epp_cached;
        u64 hwp_req_cached;
        u64 hwp_cap_cached;
        u64 last_io_update;
@@ -639,6 +642,26 @@ static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data, int *raw
        return index;
 }
 
+static int intel_pstate_set_epp(struct cpudata *cpu, u32 epp)
+{
+       /*
+        * Use the cached HWP Request MSR value, because in the active mode the
+        * register itself may be updated by intel_pstate_hwp_boost_up() or
+        * intel_pstate_hwp_boost_down() at any time.
+        */
+       u64 value = READ_ONCE(cpu->hwp_req_cached);
+
+       value &= ~GENMASK_ULL(31, 24);
+       value |= (u64)epp << 24;
+       /*
+        * The only other updater of hwp_req_cached in the active mode,
+        * intel_pstate_hwp_set(), is called under the same lock as this
+        * function, so it cannot run in parallel with the update below.
+        */
+       WRITE_ONCE(cpu->hwp_req_cached, value);
+       return wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
+}
+
 static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
                                              int pref_index, bool use_raw,
                                              u32 raw_epp)
@@ -650,28 +673,12 @@ static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
                epp = cpu_data->epp_default;
 
        if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
-               /*
-                * Use the cached HWP Request MSR value, because the register
-                * itself may be updated by intel_pstate_hwp_boost_up() or
-                * intel_pstate_hwp_boost_down() at any time.
-                */
-               u64 value = READ_ONCE(cpu_data->hwp_req_cached);
-
-               value &= ~GENMASK_ULL(31, 24);
-
                if (use_raw)
                        epp = raw_epp;
                else if (epp == -EINVAL)
                        epp = epp_values[pref_index - 1];
 
-               value |= (u64)epp << 24;
-               /*
-                * The only other updater of hwp_req_cached in the active mode,
-                * intel_pstate_hwp_set(), is called under the same lock as this
-                * function, so it cannot run in parallel with the update below.
-                */
-               WRITE_ONCE(cpu_data->hwp_req_cached, value);
-               ret = wrmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, value);
+               ret = intel_pstate_set_epp(cpu_data, epp);
        } else {
                if (epp == -EINVAL)
                        epp = (pref_index - 1) << 2;
@@ -697,10 +704,12 @@ static ssize_t show_energy_performance_available_preferences(
 
 cpufreq_freq_attr_ro(energy_performance_available_preferences);
 
+static struct cpufreq_driver intel_pstate;
+
 static ssize_t store_energy_performance_preference(
                struct cpufreq_policy *policy, const char *buf, size_t count)
 {
-       struct cpudata *cpu_data = all_cpu_data[policy->cpu];
+       struct cpudata *cpu = all_cpu_data[policy->cpu];
        char str_preference[21];
        bool raw = false;
        ssize_t ret;
@@ -725,15 +734,44 @@ static ssize_t store_energy_performance_preference(
                raw = true;
        }
 
+       /*
+        * This function runs with the policy R/W semaphore held, which
+        * guarantees that the driver pointer will not change while it is
+        * running.
+        */
+       if (!intel_pstate_driver)
+               return -EAGAIN;
+
        mutex_lock(&intel_pstate_limits_lock);
 
-       ret = intel_pstate_set_energy_pref_index(cpu_data, ret, raw, epp);
-       if (!ret)
-               ret = count;
+       if (intel_pstate_driver == &intel_pstate) {
+               ret = intel_pstate_set_energy_pref_index(cpu, ret, raw, epp);
+       } else {
+               /*
+                * In the passive mode the governor needs to be stopped on the
+                * target CPU before the EPP update and restarted after it,
+                * which is super-heavy-weight, so make sure it is worth doing
+                * upfront.
+                */
+               if (!raw)
+                       epp = ret ? epp_values[ret - 1] : cpu->epp_default;
+
+               if (cpu->epp_cached != epp) {
+                       int err;
+
+                       cpufreq_stop_governor(policy);
+                       ret = intel_pstate_set_epp(cpu, epp);
+                       err = cpufreq_start_governor(policy);
+                       if (!ret) {
+                               cpu->epp_cached = epp;
+                               ret = err;
+                       }
+               }
+       }
 
        mutex_unlock(&intel_pstate_limits_lock);
 
-       return ret;
+       return ret ?: count;
 }
 
 static ssize_t show_energy_performance_preference(
@@ -1145,8 +1183,6 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
        return count;
 }
 
-static struct cpufreq_driver intel_pstate;
-
 static void update_qos_request(enum freq_qos_req_type type)
 {
        int max_state, turbo_max, freq, i, perf_pct;
@@ -1330,9 +1366,10 @@ static const struct attribute_group intel_pstate_attr_group = {
 
 static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[];
 
+static struct kobject *intel_pstate_kobject;
+
 static void __init intel_pstate_sysfs_expose_params(void)
 {
-       struct kobject *intel_pstate_kobject;
        int rc;
 
        intel_pstate_kobject = kobject_create_and_add("intel_pstate",
@@ -1357,17 +1394,31 @@ static void __init intel_pstate_sysfs_expose_params(void)
        rc = sysfs_create_file(intel_pstate_kobject, &min_perf_pct.attr);
        WARN_ON(rc);
 
-       if (hwp_active) {
-               rc = sysfs_create_file(intel_pstate_kobject,
-                                      &hwp_dynamic_boost.attr);
-               WARN_ON(rc);
-       }
-
        if (x86_match_cpu(intel_pstate_cpu_ee_disable_ids)) {
                rc = sysfs_create_file(intel_pstate_kobject, &energy_efficiency.attr);
                WARN_ON(rc);
        }
 }
+
+static void intel_pstate_sysfs_expose_hwp_dynamic_boost(void)
+{
+       int rc;
+
+       if (!hwp_active)
+               return;
+
+       rc = sysfs_create_file(intel_pstate_kobject, &hwp_dynamic_boost.attr);
+       WARN_ON_ONCE(rc);
+}
+
+static void intel_pstate_sysfs_hide_hwp_dynamic_boost(void)
+{
+       if (!hwp_active)
+               return;
+
+       sysfs_remove_file(intel_pstate_kobject, &hwp_dynamic_boost.attr);
+}
+
 /************************** sysfs end ************************/
 
 static void intel_pstate_hwp_enable(struct cpudata *cpudata)
@@ -2247,7 +2298,10 @@ static int intel_pstate_verify_policy(struct cpufreq_policy_data *policy)
 
 static void intel_cpufreq_stop_cpu(struct cpufreq_policy *policy)
 {
-       intel_pstate_set_min_pstate(all_cpu_data[policy->cpu]);
+       if (hwp_active)
+               intel_pstate_hwp_force_min_perf(policy->cpu);
+       else
+               intel_pstate_set_min_pstate(all_cpu_data[policy->cpu]);
 }
 
 static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
@@ -2255,12 +2309,10 @@ static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
        pr_debug("CPU %d exiting\n", policy->cpu);
 
        intel_pstate_clear_update_util_hook(policy->cpu);
-       if (hwp_active) {
+       if (hwp_active)
                intel_pstate_hwp_save_state(policy);
-               intel_pstate_hwp_force_min_perf(policy->cpu);
-       } else {
-               intel_cpufreq_stop_cpu(policy);
-       }
+
+       intel_cpufreq_stop_cpu(policy);
 }
 
 static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
@@ -2390,13 +2442,71 @@ static void intel_cpufreq_trace(struct cpudata *cpu, unsigned int trace_type, in
                fp_toint(cpu->iowait_boost * 100));
 }
 
+static void intel_cpufreq_adjust_hwp(struct cpudata *cpu, u32 target_pstate,
+                                    bool fast_switch)
+{
+       u64 prev = READ_ONCE(cpu->hwp_req_cached), value = prev;
+
+       value &= ~HWP_MIN_PERF(~0L);
+       value |= HWP_MIN_PERF(target_pstate);
+
+       /*
+        * The entire MSR needs to be updated in order to update the HWP min
+        * field in it, so opportunistically update the max too if needed.
+        */
+       value &= ~HWP_MAX_PERF(~0L);
+       value |= HWP_MAX_PERF(cpu->max_perf_ratio);
+
+       if (value == prev)
+               return;
+
+       WRITE_ONCE(cpu->hwp_req_cached, value);
+       if (fast_switch)
+               wrmsrl(MSR_HWP_REQUEST, value);
+       else
+               wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
+}
+
+static void intel_cpufreq_adjust_perf_ctl(struct cpudata *cpu,
+                                         u32 target_pstate, bool fast_switch)
+{
+       if (fast_switch)
+               wrmsrl(MSR_IA32_PERF_CTL,
+                      pstate_funcs.get_val(cpu, target_pstate));
+       else
+               wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
+                             pstate_funcs.get_val(cpu, target_pstate));
+}
+
+static int intel_cpufreq_update_pstate(struct cpudata *cpu, int target_pstate,
+                                      bool fast_switch)
+{
+       int old_pstate = cpu->pstate.current_pstate;
+
+       target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
+       if (target_pstate != old_pstate) {
+               cpu->pstate.current_pstate = target_pstate;
+               if (hwp_active)
+                       intel_cpufreq_adjust_hwp(cpu, target_pstate,
+                                                fast_switch);
+               else
+                       intel_cpufreq_adjust_perf_ctl(cpu, target_pstate,
+                                                     fast_switch);
+       }
+
+       intel_cpufreq_trace(cpu, fast_switch ? INTEL_PSTATE_TRACE_FAST_SWITCH :
+                           INTEL_PSTATE_TRACE_TARGET, old_pstate);
+
+       return target_pstate;
+}
+
 static int intel_cpufreq_target(struct cpufreq_policy *policy,
                                unsigned int target_freq,
                                unsigned int relation)
 {
        struct cpudata *cpu = all_cpu_data[policy->cpu];
        struct cpufreq_freqs freqs;
-       int target_pstate, old_pstate;
+       int target_pstate;
 
        update_turbo_state();
 
@@ -2404,6 +2514,7 @@ static int intel_cpufreq_target(struct cpufreq_policy *policy,
        freqs.new = target_freq;
 
        cpufreq_freq_transition_begin(policy, &freqs);
+
        switch (relation) {
        case CPUFREQ_RELATION_L:
                target_pstate = DIV_ROUND_UP(freqs.new, cpu->pstate.scaling);
@@ -2415,15 +2526,11 @@ static int intel_cpufreq_target(struct cpufreq_policy *policy,
                target_pstate = DIV_ROUND_CLOSEST(freqs.new, cpu->pstate.scaling);
                break;
        }
-       target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
-       old_pstate = cpu->pstate.current_pstate;
-       if (target_pstate != cpu->pstate.current_pstate) {
-               cpu->pstate.current_pstate = target_pstate;
-               wrmsrl_on_cpu(policy->cpu, MSR_IA32_PERF_CTL,
-                             pstate_funcs.get_val(cpu, target_pstate));
-       }
+
+       target_pstate = intel_cpufreq_update_pstate(cpu, target_pstate, false);
+
        freqs.new = target_pstate * cpu->pstate.scaling;
-       intel_cpufreq_trace(cpu, INTEL_PSTATE_TRACE_TARGET, old_pstate);
+
        cpufreq_freq_transition_end(policy, &freqs, false);
 
        return 0;
@@ -2433,15 +2540,14 @@ static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
                                              unsigned int target_freq)
 {
        struct cpudata *cpu = all_cpu_data[policy->cpu];
-       int target_pstate, old_pstate;
+       int target_pstate;
 
        update_turbo_state();
 
        target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling);
-       target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
-       old_pstate = cpu->pstate.current_pstate;
-       intel_pstate_update_pstate(cpu, target_pstate);
-       intel_cpufreq_trace(cpu, INTEL_PSTATE_TRACE_FAST_SWITCH, old_pstate);
+
+       target_pstate = intel_cpufreq_update_pstate(cpu, target_pstate, true);
+
        return target_pstate * cpu->pstate.scaling;
 }
 
@@ -2461,7 +2567,6 @@ static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
                return ret;
 
        policy->cpuinfo.transition_latency = INTEL_CPUFREQ_TRANSITION_LATENCY;
-       policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY;
        /* This reflects the intel_pstate_get_cpu_pstates() setting. */
        policy->cur = policy->cpuinfo.min_freq;
 
@@ -2473,10 +2578,18 @@ static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
 
        cpu = all_cpu_data[policy->cpu];
 
-       if (hwp_active)
+       if (hwp_active) {
+               u64 value;
+
                intel_pstate_get_hwp_max(policy->cpu, &turbo_max, &max_state);
-       else
+               policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY_HWP;
+               rdmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, &value);
+               WRITE_ONCE(cpu->hwp_req_cached, value);
+               cpu->epp_cached = (value & GENMASK_ULL(31, 24)) >> 24;
+       } else {
                turbo_max = cpu->pstate.turbo_pstate;
+               policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY;
+       }
 
        min_freq = DIV_ROUND_UP(turbo_max * global.min_perf_pct, 100);
        min_freq *= cpu->pstate.scaling;
@@ -2553,6 +2666,10 @@ static void intel_pstate_driver_cleanup(void)
                }
        }
        put_online_cpus();
+
+       if (intel_pstate_driver == &intel_pstate)
+               intel_pstate_sysfs_hide_hwp_dynamic_boost();
+
        intel_pstate_driver = NULL;
 }
 
@@ -2560,6 +2677,9 @@ static int intel_pstate_register_driver(struct cpufreq_driver *driver)
 {
        int ret;
 
+       if (driver == &intel_pstate)
+               intel_pstate_sysfs_expose_hwp_dynamic_boost();
+
        memset(&global, 0, sizeof(global));
        global.max_perf_pct = 100;
 
@@ -2577,9 +2697,6 @@ static int intel_pstate_register_driver(struct cpufreq_driver *driver)
 
 static int intel_pstate_unregister_driver(void)
 {
-       if (hwp_active)
-               return -EBUSY;
-
        cpufreq_unregister_driver(intel_pstate_driver);
        intel_pstate_driver_cleanup();
 
@@ -2835,7 +2952,10 @@ static int __init intel_pstate_init(void)
                        hwp_active++;
                        hwp_mode_bdw = id->driver_data;
                        intel_pstate.attr = hwp_cpufreq_attrs;
-                       default_driver = &intel_pstate;
+                       intel_cpufreq.attr = hwp_cpufreq_attrs;
+                       if (!default_driver)
+                               default_driver = &intel_pstate;
+
                        goto hwp_cpu_matched;
                }
        } else {
@@ -2906,14 +3026,13 @@ static int __init intel_pstate_setup(char *str)
        if (!str)
                return -EINVAL;
 
-       if (!strcmp(str, "disable")) {
+       if (!strcmp(str, "disable"))
                no_load = 1;
-       } else if (!strcmp(str, "active")) {
+       else if (!strcmp(str, "active"))
                default_driver = &intel_pstate;
-       } else if (!strcmp(str, "passive")) {
+       else if (!strcmp(str, "passive"))
                default_driver = &intel_cpufreq;
-               no_hwp = 1;
-       }
+
        if (!strcmp(str, "no_hwp")) {
                pr_info("HWP disabled\n");
                no_hwp = 1;
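Editor's note: the EPP handling above revolves around bits 31:24 of MSR_HWP_REQUEST: intel_pstate_set_epp() clears GENMASK_ULL(31, 24) and inserts the new value, and intel_cpufreq_cpu_init() extracts the same field into epp_cached. A hedged sketch of the field manipulation on a plain u64 (no MSR access, hypothetical helper names):

#include <linux/bits.h>
#include <linux/types.h>

#define HWP_REQ_EPP_MASK	GENMASK_ULL(31, 24)	/* hypothetical name */

/* Replace the EPP field of a cached HWP_REQUEST value. */
static u64 hwp_req_set_epp_sketch(u64 req, u8 epp)
{
	req &= ~HWP_REQ_EPP_MASK;
	req |= (u64)epp << 24;
	return req;
}

/* Extract the EPP field, as done when seeding epp_cached. */
static u8 hwp_req_get_epp_sketch(u64 req)
{
	return (req & HWP_REQ_EPP_MASK) >> 24;
}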
index 0c66d61..080955a 100644 (file)
@@ -204,8 +204,8 @@ static int virtcrypto_update_status(struct virtio_crypto *vcrypto)
        u32 status;
        int err;
 
-       virtio_cread(vcrypto->vdev,
-           struct virtio_crypto_config, status, &status);
+       virtio_cread_le(vcrypto->vdev,
+                       struct virtio_crypto_config, status, &status);
 
        /*
         * Unknown status bits would be a host error and the driver
@@ -323,31 +323,31 @@ static int virtcrypto_probe(struct virtio_device *vdev)
        if (!vcrypto)
                return -ENOMEM;
 
-       virtio_cread(vdev, struct virtio_crypto_config,
+       virtio_cread_le(vdev, struct virtio_crypto_config,
                        max_dataqueues, &max_data_queues);
        if (max_data_queues < 1)
                max_data_queues = 1;
 
-       virtio_cread(vdev, struct virtio_crypto_config,
-               max_cipher_key_len, &max_cipher_key_len);
-       virtio_cread(vdev, struct virtio_crypto_config,
-               max_auth_key_len, &max_auth_key_len);
-       virtio_cread(vdev, struct virtio_crypto_config,
-               max_size, &max_size);
-       virtio_cread(vdev, struct virtio_crypto_config,
-               crypto_services, &crypto_services);
-       virtio_cread(vdev, struct virtio_crypto_config,
-               cipher_algo_l, &cipher_algo_l);
-       virtio_cread(vdev, struct virtio_crypto_config,
-               cipher_algo_h, &cipher_algo_h);
-       virtio_cread(vdev, struct virtio_crypto_config,
-               hash_algo, &hash_algo);
-       virtio_cread(vdev, struct virtio_crypto_config,
-               mac_algo_l, &mac_algo_l);
-       virtio_cread(vdev, struct virtio_crypto_config,
-               mac_algo_h, &mac_algo_h);
-       virtio_cread(vdev, struct virtio_crypto_config,
-               aead_algo, &aead_algo);
+       virtio_cread_le(vdev, struct virtio_crypto_config,
+                       max_cipher_key_len, &max_cipher_key_len);
+       virtio_cread_le(vdev, struct virtio_crypto_config,
+                       max_auth_key_len, &max_auth_key_len);
+       virtio_cread_le(vdev, struct virtio_crypto_config,
+                       max_size, &max_size);
+       virtio_cread_le(vdev, struct virtio_crypto_config,
+                       crypto_services, &crypto_services);
+       virtio_cread_le(vdev, struct virtio_crypto_config,
+                       cipher_algo_l, &cipher_algo_l);
+       virtio_cread_le(vdev, struct virtio_crypto_config,
+                       cipher_algo_h, &cipher_algo_h);
+       virtio_cread_le(vdev, struct virtio_crypto_config,
+                       hash_algo, &hash_algo);
+       virtio_cread_le(vdev, struct virtio_crypto_config,
+                       mac_algo_l, &mac_algo_l);
+       virtio_cread_le(vdev, struct virtio_crypto_config,
+                       mac_algo_h, &mac_algo_h);
+       virtio_cread_le(vdev, struct virtio_crypto_config,
+                       aead_algo, &aead_algo);
 
        /* Add virtio crypto device to global table */
        err = virtcrypto_devmgr_add_dev(vcrypto);
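Editor's note: the virtio_cread() to virtio_cread_le() switch reflects that the virtio-crypto config space is defined as little-endian even for legacy (pre-1.0) devices, so the accessor that skips the "legacy is guest-native endian" interpretation is the right one. A hypothetical one-field sketch of the usage:

#include <linux/virtio_config.h>
#include <uapi/linux/virtio_crypto.h>

/* Hypothetical helper: read one always-little-endian config field. */
static u32 read_max_dataqueues_sketch(struct virtio_device *vdev)
{
	u32 max_data_queues;

	/* virtio_cread_le() treats the field as LE even on legacy devices. */
	virtio_cread_le(vdev, struct virtio_crypto_config,
			max_dataqueues, &max_data_queues);
	return max_data_queues;
}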
index f508285..c82cbcb 100644 (file)
@@ -80,14 +80,14 @@ bool __generic_fsdax_supported(struct dax_device *dax_dev,
        int err, id;
 
        if (blocksize != PAGE_SIZE) {
-               pr_debug("%s: error: unsupported blocksize for dax\n",
+               pr_info("%s: error: unsupported blocksize for dax\n",
                                bdevname(bdev, buf));
                return false;
        }
 
        err = bdev_dax_pgoff(bdev, start, PAGE_SIZE, &pgoff);
        if (err) {
-               pr_debug("%s: error: unaligned partition for dax\n",
+               pr_info("%s: error: unaligned partition for dax\n",
                                bdevname(bdev, buf));
                return false;
        }
@@ -95,7 +95,7 @@ bool __generic_fsdax_supported(struct dax_device *dax_dev,
        last_page = PFN_DOWN((start + sectors - 1) * 512) * PAGE_SIZE / 512;
        err = bdev_dax_pgoff(bdev, last_page, PAGE_SIZE, &pgoff_end);
        if (err) {
-               pr_debug("%s: error: unaligned partition for dax\n",
+               pr_info("%s: error: unaligned partition for dax\n",
                                bdevname(bdev, buf));
                return false;
        }
@@ -103,11 +103,11 @@ bool __generic_fsdax_supported(struct dax_device *dax_dev,
        id = dax_read_lock();
        len = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
        len2 = dax_direct_access(dax_dev, pgoff_end, 1, &end_kaddr, &end_pfn);
-       dax_read_unlock(id);
 
        if (len < 1 || len2 < 1) {
-               pr_debug("%s: error: dax access failed (%ld)\n",
+               pr_info("%s: error: dax access failed (%ld)\n",
                                bdevname(bdev, buf), len < 1 ? len : len2);
+               dax_read_unlock(id);
                return false;
        }
 
@@ -137,9 +137,10 @@ bool __generic_fsdax_supported(struct dax_device *dax_dev,
                put_dev_pagemap(end_pgmap);
 
        }
+       dax_read_unlock(id);
 
        if (!dax_enabled) {
-               pr_debug("%s: error: dax support not enabled\n",
+               pr_info("%s: error: dax support not enabled\n",
                                bdevname(bdev, buf));
                return false;
        }
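
The __generic_fsdax_supported() hunks above widen the dax_read_lock() critical section so the kaddr/pfn pair returned by dax_direct_access() is only inspected while the lock is held, and the error path now drops the lock before returning. A minimal sketch of that bracketing pattern, assuming the usual dax helpers from <linux/dax.h>; the function name below is hypothetical and this is not the full support check:

/* Sketch only: the lock bracketing applied in the hunk above. */
static bool dax_mapping_usable(struct dax_device *dax_dev, pgoff_t pgoff)
{
        void *kaddr;
        pfn_t pfn;
        long len;
        int id;

        id = dax_read_lock();
        len = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
        if (len < 1) {
                dax_read_unlock(id);    /* error path drops the lock too */
                return false;
        }
        /* ... inspect pfn / its dev_pagemap while the mapping stays valid ... */
        dax_read_unlock(id);
        return true;
}
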
index 07f5273..434a331 100644 (file)
 DEFINE_WD_CLASS(reservation_ww_class);
 EXPORT_SYMBOL(reservation_ww_class);
 
-struct lock_class_key reservation_seqcount_class;
-EXPORT_SYMBOL(reservation_seqcount_class);
-
-const char reservation_seqcount_string[] = "reservation_seqcount";
-EXPORT_SYMBOL(reservation_seqcount_string);
-
 /**
  * dma_resv_list_alloc - allocate fence list
  * @shared_max: number of fences we need space for
@@ -143,9 +137,8 @@ subsys_initcall(dma_resv_lockdep);
 void dma_resv_init(struct dma_resv *obj)
 {
        ww_mutex_init(&obj->lock, &reservation_ww_class);
+       seqcount_ww_mutex_init(&obj->seq, &obj->lock);
 
-       __seqcount_init(&obj->seq, reservation_seqcount_string,
-                       &reservation_seqcount_class);
        RCU_INIT_POINTER(obj->fence, NULL);
        RCU_INIT_POINTER(obj->fence_excl, NULL);
 }
@@ -275,7 +268,6 @@ void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
        fobj = dma_resv_get_list(obj);
        count = fobj->shared_count;
 
-       preempt_disable();
        write_seqcount_begin(&obj->seq);
 
        for (i = 0; i < count; ++i) {
@@ -297,7 +289,6 @@ replace:
        smp_store_mb(fobj->shared_count, count);
 
        write_seqcount_end(&obj->seq);
-       preempt_enable();
        dma_fence_put(old);
 }
 EXPORT_SYMBOL(dma_resv_add_shared_fence);
@@ -324,14 +315,12 @@ void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
        if (fence)
                dma_fence_get(fence);
 
-       preempt_disable();
        write_seqcount_begin(&obj->seq);
        /* write_seqcount_begin provides the necessary memory barrier */
        RCU_INIT_POINTER(obj->fence_excl, fence);
        if (old)
                old->shared_count = 0;
        write_seqcount_end(&obj->seq);
-       preempt_enable();
 
        /* inplace update, no shared fences */
        while (i--)
@@ -409,13 +398,11 @@ retry:
        src_list = dma_resv_get_list(dst);
        old = dma_resv_get_excl(dst);
 
-       preempt_disable();
        write_seqcount_begin(&dst->seq);
        /* write_seqcount_begin provides the necessary memory barrier */
        RCU_INIT_POINTER(dst->fence_excl, new);
        RCU_INIT_POINTER(dst->fence, dst_list);
        write_seqcount_end(&dst->seq);
-       preempt_enable();
 
        dma_resv_list_free(src_list);
        dma_fence_put(old);
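
The dma-resv hunks above replace the open-coded seqcount (with its exported lockdep class and name string) by a seqcount associated with the object's ww_mutex, which is also why the preempt_disable()/preempt_enable() pairs around the write sections can be dropped. A minimal sketch of that associated-lock seqcount pattern, assuming the seqlock API that provides seqcount_ww_mutex_t; the struct and function names here are illustrative, not the dma-resv ones:

/* Sketch: seqcount tied to the ww_mutex that already serializes writers.
 * Needs <linux/ww_mutex.h> and <linux/seqlock.h>. */
struct obj_state {
        struct ww_mutex lock;
        seqcount_ww_mutex_t seq;
};

static void obj_state_init(struct obj_state *obj, struct ww_class *class)
{
        ww_mutex_init(&obj->lock, class);
        seqcount_ww_mutex_init(&obj->seq, &obj->lock);
}

/* Caller holds obj->lock; no explicit preempt_disable() around the section. */
static void obj_state_publish(struct obj_state *obj)
{
        write_seqcount_begin(&obj->seq);
        /* ... publish the new fence pointers ... */
        write_seqcount_end(&obj->seq);
}
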
index d68346a..ebe5099 100644 (file)
        (n << (28 + (2 * skl) - PAGE_SHIFT))
 
 static int nr_channels;
+static struct pci_dev *mci_pdev;
+static int ie31200_registered = 1;
 
 struct ie31200_priv {
        void __iomem *window;
@@ -538,12 +540,16 @@ fail_free:
 static int ie31200_init_one(struct pci_dev *pdev,
                            const struct pci_device_id *ent)
 {
-       edac_dbg(0, "MC:\n");
+       int rc;
 
+       edac_dbg(0, "MC:\n");
        if (pci_enable_device(pdev) < 0)
                return -EIO;
+       rc = ie31200_probe1(pdev, ent->driver_data);
+       if (rc == 0 && !mci_pdev)
+               mci_pdev = pci_dev_get(pdev);
 
-       return ie31200_probe1(pdev, ent->driver_data);
+       return rc;
 }
 
 static void ie31200_remove_one(struct pci_dev *pdev)
@@ -552,6 +558,8 @@ static void ie31200_remove_one(struct pci_dev *pdev)
        struct ie31200_priv *priv;
 
        edac_dbg(0, "\n");
+       pci_dev_put(mci_pdev);
+       mci_pdev = NULL;
        mci = edac_mc_del_mc(&pdev->dev);
        if (!mci)
                return;
@@ -593,17 +601,53 @@ static struct pci_driver ie31200_driver = {
 
 static int __init ie31200_init(void)
 {
+       int pci_rc, i;
+
        edac_dbg(3, "MC:\n");
        /* Ensure that the OPSTATE is set correctly for POLL or NMI */
        opstate_init();
 
-       return pci_register_driver(&ie31200_driver);
+       pci_rc = pci_register_driver(&ie31200_driver);
+       if (pci_rc < 0)
+               goto fail0;
+
+       if (!mci_pdev) {
+               ie31200_registered = 0;
+               for (i = 0; ie31200_pci_tbl[i].vendor != 0; i++) {
+                       mci_pdev = pci_get_device(ie31200_pci_tbl[i].vendor,
+                                                 ie31200_pci_tbl[i].device,
+                                                 NULL);
+                       if (mci_pdev)
+                               break;
+               }
+               if (!mci_pdev) {
+                       edac_dbg(0, "ie31200 pci_get_device fail\n");
+                       pci_rc = -ENODEV;
+                       goto fail1;
+               }
+               pci_rc = ie31200_init_one(mci_pdev, &ie31200_pci_tbl[i]);
+               if (pci_rc < 0) {
+                       edac_dbg(0, "ie31200 init fail\n");
+                       pci_rc = -ENODEV;
+                       goto fail1;
+               }
+       }
+       return 0;
+
+fail1:
+       pci_unregister_driver(&ie31200_driver);
+fail0:
+       pci_dev_put(mci_pdev);
+
+       return pci_rc;
 }
 
 static void __exit ie31200_exit(void)
 {
        edac_dbg(3, "MC:\n");
        pci_unregister_driver(&ie31200_driver);
+       if (!ie31200_registered)
+               ie31200_remove_one(mci_pdev);
 }
 
 module_init(ie31200_init);
index e7e36aa..b4b9ce9 100644 (file)
@@ -1136,15 +1136,14 @@ int sdei_event_handler(struct pt_regs *regs,
         * access kernel memory.
         * Do the same here because this doesn't come via the same entry code.
        */
-       orig_addr_limit = get_fs();
-       set_fs(USER_DS);
+       orig_addr_limit = force_uaccess_begin();
 
        err = arg->callback(event_num, regs, arg->callback_arg);
        if (err)
                pr_err_ratelimited("event %u on CPU %u failed with error: %d\n",
                                   event_num, smp_processor_id(), err);
 
-       set_fs(orig_addr_limit);
+       force_uaccess_end(orig_addr_limit);
 
        return err;
 }
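
The sdei_event_handler() hunk above swaps the open-coded get_fs()/set_fs(USER_DS) pair for the force_uaccess helpers used at the call site. A minimal sketch of that bracketing, assuming force_uaccess_begin()/force_uaccess_end() from <linux/uaccess.h>; the wrapper and callback names below are illustrative:

/* Sketch: constrain the address limit around an out-of-band callback. */
static int run_callback_with_user_limit(int (*cb)(void *arg), void *arg)
{
        mm_segment_t orig_addr_limit;
        int err;

        orig_addr_limit = force_uaccess_begin();   /* was: set_fs(USER_DS) */
        err = cb(arg);
        force_uaccess_end(orig_addr_limit);        /* was: set_fs(orig) */

        return err;
}
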
index 75daaf2..296b18f 100644 (file)
@@ -28,8 +28,8 @@ cflags-$(CONFIG_EFI_GENERIC_STUB) += -I$(srctree)/scripts/dtc/libfdt
 KBUILD_CFLAGS                  := $(cflags-y) -Os -DDISABLE_BRANCH_PROFILING \
                                   -include $(srctree)/drivers/firmware/efi/libstub/hidden.h \
                                   -D__NO_FORTIFY \
-                                  $(call cc-option,-ffreestanding) \
-                                  $(call cc-option,-fno-stack-protector) \
+                                  -ffreestanding \
+                                  -fno-stack-protector \
                                   $(call cc-option,-fno-addrsig) \
                                   -D__DISABLE_EXPORTS
 
index c7fd0c4..1102de7 100644 (file)
@@ -195,19 +195,32 @@ static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
                                unsigned int engine_id,
                                unsigned int queue_id)
 {
-       uint32_t sdma_engine_reg_base[2] = {
-               SOC15_REG_OFFSET(SDMA0, 0,
-                                mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL,
-               SOC15_REG_OFFSET(SDMA1, 0,
-                                mmSDMA1_RLC0_RB_CNTL) - mmSDMA1_RLC0_RB_CNTL
-       };
-       uint32_t retval = sdma_engine_reg_base[engine_id]
+       uint32_t sdma_engine_reg_base = 0;
+       uint32_t sdma_rlc_reg_offset;
+
+       switch (engine_id) {
+       default:
+               dev_warn(adev->dev,
+                        "Invalid sdma engine id (%d), using engine id 0\n",
+                        engine_id);
+               fallthrough;
+       case 0:
+               sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0,
+                               mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
+               break;
+       case 1:
+               sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA1, 0,
+                               mmSDMA1_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
+               break;
+       }
+
+       sdma_rlc_reg_offset = sdma_engine_reg_base
                + queue_id * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL);
 
        pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id,
-                       queue_id, retval);
+                queue_id, sdma_rlc_reg_offset);
 
-       return retval;
+       return sdma_rlc_reg_offset;
 }
 
 static inline struct v9_mqd *get_mqd(void *mqd)
index e5a5ba8..a58af51 100644 (file)
@@ -258,11 +258,9 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
        new->shared_count = k;
 
        /* Install the new fence list, seqcount provides the barriers */
-       preempt_disable();
        write_seqcount_begin(&resv->seq);
        RCU_INIT_POINTER(resv->fence, new);
        write_seqcount_end(&resv->seq);
-       preempt_enable();
 
        /* Drop the references to the removed fences or move them to ef_list */
        for (i = j, k = 0; i < old->shared_count; ++i) {
index aa5b54e..eb7cfe8 100644 (file)
@@ -2574,6 +2574,9 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
                AMD_IP_BLOCK_TYPE_IH,
        };
 
+       for (i = 0; i < adev->num_ip_blocks; i++)
+               adev->ip_blocks[i].status.hw = false;
+
        for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
                int j;
                struct amdgpu_ip_block *block;
@@ -2581,7 +2584,6 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
                for (j = 0; j < adev->num_ip_blocks; j++) {
                        block = &adev->ip_blocks[j];
 
-                       block->status.hw = false;
                        if (block->version->type != ip_order[i] ||
                                !block->status.valid)
                                continue;
index 5f20cad..e4dbf14 100644 (file)
@@ -3212,6 +3212,12 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
             attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
                return 0;
 
+       /* Skip crit temp on APU */
+       if ((adev->flags & AMD_IS_APU) && (adev->family >= AMDGPU_FAMILY_CZ) &&
+           (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
+            attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr))
+               return 0;
+
        /* Skip limit attributes if DPM is not enabled */
        if (!adev->pm.dpm_enabled &&
            (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
index fe7d39b..7fe5642 100644 (file)
@@ -193,12 +193,18 @@ static int psp_sw_fini(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        psp_memory_training_fini(&adev->psp);
-       release_firmware(adev->psp.sos_fw);
-       adev->psp.sos_fw = NULL;
-       release_firmware(adev->psp.asd_fw);
-       adev->psp.asd_fw = NULL;
-       release_firmware(adev->psp.ta_fw);
-       adev->psp.ta_fw = NULL;
+       if (adev->psp.sos_fw) {
+               release_firmware(adev->psp.sos_fw);
+               adev->psp.sos_fw = NULL;
+       }
+       if (adev->psp.asd_fw) {
+               release_firmware(adev->psp.asd_fw);
+               adev->psp.asd_fw = NULL;
+       }
+       if (adev->psp.ta_fw) {
+               release_firmware(adev->psp.ta_fw);
+               adev->psp.ta_fw = NULL;
+       }
 
        if (adev->asic_type == CHIP_NAVI10)
                psp_sysfs_fini(adev);
@@ -409,11 +415,28 @@ static int psp_clear_vf_fw(struct psp_context *psp)
        return ret;
 }
 
+static bool psp_skip_tmr(struct psp_context *psp)
+{
+       switch (psp->adev->asic_type) {
+       case CHIP_NAVI12:
+       case CHIP_SIENNA_CICHLID:
+               return true;
+       default:
+               return false;
+       }
+}
+
 static int psp_tmr_load(struct psp_context *psp)
 {
        int ret;
        struct psp_gfx_cmd_resp *cmd;
 
+       /* For Navi12 and Sienna Cichlid under SRIOV, do not set up the TMR;
+        * it is already set up by the host driver.
+        */
+       if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
+               return 0;
+
        cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
        if (!cmd)
                return -ENOMEM;
@@ -1987,7 +2010,7 @@ static int psp_suspend(void *handle)
 
        ret = psp_tmr_terminate(psp);
        if (ret) {
-               DRM_ERROR("Falied to terminate tmr\n");
+               DRM_ERROR("Failed to terminate tmr\n");
                return ret;
        }
 
index e10f02e..1bedb41 100644 (file)
@@ -1243,7 +1243,6 @@ void amdgpu_ras_debugfs_remove(struct amdgpu_device *adev,
        if (!obj || !obj->ent)
                return;
 
-       debugfs_remove(obj->ent);
        obj->ent = NULL;
        put_obj(obj);
 }
@@ -1257,7 +1256,6 @@ static void amdgpu_ras_debugfs_remove_all(struct amdgpu_device *adev)
                amdgpu_ras_debugfs_remove(adev, &obj->head);
        }
 
-       debugfs_remove_recursive(con->dir);
        con->dir = NULL;
 }
 /* debugfs end */
@@ -1618,7 +1616,7 @@ static int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev)
        data = con->eh_data;
        save_count = data->count - control->num_recs;
        /* only new entries are saved */
-       if (save_count > 0)
+       if (save_count > 0) {
                if (amdgpu_ras_eeprom_process_recods(control,
                                                        &data->bps[control->num_recs],
                                                        true,
@@ -1627,6 +1625,9 @@ static int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev)
                        return -EIO;
                }
 
+               dev_info(adev->dev, "Saved %d pages to EEPROM table.\n", save_count);
+       }
+
        return 0;
 }
 
index 134cc36..0739e25 100644 (file)
@@ -462,7 +462,7 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
        unsigned int pages;
        int i, r;
 
-       *sgt = kmalloc(sizeof(*sg), GFP_KERNEL);
+       *sgt = kmalloc(sizeof(**sgt), GFP_KERNEL);
        if (!*sgt)
                return -ENOMEM;
 
index 61e8924..65997ff 100644 (file)
@@ -3082,7 +3082,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3[] =
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA0_CLK_CTRL, 0xff7f0fff, 0x30000100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA1_CLK_CTRL, 0xff7f0fff, 0x7e000100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_GCR_CNTL, 0x0007ffff, 0x0000c000),
-       SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000200),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000280),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x00800000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_EXCEPTION_CONTROL, 0x7fff0f1f, 0x00b80000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL_Sienna_Cichlid, 0x1ff1ffff, 0x00000500),
@@ -3127,7 +3127,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_2[] =
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA0_CLK_CTRL, 0xff7f0fff, 0x30000100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA1_CLK_CTRL, 0xff7f0fff, 0x7e000100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_GCR_CNTL, 0x0007ffff, 0x0000c000),
-       SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000200),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000280),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x00800000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_EXCEPTION_CONTROL, 0x7fff0f1f, 0x00b80000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL_Sienna_Cichlid, 0x1ff1ffff, 0x00000500),
@@ -3158,7 +3158,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_2[] =
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER7_SELECT, 0xf0f001ff, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER8_SELECT, 0xf0f001ff, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER9_SELECT, 0xf0f001ff, 0x00000000),
-       SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xffffffff, 0x010b0000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffbfffff, 0x00a00000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff)
 };
@@ -7529,6 +7529,7 @@ static int gfx_v10_0_set_powergating_state(void *handle,
        case CHIP_NAVI14:
        case CHIP_NAVI12:
        case CHIP_SIENNA_CICHLID:
+       case CHIP_NAVY_FLOUNDER:
                amdgpu_gfx_off_ctrl(adev, enable);
                break;
        default:
index cb9d60a..b95f222 100644 (file)
@@ -691,6 +691,7 @@ static const struct soc15_reg_golden golden_settings_gc_9_4_1_arct[] =
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_5_ARCT, 0x3ff, 0x135),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_CONFIG, 0xffffffff, 0x011A0000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_FIFO_SIZES, 0xffffffff, 0x00000f00),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_UTCL1_CNTL1, 0x30000000, 0x30000000)
 };
 
 static const struct soc15_reg_rlcg rlcg_access_gc_9_0[] = {
index fa0bca3..5d25059 100644 (file)
@@ -135,6 +135,12 @@ static void gfxhub_v2_1_init_cache_regs(struct amdgpu_device *adev)
 {
        uint32_t tmp;
 
+       /* These registers are not accessible to VF-SRIOV.
+        * The PF will program them instead.
+        */
+       if (amdgpu_sriov_vf(adev))
+               return;
+
        /* Setup L2 cache */
        tmp = RREG32_SOC15(GC, 0, mmGCVM_L2_CNTL);
        tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, ENABLE_L2_CACHE, 1);
@@ -190,6 +196,12 @@ static void gfxhub_v2_1_enable_system_domain(struct amdgpu_device *adev)
 
 static void gfxhub_v2_1_disable_identity_aperture(struct amdgpu_device *adev)
 {
+       /* These registers are not accessible to VF-SRIOV.
+        * The PF will program them instead.
+        */
+       if (amdgpu_sriov_vf(adev))
+               return;
+
        WREG32_SOC15(GC, 0, mmGCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
                     0xFFFFFFFF);
        WREG32_SOC15(GC, 0, mmGCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
@@ -326,6 +338,13 @@ void gfxhub_v2_1_set_fault_enable_default(struct amdgpu_device *adev,
                                          bool value)
 {
        u32 tmp;
+
+       /* These registers are not accessible to VF-SRIOV.
+        * The PF will program them instead.
+        */
+       if (amdgpu_sriov_vf(adev))
+               return;
+
        tmp = RREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_CNTL);
        tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
                            RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
index 42f1a51..c41e559 100644 (file)
@@ -49,12 +49,11 @@ static int jpeg_v3_0_set_powergating_state(void *handle,
 static int jpeg_v3_0_early_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-       if (adev->asic_type == CHIP_SIENNA_CICHLID) {
-               u32 harvest = RREG32_SOC15(JPEG, 0, mmCC_UVD_HARVESTING);
+       u32 harvest = RREG32_SOC15(JPEG, 0, mmCC_UVD_HARVESTING);
+
+       if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
+               return -ENOENT;
 
-               if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
-                       return -ENOENT;
-       }
        adev->jpeg.num_jpeg_inst = 1;
 
        jpeg_v3_0_set_dec_ring_funcs(adev);
index 757fa8e..c79fc54 100644 (file)
@@ -134,6 +134,12 @@ static void mmhub_v2_0_init_cache_regs(struct amdgpu_device *adev)
 {
        uint32_t tmp;
 
+       /* These registers are not accessible to VF-SRIOV.
+        * The PF will program them instead.
+        */
+       if (amdgpu_sriov_vf(adev))
+               return;
+
        /* Setup L2 cache */
        tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL);
        tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, ENABLE_L2_CACHE, 1);
@@ -189,6 +195,12 @@ static void mmhub_v2_0_enable_system_domain(struct amdgpu_device *adev)
 
 static void mmhub_v2_0_disable_identity_aperture(struct amdgpu_device *adev)
 {
+       /* These registers are not accessible to VF-SRIOV.
+        * The PF will program them instead.
+        */
+       if (amdgpu_sriov_vf(adev))
+               return;
+
        WREG32_SOC15(MMHUB, 0,
                     mmMMVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
                     0xFFFFFFFF);
@@ -318,6 +330,13 @@ void mmhub_v2_0_gart_disable(struct amdgpu_device *adev)
 void mmhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev, bool value)
 {
        u32 tmp;
+
+       /* These registers are not accessible to VF-SRIOV.
+        * The PF will program them instead.
+        */
+       if (amdgpu_sriov_vf(adev))
+               return;
+
        tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL);
        tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
                            RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
index ea69ae7..da8024c 100644 (file)
@@ -97,6 +97,49 @@ static void nv_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
 }
 
+static u64 nv_pcie_rreg64(struct amdgpu_device *adev, u32 reg)
+{
+       unsigned long flags, address, data;
+       u64 r;
+       address = adev->nbio.funcs->get_pcie_index_offset(adev);
+       data = adev->nbio.funcs->get_pcie_data_offset(adev);
+
+       spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+       /* read low 32 bit */
+       WREG32(address, reg);
+       (void)RREG32(address);
+       r = RREG32(data);
+
+       /* read high 32 bit */
+       WREG32(address, reg + 4);
+       (void)RREG32(address);
+       r |= ((u64)RREG32(data) << 32);
+       spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+       return r;
+}
+
+static void nv_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v)
+{
+       unsigned long flags, address, data;
+
+       address = adev->nbio.funcs->get_pcie_index_offset(adev);
+       data = adev->nbio.funcs->get_pcie_data_offset(adev);
+
+       spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+       /* write low 32 bit */
+       WREG32(address, reg);
+       (void)RREG32(address);
+       WREG32(data, (u32)(v & 0xffffffffULL));
+       (void)RREG32(data);
+
+       /* write high 32 bit */
+       WREG32(address, reg + 4);
+       (void)RREG32(address);
+       WREG32(data, (u32)(v >> 32));
+       (void)RREG32(data);
+       spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+}
+
 static u32 nv_didt_rreg(struct amdgpu_device *adev, u32 reg)
 {
        unsigned long flags, address, data;
@@ -319,10 +362,15 @@ nv_asic_reset_method(struct amdgpu_device *adev)
                dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
                                  amdgpu_reset_method);
 
-       if (smu_baco_is_support(smu))
-               return AMD_RESET_METHOD_BACO;
-       else
+       switch (adev->asic_type) {
+       case CHIP_SIENNA_CICHLID:
                return AMD_RESET_METHOD_MODE1;
+       default:
+               if (smu_baco_is_support(smu))
+                       return AMD_RESET_METHOD_BACO;
+               else
+                       return AMD_RESET_METHOD_MODE1;
+       }
 }
 
 static int nv_asic_reset(struct amdgpu_device *adev)
@@ -673,6 +721,8 @@ static int nv_common_early_init(void *handle)
        adev->smc_wreg = NULL;
        adev->pcie_rreg = &nv_pcie_rreg;
        adev->pcie_wreg = &nv_pcie_wreg;
+       adev->pcie_rreg64 = &nv_pcie_rreg64;
+       adev->pcie_wreg64 = &nv_pcie_wreg64;
 
        /* TODO: will add them during VCN v2 implementation */
        adev->uvd_ctx_rreg = NULL;
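
The nv.c hunk above adds 64-bit accessors on top of the 32-bit PCIE index/data window: select the register through the index port, transfer the low half through the data port, then repeat at reg + 4 for the high half, all under the same spinlock. A standalone sketch of that composition; the function and parameter names are illustrative, not driver API:

#include <stdint.h>

/* Illustrative: read a 64-bit value through a 32-bit index/data window,
 * mirroring nv_pcie_rreg64() above. write32/read32 stand in for the MMIO
 * accessors; the dummy index reads post the index write before the data
 * access. */
static uint64_t indexed_read64(void (*write32)(uint32_t addr, uint32_t val),
                               uint32_t (*read32)(uint32_t addr),
                               uint32_t index_port, uint32_t data_port,
                               uint32_t reg)
{
        uint64_t val;

        write32(index_port, reg);
        (void)read32(index_port);
        val = read32(data_port);                    /* low 32 bits */

        write32(index_port, reg + 4);
        (void)read32(index_port);
        val |= (uint64_t)read32(data_port) << 32;   /* high 32 bits */

        return val;
}
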
index d488d25..e16874f 100644 (file)
@@ -179,12 +179,11 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
                }
                break;
        case CHIP_SIENNA_CICHLID:
+       case CHIP_NAVY_FLOUNDER:
                err = psp_init_ta_microcode(&adev->psp, chip_name);
                if (err)
                        return err;
                break;
-       case CHIP_NAVY_FLOUNDER:
-               break;
        default:
                BUG();
        }
index 910a4a3..63e5547 100644 (file)
@@ -1659,7 +1659,7 @@ static const struct amdgpu_ring_funcs vcn_v3_0_dec_ring_vm_funcs = {
        .emit_ib = vcn_v2_0_dec_ring_emit_ib,
        .emit_fence = vcn_v2_0_dec_ring_emit_fence,
        .emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush,
-       .test_ring = amdgpu_vcn_dec_ring_test_ring,
+       .test_ring = vcn_v2_0_dec_ring_test_ring,
        .test_ib = amdgpu_vcn_dec_ring_test_ib,
        .insert_nop = vcn_v2_0_dec_ring_insert_nop,
        .insert_start = vcn_v2_0_dec_ring_insert_start,
index 407065c..df93382 100644 (file)
@@ -97,6 +97,8 @@ MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
+#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
 #endif
 
 #define FIRMWARE_RAVEN_DMCU            "amdgpu/raven_dmcu.bin"
@@ -1185,10 +1187,13 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
                break;
 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
        case CHIP_SIENNA_CICHLID:
-       case CHIP_NAVY_FLOUNDER:
                dmub_asic = DMUB_ASIC_DCN30;
                fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
                break;
+       case CHIP_NAVY_FLOUNDER:
+               dmub_asic = DMUB_ASIC_DCN30;
+               fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
+               break;
 #endif
 
        default:
@@ -2191,6 +2196,7 @@ void amdgpu_dm_update_connector_after_detect(
 
                        drm_connector_update_edid_property(connector,
                                                           aconnector->edid);
+                       drm_add_edid_modes(connector, aconnector->edid);
 
                        if (aconnector->dc_link->aux_mode)
                                drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
@@ -8544,6 +8550,29 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
        if (ret)
                goto fail;
 
+       /* Check connector changes */
+       for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
+               struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
+               struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
+
+               /* Skip connectors that are disabled or part of modeset already. */
+               if (!old_con_state->crtc && !new_con_state->crtc)
+                       continue;
+
+               if (!new_con_state->crtc)
+                       continue;
+
+               new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
+               if (IS_ERR(new_crtc_state)) {
+                       ret = PTR_ERR(new_crtc_state);
+                       goto fail;
+               }
+
+               if (dm_old_con_state->abm_level !=
+                   dm_new_con_state->abm_level)
+                       new_crtc_state->connectors_changed = true;
+       }
+
 #if defined(CONFIG_DRM_AMD_DC_DCN)
        if (adev->asic_type >= CHIP_NAVI10) {
                for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
index 998f729..e5a6d91 100644 (file)
@@ -35,6 +35,7 @@
 #include "dmub/dmub_srv.h"
 #include "resource.h"
 #include "dsc.h"
+#include "dc_link_dp.h"
 
 struct dmub_debugfs_trace_header {
        uint32_t entry_count;
@@ -1150,7 +1151,7 @@ static ssize_t dp_dsc_slice_height_read(struct file *f, char __user *buf,
        return result;
 }
 
-static ssize_t dp_dsc_bytes_per_pixel_read(struct file *f, char __user *buf,
+static ssize_t dp_dsc_bits_per_pixel_read(struct file *f, char __user *buf,
                                    size_t size, loff_t *pos)
 {
        char *rd_buf = NULL;
@@ -1186,7 +1187,7 @@ static ssize_t dp_dsc_bytes_per_pixel_read(struct file *f, char __user *buf,
 
        snprintf(rd_buf_ptr, str_len,
                "%d\n",
-               dsc_state.dsc_bytes_per_pixel);
+               dsc_state.dsc_bits_per_pixel);
        rd_buf_ptr += str_len;
 
        while (size) {
@@ -1460,9 +1461,9 @@ static const struct file_operations dp_dsc_slice_height_debugfs_fops = {
        .llseek = default_llseek
 };
 
-static const struct file_operations dp_dsc_bytes_per_pixel_debugfs_fops = {
+static const struct file_operations dp_dsc_bits_per_pixel_debugfs_fops = {
        .owner = THIS_MODULE,
-       .read = dp_dsc_bytes_per_pixel_read,
+       .read = dp_dsc_bits_per_pixel_read,
        .llseek = default_llseek
 };
 
@@ -1552,7 +1553,7 @@ static const struct {
                {"dsc_clock_en", &dp_dsc_clock_en_debugfs_fops},
                {"dsc_slice_width", &dp_dsc_slice_width_debugfs_fops},
                {"dsc_slice_height", &dp_dsc_slice_height_debugfs_fops},
-               {"dsc_bytes_per_pixel", &dp_dsc_bytes_per_pixel_debugfs_fops},
+               {"dsc_bits_per_pixel", &dp_dsc_bits_per_pixel_debugfs_fops},
                {"dsc_pic_width", &dp_dsc_pic_width_debugfs_fops},
                {"dsc_pic_height", &dp_dsc_pic_height_debugfs_fops},
                {"dsc_chunk_size", &dp_dsc_chunk_size_debugfs_fops},
index 008d4d1..ad394ae 100644 (file)
@@ -2834,6 +2834,8 @@ static const struct dc_vbios_funcs vbios_funcs = {
        .bios_parser_destroy = bios_parser_destroy,
 
        .get_board_layout_info = bios_get_board_layout_info,
+
+       .get_atom_dc_golden_table = NULL
 };
 
 static bool bios_parser_construct(
index b868413..2d5c7da 100644 (file)
@@ -1108,6 +1108,18 @@ static enum bp_result bios_parser_enable_disp_power_gating(
                action);
 }
 
+static enum bp_result bios_parser_enable_lvtma_control(
+       struct dc_bios *dcb,
+       uint8_t uc_pwr_on)
+{
+       struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+       if (!bp->cmd_tbl.enable_lvtma_control)
+               return BP_RESULT_FAILURE;
+
+       return bp->cmd_tbl.enable_lvtma_control(bp, uc_pwr_on);
+}
+
 static bool bios_parser_is_accelerated_mode(
        struct dc_bios *dcb)
 {
@@ -2079,6 +2091,85 @@ static uint16_t bios_parser_pack_data_tables(
        return 0;
 }
 
+static struct atom_dc_golden_table_v1 *bios_get_golden_table(
+               struct bios_parser *bp,
+               uint32_t rev_major,
+               uint32_t rev_minor,
+               uint16_t *dc_golden_table_ver)
+{
+       struct atom_display_controller_info_v4_4 *disp_cntl_tbl_4_4 = NULL;
+       uint32_t dc_golden_offset = 0;
+       *dc_golden_table_ver = 0;
+
+       if (!DATA_TABLES(dce_info))
+               return NULL;
+
+       /* ver.4.4 or higher */
+       switch (rev_major) {
+       case 4:
+               switch (rev_minor) {
+               case 4:
+                       disp_cntl_tbl_4_4 = GET_IMAGE(struct atom_display_controller_info_v4_4,
+                                                                       DATA_TABLES(dce_info));
+                       if (!disp_cntl_tbl_4_4)
+                               return NULL;
+                       dc_golden_offset = DATA_TABLES(dce_info) + disp_cntl_tbl_4_4->dc_golden_table_offset;
+                       *dc_golden_table_ver = disp_cntl_tbl_4_4->dc_golden_table_ver;
+                       break;
+               }
+               break;
+       }
+
+       if (!dc_golden_offset)
+               return NULL;
+
+       if (*dc_golden_table_ver != 1)
+               return NULL;
+
+       return GET_IMAGE(struct atom_dc_golden_table_v1,
+                       dc_golden_offset);
+}
+
+static enum bp_result bios_get_atom_dc_golden_table(
+       struct dc_bios *dcb)
+{
+       struct bios_parser *bp = BP_FROM_DCB(dcb);
+       enum bp_result result = BP_RESULT_OK;
+       struct atom_dc_golden_table_v1 *atom_dc_golden_table = NULL;
+       struct atom_common_table_header *header;
+       struct atom_data_revision tbl_revision;
+       uint16_t dc_golden_table_ver = 0;
+
+       header = GET_IMAGE(struct atom_common_table_header,
+                                                       DATA_TABLES(dce_info));
+       if (!header)
+               return BP_RESULT_UNSUPPORTED;
+
+       get_atom_data_table_revision(header, &tbl_revision);
+
+       atom_dc_golden_table = bios_get_golden_table(bp,
+                       tbl_revision.major,
+                       tbl_revision.minor,
+                       &dc_golden_table_ver);
+
+       if (!atom_dc_golden_table)
+               return BP_RESULT_UNSUPPORTED;
+
+       dcb->golden_table.dc_golden_table_ver = dc_golden_table_ver;
+       dcb->golden_table.aux_dphy_rx_control0_val = atom_dc_golden_table->aux_dphy_rx_control0_val;
+       dcb->golden_table.aux_dphy_rx_control1_val = atom_dc_golden_table->aux_dphy_rx_control1_val;
+       dcb->golden_table.aux_dphy_tx_control_val = atom_dc_golden_table->aux_dphy_tx_control_val;
+       dcb->golden_table.dc_gpio_aux_ctrl_0_val = atom_dc_golden_table->dc_gpio_aux_ctrl_0_val;
+       dcb->golden_table.dc_gpio_aux_ctrl_1_val = atom_dc_golden_table->dc_gpio_aux_ctrl_1_val;
+       dcb->golden_table.dc_gpio_aux_ctrl_2_val = atom_dc_golden_table->dc_gpio_aux_ctrl_2_val;
+       dcb->golden_table.dc_gpio_aux_ctrl_3_val = atom_dc_golden_table->dc_gpio_aux_ctrl_3_val;
+       dcb->golden_table.dc_gpio_aux_ctrl_4_val = atom_dc_golden_table->dc_gpio_aux_ctrl_4_val;
+       dcb->golden_table.dc_gpio_aux_ctrl_5_val = atom_dc_golden_table->dc_gpio_aux_ctrl_5_val;
+
+       return result;
+}
+
+
 static const struct dc_vbios_funcs vbios_funcs = {
        .get_connectors_number = bios_parser_get_connectors_number,
 
@@ -2128,6 +2219,10 @@ static const struct dc_vbios_funcs vbios_funcs = {
 
        .get_board_layout_info = bios_get_board_layout_info,
        .pack_data_tables = bios_parser_pack_data_tables,
+
+       .get_atom_dc_golden_table = bios_get_atom_dc_golden_table,
+
+       .enable_lvtma_control = bios_parser_enable_lvtma_control
 };
 
 static bool bios_parser2_construct(
index bed9157..eb3ae5c 100644 (file)
@@ -904,6 +904,33 @@ static unsigned int get_smu_clock_info_v3_1(struct bios_parser *bp, uint8_t id)
        return 0;
 }
 
+/******************************************************************************
+ ******************************************************************************
+ **
+ **                  LVTMA CONTROL
+ **
+ ******************************************************************************
+ *****************************************************************************/
+
+static enum bp_result enable_lvtma_control(
+       struct bios_parser *bp,
+       uint8_t uc_pwr_on);
+
+static void init_enable_lvtma_control(struct bios_parser *bp)
+{
+       /* TODO: add switch for table version */
+       bp->cmd_tbl.enable_lvtma_control = enable_lvtma_control;
+
+}
+
+static enum bp_result enable_lvtma_control(
+       struct bios_parser *bp,
+       uint8_t uc_pwr_on)
+{
+       enum bp_result result = BP_RESULT_FAILURE;
+       return result;
+}
+
 void dal_firmware_parser_init_cmd_tbl(struct bios_parser *bp)
 {
        init_dig_encoder_control(bp);
@@ -919,4 +946,5 @@ void dal_firmware_parser_init_cmd_tbl(struct bios_parser *bp)
        init_set_dce_clock(bp);
        init_get_smu_clock_info(bp);
 
+       init_enable_lvtma_control(bp);
 }
index 7a2af24..7bdce01 100644 (file)
@@ -94,7 +94,8 @@ struct cmd_tbl {
                struct bp_set_dce_clock_parameters *bp_params);
        unsigned int (*get_smu_clock_info)(
                        struct bios_parser *bp, uint8_t id);
-
+       enum bp_result (*enable_lvtma_control)(struct bios_parser *bp,
+                       uint8_t uc_pwr_on);
 };
 
 void dal_firmware_parser_init_cmd_tbl(struct bios_parser *bp);
index 3fab929..e133edc 100644 (file)
@@ -85,12 +85,77 @@ static int rv1_determine_dppclk_threshold(struct clk_mgr_internal *clk_mgr, stru
        return disp_clk_threshold;
 }
 
-static void ramp_up_dispclk_with_dpp(struct clk_mgr_internal *clk_mgr, struct dc *dc, struct dc_clocks *new_clocks)
+static void ramp_up_dispclk_with_dpp(
+               struct clk_mgr_internal *clk_mgr,
+               struct dc *dc,
+               struct dc_clocks *new_clocks,
+               bool safe_to_lower)
 {
        int i;
        int dispclk_to_dpp_threshold = rv1_determine_dppclk_threshold(clk_mgr, new_clocks);
        bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
 
+       /* This function changes dispclk, dppclk and dprefclk according to the
+        * bandwidth requirement. Its call stack is rv1_update_clocks -->
+        * update_clocks --> dcn10_prepare_bandwidth / dcn10_optimize_bandwidth
+        * --> prepare_bandwidth / optimize_bandwidth. Before the dcn hw is
+        * changed, prepare_bandwidth is called first to allow enough clock and
+        * watermark for the change; after the dcn hw change is done,
+        * optimize_bandwidth is executed to lower the clocks and save power
+        * for the new dcn hw settings.
+        *
+        * Below is the sequence of commit_planes_for_stream:
+        *
+        * step 1: prepare_bandwidth - raise clocks to have enough bandwidth
+        * step 2: lock_doublebuffer_enable
+        * step 3: pipe_control_lock(true) - so that dchubp register changes do
+        * not take effect right away
+        * step 4: apply_ctx_for_surface - program dchubp
+        * step 5: pipe_control_lock(false) - dchubp register changes take effect
+        * step 6: optimize_bandwidth --> dc_post_update_surfaces_to_stream
+        * for a full update, optimize clocks to save power
+        *
+        * At the end of step 1 the dcn clocks (dprefclk, dispclk, dppclk) may
+        * already be changed for the new dchubp configuration, but the real
+        * dcn hub dchubps are still running with the old configuration until
+        * the end of step 5. The clock settings at step 1 therefore must not
+        * be lower than they were before step 1.
+        * This is checked by two conditions:
+        * 1. if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz,
+        * clk_mgr_base->clks.dispclk_khz) ||
+        * new_clocks->dispclk_khz == clk_mgr_base->clks.dispclk_khz)
+        * 2. request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz
+        *
+        * The second condition is based on the new dchubp configuration, and
+        * dppclk for the new dchubp may differ from dppclk before step 1.
+        * For example, before step 1 the dchubps are as below:
+        * pipe 0: recout=(0,40,1920,980) viewport=(0,0,1920,979)
+        * pipe 1: recout=(0,0,1920,1080) viewport=(0,0,1920,1080)
+        * and pipe 0 needs dppclk = dispclk.
+        *
+        * New dchubp pipe-split configuration:
+        * pipe 0: recout=(0,0,960,1080) viewport=(0,0,960,1080)
+        * pipe 1: recout=(960,0,960,1080) viewport=(960,0,960,1080)
+        * Here dppclk only needs dppclk = dispclk / 2.
+        *
+        * dispclk and dppclk are not locked by the otg master lock; they take
+        * effect after step 1. During this transition dispclk stays the same,
+        * but dppclk is changed to half of the previous clock for the old
+        * dchubp configuration between step 1 and step 6. This may
+        * intermittently cause p-state warnings.
+        *
+        * For new_clocks->dispclk_khz == clk_mgr_base->clks.dispclk_khz, we
+        * need to make sure dppclk is not lowered between steps 1 and 6.
+        * For new_clocks->dispclk_khz > clk_mgr_base->clks.dispclk_khz, the
+        * display clock is raised, but we do not know the ratio of
+        * new_clocks->dispclk_khz to clk_mgr_base->clks.dispclk_khz, so
+        * new_clocks->dispclk_khz / 2 does not guarantee equal to or higher
+        * than the old dppclk. We can ignore the power-saving difference
+        * between dppclk = dispclk and dppclk = dispclk / 2 between step 1
+        * and step 6; as long as safe_to_lower = false, set dppclk = dispclk
+        * to simplify the condition check.
+        * todo: review this change for other asics.
+        */
+       if (!safe_to_lower)
+               request_dpp_div = false;
+
        /* set disp clk to dpp clk threshold */
 
        clk_mgr->funcs->set_dispclk(clk_mgr, dispclk_to_dpp_threshold);
@@ -209,7 +274,7 @@ static void rv1_update_clocks(struct clk_mgr *clk_mgr_base,
        /* program dispclk on = as a w/a for sleep resume clock ramping issues */
        if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)
                        || new_clocks->dispclk_khz == clk_mgr_base->clks.dispclk_khz) {
-               ramp_up_dispclk_with_dpp(clk_mgr, dc, new_clocks);
+               ramp_up_dispclk_with_dpp(clk_mgr, dc, new_clocks, safe_to_lower);
                clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
                send_request_to_lower = true;
        }
index d94fdc5..9133646 100644 (file)
@@ -323,9 +323,10 @@ static void dcn3_update_clocks(struct clk_mgr *clk_mgr_base,
                        /* if clock is being raised, increase refclk before lowering DTO */
                        if (update_dppclk || update_dispclk)
                                dcn20_update_clocks_update_dentist(clk_mgr);
-                       /* always update dtos unless clock is lowered and not safe to lower */
-                       if (new_clocks->dppclk_khz >= dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz)
-                               dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
+                       /* There is a check inside dcn20_update_clocks_update_dpp_dto which ensures
+                        * that we do not lower dto when it is not safe to lower. We do not need to
+                        * compare the current and new dppclk before calling this function. */
+                       dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
                }
        }
 
index ef0b594..92eb1ca 100644 (file)
@@ -1250,6 +1250,9 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
        int i, k, l;
        struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};
 
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+       dc_allow_idle_optimizations(dc, false);
+#endif
 
        for (i = 0; i < context->stream_count; i++)
                dc_streams[i] =  context->streams[i];
@@ -1838,6 +1841,11 @@ static enum surface_update_type check_update_surfaces_for_stream(
        int i;
        enum surface_update_type overall_type = UPDATE_TYPE_FAST;
 
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+       if (dc->idle_optimizations_allowed)
+               overall_type = UPDATE_TYPE_FULL;
+
+#endif
        if (stream_status == NULL || stream_status->plane_count != surface_count)
                overall_type = UPDATE_TYPE_FULL;
 
@@ -2306,8 +2314,14 @@ static void commit_planes_for_stream(struct dc *dc,
                }
        }
 
-       if (update_type == UPDATE_TYPE_FULL && dc->optimize_seamless_boot_streams == 0) {
-               dc->hwss.prepare_bandwidth(dc, context);
+       if (update_type == UPDATE_TYPE_FULL) {
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+               dc_allow_idle_optimizations(dc, false);
+
+#endif
+               if (dc->optimize_seamless_boot_streams == 0)
+                       dc->hwss.prepare_bandwidth(dc, context);
+
                context_clock_trace(dc, context);
        }
 
index 02742cc..117d8aa 100644 (file)
@@ -1540,6 +1540,9 @@ static bool dc_link_construct(struct dc_link *link,
                }
        }
 
+       if (bios->funcs->get_atom_dc_golden_table)
+               bios->funcs->get_atom_dc_golden_table(bios);
+
        /*
         * TODO check if GPIO programmed correctly
         *
@@ -3102,6 +3105,9 @@ void core_link_enable_stream(
        struct dc *dc = pipe_ctx->stream->ctx->dc;
        struct dc_stream_state *stream = pipe_ctx->stream;
        enum dc_status status;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+       enum otg_out_mux_dest otg_out_dest = OUT_MUX_DIO;
+#endif
        DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
 
        if (!IS_DIAG_DC(dc->ctx->dce_environment) &&
@@ -3136,8 +3142,8 @@ void core_link_enable_stream(
        pipe_ctx->stream->link->link_state_valid = true;
 
 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
-               if (pipe_ctx->stream_res.tg->funcs->set_out_mux)
-                                       pipe_ctx->stream_res.tg->funcs->set_out_mux(pipe_ctx->stream_res.tg, OUT_MUX_DIO);
+       if (pipe_ctx->stream_res.tg->funcs->set_out_mux)
+               pipe_ctx->stream_res.tg->funcs->set_out_mux(pipe_ctx->stream_res.tg, otg_out_dest);
 #endif
 
        if (dc_is_dvi_signal(pipe_ctx->stream->signal))
@@ -3276,16 +3282,15 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx)
                        dc_is_virtual_signal(pipe_ctx->stream->signal))
                return;
 
-       if (pipe_ctx->stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
+       if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) {
                core_link_set_avmute(pipe_ctx, true);
        }
 
+       dc->hwss.blank_stream(pipe_ctx);
 #if defined(CONFIG_DRM_AMD_DC_HDCP)
        update_psp_stream_config(pipe_ctx, true);
 #endif
 
-       dc->hwss.blank_stream(pipe_ctx);
-
        if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
                deallocate_mst_payload(pipe_ctx);
 
index 5cb7b83..9bc03f2 100644 (file)
@@ -1133,6 +1133,44 @@ static inline enum link_training_result perform_link_training_int(
        return status;
 }
 
+static enum link_training_result check_link_loss_status(
+       struct dc_link *link,
+       const struct link_training_settings *link_training_setting)
+{
+       enum link_training_result status = LINK_TRAINING_SUCCESS;
+       union lane_status lane_status;
+       uint8_t dpcd_buf[6] = {0};
+       uint32_t lane;
+
+       core_link_read_dpcd(
+                       link,
+                       DP_SINK_COUNT,
+                       (uint8_t *)(dpcd_buf),
+                       sizeof(dpcd_buf));
+
+       /* parse lane status */
+       for (lane = 0; lane < link->cur_link_settings.lane_count; lane++) {
+               /*
+                * check lanes status
+                */
+               lane_status.raw = get_nibble_at_index(&dpcd_buf[2], lane);
+
+               if (!lane_status.bits.CHANNEL_EQ_DONE_0 ||
+                       !lane_status.bits.CR_DONE_0 ||
+                       !lane_status.bits.SYMBOL_LOCKED_0) {
+                       /* if channel equalization, clock recovery or
+                        * symbol lock is lost on any lane, consider
+                        * the link as dropped: the dp sink status has
+                        * changed
+                        */
+                       status = LINK_TRAINING_LINK_LOSS;
+                       break;
+               }
+       }
+
+       return status;
+}
+
 static void initialize_training_settings(
         struct dc_link *link,
        const struct dc_link_settings *link_setting,
@@ -1372,6 +1410,9 @@ static void print_status_message(
        case LINK_TRAINING_LQA_FAIL:
                lt_result = "LQA failed";
                break;
+       case LINK_TRAINING_LINK_LOSS:
+               lt_result = "Link loss";
+               break;
        default:
                break;
        }
@@ -1531,6 +1572,14 @@ enum link_training_result dc_link_dp_perform_link_training(
                                status);
        }
 
+       /* delay 5ms after Main Link output idle pattern and then check
+        * DPCD 0202h.
+        */
+       if (link->connector_signal != SIGNAL_TYPE_EDP && status == LINK_TRAINING_SUCCESS) {
+               msleep(5);
+               status = check_link_loss_status(link, &lt_settings);
+       }
+
        /* 6. print status message*/
        print_status_message(link, &lt_settings, status);
 
@@ -4290,22 +4339,6 @@ void dp_set_fec_enable(struct dc_link *link, bool enable)
 
 void dpcd_set_source_specific_data(struct dc_link *link)
 {
-       uint8_t dspc = 0;
-       enum dc_status ret;
-
-       ret = core_link_read_dpcd(link, DP_DOWN_STREAM_PORT_COUNT, &dspc,
-                                 sizeof(dspc));
-
-       if (ret != DC_OK) {
-               DC_LOG_ERROR("Error in DP aux read transaction,"
-                            " not writing source specific data\n");
-               return;
-       }
-
-       /* Return if OUI unsupported */
-       if (!(dspc & DP_OUI_SUPPORT))
-               return;
-
        if (!link->dc->vendor_signature.is_valid) {
                struct dpcd_amd_signature amd_signature;
                amd_signature.AMD_IEEE_TxSignature_byte1 = 0x0;
index 10d69ad..0257a90 100644 (file)
@@ -246,20 +246,18 @@ struct dc_stream_status *dc_stream_get_status(
 
 #ifndef TRIM_FSFT
 /**
- * dc_optimize_timing() - dc to optimize timing
+ * dc_optimize_timing_for_fsft() - dc to optimize timing
  */
-bool dc_optimize_timing(
-       struct dc_crtc_timing *timing,
+bool dc_optimize_timing_for_fsft(
+       struct dc_stream_state *pStream,
        unsigned int max_input_rate_in_khz)
 {
-       //optimization is expected to assing a value to these:
-       //timing->pix_clk_100hz
-       //timing->v_front_porch
-       //timing->v_total
-       //timing->fast_transport_output_rate_100hz;
-       timing->fast_transport_output_rate_100hz = timing->pix_clk_100hz;
+       struct dc  *dc;
 
-       return true;
+       dc = pStream->ctx->dc;
+
+       return (dc->hwss.optimize_timing_for_fsft &&
+               dc->hwss.optimize_timing_for_fsft(dc, &pStream->timing, max_input_rate_in_khz));
 }
 #endif
 
index 845a305..0811f94 100644 (file)
@@ -133,6 +133,13 @@ struct dc_vbios_funcs {
        uint16_t (*pack_data_tables)(
                struct dc_bios *dcb,
                void *dst);
+
+       enum bp_result (*get_atom_dc_golden_table)(
+                       struct dc_bios *dcb);
+
+       enum bp_result (*enable_lvtma_control)(
+               struct dc_bios *bios,
+               uint8_t uc_pwr_on);
 };
 
 struct bios_registers {
@@ -154,6 +161,7 @@ struct dc_bios {
        struct dc_firmware_info fw_info;
        bool fw_info_valid;
        struct dc_vram_info vram_info;
+       struct dc_golden_table golden_table;
 };
 
 #endif /* DC_BIOS_TYPES_H */
index e4e85a1..633442b 100644 (file)
@@ -424,8 +424,8 @@ struct dc_stream_status *dc_stream_get_status(
        struct dc_stream_state *dc_stream);
 
 #ifndef TRIM_FSFT
-bool dc_optimize_timing(
-       struct dc_crtc_timing *timing,
+bool dc_optimize_timing_for_fsft(
+       struct dc_stream_state *pStream,
        unsigned int max_input_rate_in_khz);
 #endif
 
index 29fe538..946ba92 100644 (file)
@@ -890,6 +890,20 @@ struct dsc_dec_dpcd_caps {
        uint32_t branch_max_line_width;
 };
 
+struct dc_golden_table {
+       uint16_t dc_golden_table_ver;
+       uint32_t aux_dphy_rx_control0_val;
+       uint32_t aux_dphy_tx_control_val;
+       uint32_t aux_dphy_rx_control1_val;
+       uint32_t dc_gpio_aux_ctrl_0_val;
+       uint32_t dc_gpio_aux_ctrl_1_val;
+       uint32_t dc_gpio_aux_ctrl_2_val;
+       uint32_t dc_gpio_aux_ctrl_3_val;
+       uint32_t dc_gpio_aux_ctrl_4_val;
+       uint32_t dc_gpio_aux_ctrl_5_val;
+};
+
+
 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
 enum dc_gpu_mem_alloc_type {
        DC_MEM_ALLOC_TYPE_GART,
index 384389f..66027d4 100644 (file)
@@ -38,7 +38,8 @@
 
 #define AUX_REG_LIST(id)\
        SRI(AUX_CONTROL, DP_AUX, id), \
-       SRI(AUX_DPHY_RX_CONTROL0, DP_AUX, id)
+       SRI(AUX_DPHY_RX_CONTROL0, DP_AUX, id), \
+       SRI(AUX_DPHY_RX_CONTROL1, DP_AUX, id)
 
 #define HPD_REG_LIST(id)\
        SRI(DC_HPD_CONTROL, HPD, id)
 struct dce110_link_enc_aux_registers {
        uint32_t AUX_CONTROL;
        uint32_t AUX_DPHY_RX_CONTROL0;
+       uint32_t AUX_DPHY_RX_CONTROL1;
 };
 
 struct dce110_link_enc_hpd_registers {
index 70ec691..99c68ca 100644 (file)
@@ -49,7 +49,7 @@
 #define DCN_PANEL_CNTL_REG_LIST()\
        DCN_PANEL_CNTL_SR(PWRSEQ_CNTL, LVTMA), \
        DCN_PANEL_CNTL_SR(PWRSEQ_STATE, LVTMA), \
-       DCE_PANEL_CNTL_SR(PWRSEQ_REF_DIV, LVTMA), \
+       DCN_PANEL_CNTL_SR(PWRSEQ_REF_DIV, LVTMA), \
        SR(BL_PWM_CNTL), \
        SR(BL_PWM_CNTL2), \
        SR(BL_PWM_PERIOD_CNTL), \
index 82e67bd..5167d6b 100644 (file)
@@ -233,8 +233,8 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
        copy_settings_data->frame_cap_ind                       = psr_context->psrFrameCaptureIndicationReq;
        copy_settings_data->debug.bitfields.visual_confirm      = dc->dc->debug.visual_confirm == VISUAL_CONFIRM_PSR ?
                                                                        true : false;
+       copy_settings_data->debug.bitfields.use_hw_lock_mgr     = 1;
        copy_settings_data->init_sdp_deadline                   = psr_context->sdpTransmitLineNumDeadline;
-       copy_settings_data->debug.bitfields.use_hw_lock_mgr     = 0;
 
        dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
        dc_dmub_srv_cmd_execute(dc->dmub_srv);
index 49380ed..45c9e90 100644 (file)
@@ -842,6 +842,17 @@ void dce110_edp_power_control(
                cntl.coherent = false;
                cntl.lanes_number = LANE_COUNT_FOUR;
                cntl.hpd_sel = link->link_enc->hpd_source;
+
+               if (ctx->dc->ctx->dmub_srv &&
+                               ctx->dc->debug.dmub_command_table) {
+                       if (cntl.action == TRANSMITTER_CONTROL_POWER_ON)
+                               bp_result = ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios,
+                                               LVTMA_CONTROL_POWER_ON);
+                       else
+                               bp_result = ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios,
+                                               LVTMA_CONTROL_POWER_OFF);
+               }
+
                bp_result = link_transmitter_control(ctx->dc_bios, &cntl);
 
                if (!power_up)
@@ -919,8 +930,21 @@ void dce110_edp_backlight_control(
                /*edp 1.2*/
        if (cntl.action == TRANSMITTER_CONTROL_BACKLIGHT_ON)
                edp_receiver_ready_T7(link);
+
+       if (ctx->dc->ctx->dmub_srv &&
+                       ctx->dc->debug.dmub_command_table) {
+               if (cntl.action == TRANSMITTER_CONTROL_BACKLIGHT_ON)
+                       ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios,
+                                       LVTMA_CONTROL_LCD_BLON);
+               else
+                       ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios,
+                                       LVTMA_CONTROL_LCD_BLOFF);
+       }
+
        link_transmitter_control(ctx->dc_bios, &cntl);
 
+
+
        if (enable && link->dpcd_sink_ext_caps.bits.oled)
                msleep(OLED_POST_T7_DELAY);
 
index da0897f..a643927 100644 (file)
@@ -390,6 +390,8 @@ void dcn10_log_hw_state(struct dc *dc,
        }
        DTN_INFO("\n");
 
+       // dcn_dsc_state struct field bytes_per_pixel was renamed to bits_per_pixel
+       // TODO: Update golden log header to reflect this name change
        DTN_INFO("DSC: CLOCK_EN  SLICE_WIDTH  Bytes_pp\n");
        for (i = 0; i < pool->res_cap->num_dsc; i++) {
                struct display_stream_compressor *dsc = pool->dscs[i];
@@ -400,7 +402,7 @@ void dcn10_log_hw_state(struct dc *dc,
                dsc->inst,
                        s.dsc_clock_en,
                        s.dsc_slice_width,
-                       s.dsc_bytes_per_pixel);
+                       s.dsc_bits_per_pixel);
                DTN_INFO("\n");
        }
        DTN_INFO("\n");
index cf59ab0..04dabed 100644 (file)
 #define TO_DCN10_LINK_ENC(link_encoder)\
        container_of(link_encoder, struct dcn10_link_encoder, base)
 
-
 #define AUX_REG_LIST(id)\
        SRI(AUX_CONTROL, DP_AUX, id), \
-       SRI(AUX_DPHY_RX_CONTROL0, DP_AUX, id)
+       SRI(AUX_DPHY_RX_CONTROL0, DP_AUX, id), \
+       SRI(AUX_DPHY_RX_CONTROL1, DP_AUX, id)
 
 #define HPD_REG_LIST(id)\
        SRI(DC_HPD_CONTROL, HPD, id)
@@ -73,6 +73,7 @@ struct dcn10_link_enc_aux_registers {
        uint32_t AUX_CONTROL;
        uint32_t AUX_DPHY_RX_CONTROL0;
        uint32_t AUX_DPHY_TX_CONTROL;
+       uint32_t AUX_DPHY_RX_CONTROL1;
 };
 
 struct dcn10_link_enc_hpd_registers {
@@ -443,7 +444,10 @@ struct dcn10_link_enc_registers {
        type AUX_TX_PRECHARGE_LEN; \
        type AUX_TX_PRECHARGE_SYMBOLS; \
        type AUX_MODE_DET_CHECK_DELAY;\
-       type DPCS_DBG_CBUS_DIS
+       type DPCS_DBG_CBUS_DIS;\
+       type AUX_RX_PRECHARGE_SKIP;\
+       type AUX_RX_TIMEOUT_LEN;\
+       type AUX_RX_TIMEOUT_LEN_MUL
 
 struct dcn10_link_enc_shift {
        DCN_LINK_ENCODER_REG_FIELD_LIST(uint8_t);
index 07b2f93..842abb4 100644 (file)
@@ -121,35 +121,35 @@ void enc1_update_generic_info_packet(
        switch (packet_index) {
        case 0:
                REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
-                               AFMT_GENERIC0_FRAME_UPDATE, 1);
+                               AFMT_GENERIC0_IMMEDIATE_UPDATE, 1);
                break;
        case 1:
                REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
-                               AFMT_GENERIC1_FRAME_UPDATE, 1);
+                               AFMT_GENERIC1_IMMEDIATE_UPDATE, 1);
                break;
        case 2:
                REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
-                               AFMT_GENERIC2_FRAME_UPDATE, 1);
+                               AFMT_GENERIC2_IMMEDIATE_UPDATE, 1);
                break;
        case 3:
                REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
-                               AFMT_GENERIC3_FRAME_UPDATE, 1);
+                               AFMT_GENERIC3_IMMEDIATE_UPDATE, 1);
                break;
        case 4:
                REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
-                               AFMT_GENERIC4_FRAME_UPDATE, 1);
+                               AFMT_GENERIC4_IMMEDIATE_UPDATE, 1);
                break;
        case 5:
                REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
-                               AFMT_GENERIC5_FRAME_UPDATE, 1);
+                               AFMT_GENERIC5_IMMEDIATE_UPDATE, 1);
                break;
        case 6:
                REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
-                               AFMT_GENERIC6_FRAME_UPDATE, 1);
+                               AFMT_GENERIC6_IMMEDIATE_UPDATE, 1);
                break;
        case 7:
                REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
-                               AFMT_GENERIC7_FRAME_UPDATE, 1);
+                               AFMT_GENERIC7_IMMEDIATE_UPDATE, 1);
                break;
        default:
                break;
index ed385b1..30eae74 100644 (file)
@@ -281,7 +281,14 @@ struct dcn10_stream_enc_registers {
        SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC2_FRAME_UPDATE, mask_sh),\
        SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC3_FRAME_UPDATE, mask_sh),\
        SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC4_FRAME_UPDATE, mask_sh),\
+       SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC0_IMMEDIATE_UPDATE, mask_sh),\
+       SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC1_IMMEDIATE_UPDATE, mask_sh),\
+       SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC2_IMMEDIATE_UPDATE, mask_sh),\
+       SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC3_IMMEDIATE_UPDATE, mask_sh),\
        SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC4_IMMEDIATE_UPDATE, mask_sh),\
+       SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC5_IMMEDIATE_UPDATE, mask_sh),\
+       SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC6_IMMEDIATE_UPDATE, mask_sh),\
+       SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC7_IMMEDIATE_UPDATE, mask_sh),\
        SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC5_FRAME_UPDATE, mask_sh),\
        SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC6_FRAME_UPDATE, mask_sh),\
        SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC7_FRAME_UPDATE, mask_sh),\
@@ -345,7 +352,14 @@ struct dcn10_stream_enc_registers {
        type AFMT_GENERIC2_FRAME_UPDATE;\
        type AFMT_GENERIC3_FRAME_UPDATE;\
        type AFMT_GENERIC4_FRAME_UPDATE;\
+       type AFMT_GENERIC0_IMMEDIATE_UPDATE;\
+       type AFMT_GENERIC1_IMMEDIATE_UPDATE;\
+       type AFMT_GENERIC2_IMMEDIATE_UPDATE;\
+       type AFMT_GENERIC3_IMMEDIATE_UPDATE;\
        type AFMT_GENERIC4_IMMEDIATE_UPDATE;\
+       type AFMT_GENERIC5_IMMEDIATE_UPDATE;\
+       type AFMT_GENERIC6_IMMEDIATE_UPDATE;\
+       type AFMT_GENERIC7_IMMEDIATE_UPDATE;\
        type AFMT_GENERIC5_FRAME_UPDATE;\
        type AFMT_GENERIC6_FRAME_UPDATE;\
        type AFMT_GENERIC7_FRAME_UPDATE;\
index ba50214..79b640e 100644 (file)
@@ -156,7 +156,7 @@ static void dsc2_read_state(struct display_stream_compressor *dsc, struct dcn_ds
 
        REG_GET(DSC_TOP_CONTROL, DSC_CLOCK_EN, &s->dsc_clock_en);
        REG_GET(DSCC_PPS_CONFIG3, SLICE_WIDTH, &s->dsc_slice_width);
-       REG_GET(DSCC_PPS_CONFIG1, BITS_PER_PIXEL, &s->dsc_bytes_per_pixel);
+       REG_GET(DSCC_PPS_CONFIG1, BITS_PER_PIXEL, &s->dsc_bits_per_pixel);
        REG_GET(DSCC_PPS_CONFIG3, SLICE_HEIGHT, &s->dsc_slice_height);
        REG_GET(DSCC_PPS_CONFIG1, CHUNK_SIZE, &s->dsc_chunk_size);
        REG_GET(DSCC_PPS_CONFIG2, PIC_WIDTH, &s->dsc_pic_width);
index 7725a40..c8cfd3b 100644 (file)
@@ -1457,8 +1457,8 @@ static void dcn20_update_dchubp_dpp(
 
        /* Any updates are handled in dc interface, just need to apply existing for plane enable */
        if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed ||
-                       pipe_ctx->update_flags.bits.scaler || pipe_ctx->update_flags.bits.viewport)
-                       && pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
+                       pipe_ctx->update_flags.bits.scaler || viewport_changed == true) &&
+                       pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
                dc->hwss.set_cursor_position(pipe_ctx);
                dc->hwss.set_cursor_attribute(pipe_ctx);
 
@@ -2498,3 +2498,30 @@ void dcn20_fpga_init_hw(struct dc *dc)
                tg->funcs->tg_init(tg);
        }
 }
+#ifndef TRIM_FSFT
+bool dcn20_optimize_timing_for_fsft(struct dc *dc,
+               struct dc_crtc_timing *timing,
+               unsigned int max_input_rate_in_khz)
+{
+       unsigned int old_v_front_porch;
+       unsigned int old_v_total;
+       unsigned int max_input_rate_in_100hz;
+       unsigned long long new_v_total;
+
+       max_input_rate_in_100hz = max_input_rate_in_khz * 10;
+       if (max_input_rate_in_100hz < timing->pix_clk_100hz)
+               return false;
+
+       old_v_total = timing->v_total;
+       old_v_front_porch = timing->v_front_porch;
+
+       timing->fast_transport_output_rate_100hz = timing->pix_clk_100hz;
+       timing->pix_clk_100hz = max_input_rate_in_100hz;
+
+       new_v_total = div_u64((unsigned long long)old_v_total * max_input_rate_in_100hz, timing->pix_clk_100hz);
+
+       timing->v_total = new_v_total;
+       timing->v_front_porch = old_v_front_porch + (timing->v_total - old_v_total);
+       return true;
+}
+#endif
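
The new dcn20_optimize_timing_for_fsft() helper raises the pixel clock to the sink's maximum input rate and scales v_total by the same ratio, so the refresh rate is preserved while the extra lines are absorbed into the front porch (the original output rate is kept in fast_transport_output_rate_100hz). A hedged, standalone rework of just that arithmetic, computing the ratio against the saved original clock, which I read as the intent of the div_u64() above:

    #include <stdint.h>
    #include <stdio.h>

    struct timing {
            unsigned int pix_clk_100hz;
            unsigned int v_total;
            unsigned int v_front_porch;
    };

    /* Scale v_total with the pixel clock so pix_clk / (h_total * v_total),
     * i.e. the refresh rate, stays constant; park the added lines in the
     * front porch. Returns 0 if the requested rate is below the current one. */
    static int stretch_timing(struct timing *t, unsigned int max_rate_100hz)
    {
            unsigned int old_clk = t->pix_clk_100hz;
            unsigned int old_v_total = t->v_total;
            uint64_t new_v_total;

            if (max_rate_100hz < old_clk)
                    return 0;

            new_v_total = (uint64_t)old_v_total * max_rate_100hz / old_clk;

            t->pix_clk_100hz = max_rate_100hz;
            t->v_total = (unsigned int)new_v_total;
            t->v_front_porch += t->v_total - old_v_total;
            return 1;
    }

    int main(void)
    {
            /* 1080p60-ish: 148.5 MHz, v_total 1125, front porch 4. Doubling
             * the input rate doubles v_total (2250) and grows the porch by
             * the same 1125 lines. */
            struct timing t = { 1485000, 1125, 4 };

            if (stretch_timing(&t, 2970000))
                    printf("v_total=%u v_front_porch=%u clk=%u\n",
                           t.v_total, t.v_front_porch, t.pix_clk_100hz);
            return 0;
    }
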
index 63ce763..83220e3 100644 (file)
@@ -132,5 +132,10 @@ int dcn20_init_sys_ctx(struct dce_hwseq *hws,
                struct dc *dc,
                struct dc_phy_addr_space_config *pa_config);
 
+#ifndef TRIM_FSFT
+bool dcn20_optimize_timing_for_fsft(struct dc *dc,
+               struct dc_crtc_timing *timing,
+               unsigned int max_input_rate_in_khz);
+#endif
 #endif /* __DC_HWSS_DCN20_H__ */
 
index 2380392..3dde6f2 100644 (file)
@@ -88,6 +88,9 @@ static const struct hw_sequencer_funcs dcn20_funcs = {
        .set_backlight_level = dce110_set_backlight_level,
        .set_abm_immediate_disable = dce110_set_abm_immediate_disable,
        .set_pipe = dce110_set_pipe,
+#ifndef TRIM_FSFT
+       .optimize_timing_for_fsft = dcn20_optimize_timing_for_fsft,
+#endif
 };
 
 static const struct hwseq_private_funcs dcn20_private_funcs = {
index 8d209da..15c2ff2 100644 (file)
@@ -309,7 +309,6 @@ bool dcn20_link_encoder_is_in_alt_mode(struct link_encoder *enc)
 void enc2_hw_init(struct link_encoder *enc)
 {
        struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
-
 /*
        00 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__1to2 : 1/2
        01 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__3to4 : 3/4
@@ -333,9 +332,18 @@ void enc2_hw_init(struct link_encoder *enc)
        AUX_RX_PHASE_DETECT_LEN,  [21,20] = 0x3 default is 3
        AUX_RX_DETECTION_THRESHOLD [30:28] = 1
 */
-       AUX_REG_WRITE(AUX_DPHY_RX_CONTROL0, 0x103d1110);
+       if (enc->ctx->dc_bios->golden_table.dc_golden_table_ver > 0) {
+               AUX_REG_WRITE(AUX_DPHY_RX_CONTROL0, enc->ctx->dc_bios->golden_table.aux_dphy_rx_control0_val);
+
+               AUX_REG_WRITE(AUX_DPHY_TX_CONTROL, enc->ctx->dc_bios->golden_table.aux_dphy_tx_control_val);
+
+               AUX_REG_WRITE(AUX_DPHY_RX_CONTROL1, enc->ctx->dc_bios->golden_table.aux_dphy_rx_control1_val);
+       } else {
+               AUX_REG_WRITE(AUX_DPHY_RX_CONTROL0, 0x103d1110);
+
+               AUX_REG_WRITE(AUX_DPHY_TX_CONTROL, 0x21c4d);
 
-       AUX_REG_WRITE(AUX_DPHY_TX_CONTROL, 0x21c7a);
+       }
 
        //AUX_DPHY_TX_REF_CONTROL'AUX_TX_REF_DIV HW default is 0x32;
        // Set AUX_TX_REF_DIV Divider to generate 2 MHz reference from refclk
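
With this change enc2_hw_init() takes its AUX DPHY tuning values from the VBIOS dc golden table when one is reported (dc_golden_table_ver > 0; see the atom_dc_golden_table_v1 definition further down) and keeps the old hardcoded constants only as a fallback. A small sketch of that version-gated fallback; the golden_table struct and reg_write() here are simplified stand-ins:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical mirror of the golden-table fields used in this path. */
    struct golden_table {
            uint16_t ver;                      /* 0 means "no table provided" */
            uint32_t aux_dphy_rx_control0_val;
            uint32_t aux_dphy_tx_control_val;
    };

    static void reg_write(const char *name, uint32_t val)
    {
            printf("%s <- 0x%x\n", name, val);
    }

    static void aux_hw_init(const struct golden_table *gt)
    {
            if (gt && gt->ver > 0) {
                    /* Board-specific tuning supplied by the VBIOS. */
                    reg_write("AUX_DPHY_RX_CONTROL0", gt->aux_dphy_rx_control0_val);
                    reg_write("AUX_DPHY_TX_CONTROL", gt->aux_dphy_tx_control_val);
            } else {
                    /* Legacy defaults, as in the pre-existing code path. */
                    reg_write("AUX_DPHY_RX_CONTROL0", 0x103d1110);
                    reg_write("AUX_DPHY_TX_CONTROL", 0x21c7a);
            }
    }

    int main(void)
    {
            struct golden_table gt = { 1, 0x103d1110, 0x21c4d };

            aux_hw_init(&gt);   /* uses the table values */
            aux_hw_init(NULL);  /* falls back to the defaults */
            return 0;
    }
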
index db09f40..dcbf28d 100644 (file)
        LE_SF(DCIO_SOFT_RESET, UNIPHYB_SOFT_RESET, mask_sh),\
        LE_SF(DCIO_SOFT_RESET, UNIPHYC_SOFT_RESET, mask_sh),\
        LE_SF(DCIO_SOFT_RESET, UNIPHYD_SOFT_RESET, mask_sh),\
-       LE_SF(DCIO_SOFT_RESET, UNIPHYE_SOFT_RESET, mask_sh)
+       LE_SF(DCIO_SOFT_RESET, UNIPHYE_SOFT_RESET, mask_sh),\
+       LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, mask_sh),\
+       LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, mask_sh)
 
 #define LINK_ENCODER_MASK_SH_LIST_DCN20(mask_sh)\
        LINK_ENCODER_MASK_SH_LIST_DCN10(mask_sh),\
        LE_SF(DP_AUX0_AUX_DPHY_RX_CONTROL0, AUX_RX_DETECTION_THRESHOLD, mask_sh), \
        LE_SF(DP_AUX0_AUX_DPHY_TX_CONTROL, AUX_TX_PRECHARGE_LEN, mask_sh),\
        LE_SF(DP_AUX0_AUX_DPHY_TX_CONTROL, AUX_TX_PRECHARGE_SYMBOLS, mask_sh),\
-       LE_SF(DP_AUX0_AUX_DPHY_TX_CONTROL, AUX_MODE_DET_CHECK_DELAY, mask_sh)
+       LE_SF(DP_AUX0_AUX_DPHY_TX_CONTROL, AUX_MODE_DET_CHECK_DELAY, mask_sh),\
+       LE_SF(DP_AUX0_AUX_DPHY_RX_CONTROL1, AUX_RX_PRECHARGE_SKIP, mask_sh),\
+       LE_SF(DP_AUX0_AUX_DPHY_RX_CONTROL1, AUX_RX_TIMEOUT_LEN, mask_sh),\
+       LE_SF(DP_AUX0_AUX_DPHY_RX_CONTROL1, AUX_RX_TIMEOUT_LEN_MUL, mask_sh)
 
 #define UNIPHY_DCN2_REG_LIST(id) \
        SRI(CLOCK_ENABLE, SYMCLK, id), \
index 968a89b..9140b3f 100644 (file)
@@ -2223,7 +2223,7 @@ int dcn20_populate_dml_pipes_from_context(
                if (!res_ctx->pipe_ctx[i].plane_state) {
                        pipes[pipe_cnt].pipe.src.is_hsplit = pipes[pipe_cnt].pipe.dest.odm_combine != dm_odm_combine_mode_disabled;
                        pipes[pipe_cnt].pipe.src.source_scan = dm_horz;
-                       pipes[pipe_cnt].pipe.src.sw_mode = dm_sw_linear;
+                       pipes[pipe_cnt].pipe.src.sw_mode = dm_sw_4kb_s;
                        pipes[pipe_cnt].pipe.src.macro_tile_size = dm_64k_tile;
                        pipes[pipe_cnt].pipe.src.viewport_width = timing->h_addressable;
                        if (pipes[pipe_cnt].pipe.src.viewport_width > 1920)
@@ -2235,7 +2235,7 @@ int dcn20_populate_dml_pipes_from_context(
                        pipes[pipe_cnt].pipe.src.surface_width_y = pipes[pipe_cnt].pipe.src.viewport_width;
                        pipes[pipe_cnt].pipe.src.surface_height_c = pipes[pipe_cnt].pipe.src.viewport_height;
                        pipes[pipe_cnt].pipe.src.surface_width_c = pipes[pipe_cnt].pipe.src.viewport_width;
-                       pipes[pipe_cnt].pipe.src.data_pitch = ((pipes[pipe_cnt].pipe.src.viewport_width + 63) / 64) * 64; /* linear sw only */
+                       pipes[pipe_cnt].pipe.src.data_pitch = ((pipes[pipe_cnt].pipe.src.viewport_width + 255) / 256) * 256;
                        pipes[pipe_cnt].pipe.src.source_format = dm_444_32;
                        pipes[pipe_cnt].pipe.dest.recout_width = pipes[pipe_cnt].pipe.src.viewport_width; /*vp_width/hratio*/
                        pipes[pipe_cnt].pipe.dest.recout_height = pipes[pipe_cnt].pipe.src.viewport_height; /*vp_height/vratio*/
@@ -3069,8 +3069,7 @@ void dcn20_calculate_dlg_params(
                int pipe_cnt,
                int vlevel)
 {
-       int i, j, pipe_idx, pipe_idx_unsplit;
-       bool visited[MAX_PIPES] = { 0 };
+       int i, pipe_idx;
 
        /* Writeback MCIF_WB arbitration parameters */
        dc->res_pool->funcs->set_mcif_arb_params(dc, context, pipes, pipe_cnt);
@@ -3089,55 +3088,17 @@ void dcn20_calculate_dlg_params(
        if (context->bw_ctx.bw.dcn.clk.dispclk_khz < dc->debug.min_disp_clk_khz)
                context->bw_ctx.bw.dcn.clk.dispclk_khz = dc->debug.min_disp_clk_khz;
 
-       /*
-        * An artifact of dml pipe split/odm is that pipes get merged back together for
-        * calculation. Therefore we need to only extract for first pipe in ascending index order
-        * and copy into the other split half.
-        */
-       for (i = 0, pipe_idx = 0, pipe_idx_unsplit = 0; i < dc->res_pool->pipe_count; i++) {
-               if (!context->res_ctx.pipe_ctx[i].stream)
-                       continue;
-
-               if (!visited[pipe_idx]) {
-                       display_pipe_source_params_st *src = &pipes[pipe_idx].pipe.src;
-                       display_pipe_dest_params_st *dst = &pipes[pipe_idx].pipe.dest;
-
-                       dst->vstartup_start = context->bw_ctx.dml.vba.VStartup[pipe_idx_unsplit];
-                       dst->vupdate_offset = context->bw_ctx.dml.vba.VUpdateOffsetPix[pipe_idx_unsplit];
-                       dst->vupdate_width = context->bw_ctx.dml.vba.VUpdateWidthPix[pipe_idx_unsplit];
-                       dst->vready_offset = context->bw_ctx.dml.vba.VReadyOffsetPix[pipe_idx_unsplit];
-                       /*
-                        * j iterates inside pipes array, unlike i which iterates inside
-                        * pipe_ctx array
-                        */
-                       if (src->is_hsplit)
-                               for (j = pipe_idx + 1; j < pipe_cnt; j++) {
-                                       display_pipe_source_params_st *src_j = &pipes[j].pipe.src;
-                                       display_pipe_dest_params_st *dst_j = &pipes[j].pipe.dest;
-
-                                       if (src_j->is_hsplit && !visited[j]
-                                                       && src->hsplit_grp == src_j->hsplit_grp) {
-                                               dst_j->vstartup_start = context->bw_ctx.dml.vba.VStartup[pipe_idx_unsplit];
-                                               dst_j->vupdate_offset = context->bw_ctx.dml.vba.VUpdateOffsetPix[pipe_idx_unsplit];
-                                               dst_j->vupdate_width = context->bw_ctx.dml.vba.VUpdateWidthPix[pipe_idx_unsplit];
-                                               dst_j->vready_offset = context->bw_ctx.dml.vba.VReadyOffsetPix[pipe_idx_unsplit];
-                                               visited[j] = true;
-                                       }
-                               }
-                       visited[pipe_idx] = true;
-                       pipe_idx_unsplit++;
-               }
-               pipe_idx++;
-       }
-
        for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
                if (!context->res_ctx.pipe_ctx[i].stream)
                        continue;
+               pipes[pipe_idx].pipe.dest.vstartup_start = get_vstartup(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
+               pipes[pipe_idx].pipe.dest.vupdate_offset = get_vupdate_offset(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
+               pipes[pipe_idx].pipe.dest.vupdate_width = get_vupdate_width(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
+               pipes[pipe_idx].pipe.dest.vready_offset = get_vready_offset(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
                if (context->bw_ctx.bw.dcn.clk.dppclk_khz < pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
                        context->bw_ctx.bw.dcn.clk.dppclk_khz = pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;
                context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz =
                                                pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;
-               ASSERT(visited[pipe_idx]);
                context->res_ctx.pipe_ctx[i].pipe_dlg_param = pipes[pipe_idx].pipe.dest;
                pipe_idx++;
        }
@@ -3180,7 +3141,7 @@ static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *co
        int vlevel = 0;
        int pipe_split_from[MAX_PIPES];
        int pipe_cnt = 0;
-       display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
+       display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_ATOMIC);
        DC_LOGGER_INIT(dc->ctx->logger);
 
        BW_VAL_TRACE_COUNT();
index 177d0dc..b187f71 100644 (file)
@@ -92,6 +92,9 @@ static const struct hw_sequencer_funcs dcn21_funcs = {
        .set_backlight_level = dcn21_set_backlight_level,
        .set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
        .set_pipe = dcn21_set_pipe,
+#ifndef TRIM_FSFT
+       .optimize_timing_for_fsft = dcn20_optimize_timing_for_fsft,
+#endif
 };
 
 static const struct hwseq_private_funcs dcn21_private_funcs = {
index c29326e..2ae159e 100644 (file)
@@ -62,7 +62,7 @@ static const struct link_encoder_funcs dcn30_link_enc_funcs = {
        .read_state = link_enc2_read_state,
        .validate_output_with_stream =
                        dcn30_link_encoder_validate_output_with_stream,
-       .hw_init = enc2_hw_init,
+       .hw_init = enc3_hw_init,
        .setup = dcn10_link_encoder_setup,
        .enable_tmds_output = dcn10_link_encoder_enable_tmds_output,
        .enable_dp_output = dcn20_link_encoder_enable_dp_output,
@@ -203,3 +203,54 @@ void dcn30_link_encoder_construct(
                enc10->base.features.flags.bits.HDMI_6GB_EN = 0;
        }
 }
+
+#define AUX_REG(reg)\
+       (enc10->aux_regs->reg)
+
+#define AUX_REG_READ(reg_name) \
+               dm_read_reg(CTX, AUX_REG(reg_name))
+
+#define AUX_REG_WRITE(reg_name, val) \
+                       dm_write_reg(CTX, AUX_REG(reg_name), val)
+void enc3_hw_init(struct link_encoder *enc)
+{
+       struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+
+/*
+       00 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__1to2 : 1/2
+       01 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__3to4 : 3/4
+       02 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__7to8 : 7/8
+       03 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__15to16 : 15/16
+       04 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__31to32 : 31/32
+       05 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__63to64 : 63/64
+       06 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__127to128 : 127/128
+       07 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__255to256 : 255/256
+*/
+
+/*
+       AUX_REG_UPDATE_5(AUX_DPHY_RX_CONTROL0,
+       AUX_RX_START_WINDOW = 1 [6:4]
+       AUX_RX_RECEIVE_WINDOW = 1 default is 2 [10:8]
+       AUX_RX_HALF_SYM_DETECT_LEN  = 1 [13:12] default is 1
+       AUX_RX_TRANSITION_FILTER_EN = 1 [16] default is 1
+       AUX_RX_ALLOW_BELOW_THRESHOLD_PHASE_DETECT [17] is 0  default is 0
+       AUX_RX_ALLOW_BELOW_THRESHOLD_START [18] is 1  default is 1
+       AUX_RX_ALLOW_BELOW_THRESHOLD_STOP [19] is 1  default is 1
+       AUX_RX_PHASE_DETECT_LEN,  [21,20] = 0x3 default is 3
+       AUX_RX_DETECTION_THRESHOLD [30:28] = 1
+*/
+       AUX_REG_WRITE(AUX_DPHY_RX_CONTROL0, 0x103d1110);
+
+       AUX_REG_WRITE(AUX_DPHY_TX_CONTROL, 0x21c7a);
+
+       //AUX_DPHY_TX_REF_CONTROL'AUX_TX_REF_DIV HW default is 0x32;
+       // Set AUX_TX_REF_DIV Divider to generate 2 MHz reference from refclk
+       // 27MHz -> 0xd
+       // 100MHz -> 0x32
+       // 48MHz -> 0x18
+
+       // Set TMDS_CTL0 to 1.  This is a legacy setting.
+       REG_UPDATE(TMDS_CTL_BITS, TMDS_CTL0, 1);
+
+       dcn10_aux_initialize(enc10);
+}
index 585d1ce..2fbf879 100644 (file)
        DPCS_DCN2_MASK_SH_LIST(mask_sh),\
        LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_DATA_ORDER_INVERT_18_BIT, mask_sh),\
        LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_PHY_TX_VBOOST_LVL, mask_sh),\
-       LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_TX_CLK_EN, mask_sh)
+       LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_TX_CLK_EN, mask_sh),\
+       LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, mask_sh),\
+       LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, mask_sh)
+
 
 void dcn30_link_encoder_construct(
        struct dcn20_link_encoder *enc20,
@@ -73,4 +76,6 @@ void dcn30_link_encoder_construct(
        const struct dcn10_link_enc_shift *link_shift,
        const struct dcn10_link_enc_mask *link_mask);
 
+void enc3_hw_init(struct link_encoder *enc);
+
 #endif /* __DC_LINK_ENCODER__DCN30_H__ */
index 1b354c2..9afee71 100644 (file)
@@ -26,6 +26,7 @@
 #include "dce110/dce110_hw_sequencer.h"
 #include "dcn10/dcn10_hw_sequencer.h"
 #include "dcn20/dcn20_hwseq.h"
+#include "dcn21/dcn21_hwseq.h"
 #include "dcn30_hwseq.h"
 
 static const struct hw_sequencer_funcs dcn30_funcs = {
@@ -87,8 +88,8 @@ static const struct hw_sequencer_funcs dcn30_funcs = {
        .set_flip_control_gsl = dcn20_set_flip_control_gsl,
        .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
        .apply_idle_power_optimizations = dcn30_apply_idle_power_optimizations,
-       .set_backlight_level = dce110_set_backlight_level,
-       .set_abm_immediate_disable = dce110_set_abm_immediate_disable,
+       .set_backlight_level = dcn21_set_backlight_level,
+       .set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
 };
 
 static const struct hwseq_private_funcs dcn30_private_funcs = {
index 653a571..ebe0cc5 100644 (file)
@@ -491,6 +491,7 @@ static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = {
 [id] = {\
        LE_DCN3_REG_LIST(id), \
        UNIPHY_DCN2_REG_LIST(phyid), \
+       SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \
 }
 
 static const struct dce110_aux_registers_shift aux_shift = {
index b54814f..2beb284 100644 (file)
@@ -63,6 +63,7 @@ typedef struct {
 
 #define BPP_INVALID 0
 #define BPP_BLENDED_PIPE 0xffffffff
+#define DCN30_MAX_DSC_IMAGE_WIDTH 5184
 
 static void DisplayPipeConfiguration(struct display_mode_lib *mode_lib);
 static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation(
@@ -3984,6 +3985,9 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
                                } else if (v->PlaneRequiredDISPCLKWithoutODMCombine > v->MaxDispclkRoundedDownToDFSGranularity) {
                                        v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;
                                        v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithODMCombine2To1;
+                               } else if (v->DSCEnabled[k] && (v->HActive[k] > DCN30_MAX_DSC_IMAGE_WIDTH)) {
+                                       v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;
+                                       v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithODMCombine2To1;
                                } else {
                                        v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled;
                                        v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithoutODMCombine;
index 7916a7e..afdd4f0 100644 (file)
@@ -154,23 +154,11 @@ dml_get_pipe_attr_func(refcyc_per_meta_chunk_vblank_c_in_us, mode_lib->vba.TimeP
 dml_get_pipe_attr_func(refcyc_per_meta_chunk_flip_l_in_us, mode_lib->vba.TimePerMetaChunkFlip);
 dml_get_pipe_attr_func(refcyc_per_meta_chunk_flip_c_in_us, mode_lib->vba.TimePerChromaMetaChunkFlip);
 
+dml_get_pipe_attr_func(vstartup, mode_lib->vba.VStartup);
 dml_get_pipe_attr_func(vupdate_offset, mode_lib->vba.VUpdateOffsetPix);
 dml_get_pipe_attr_func(vupdate_width, mode_lib->vba.VUpdateWidthPix);
 dml_get_pipe_attr_func(vready_offset, mode_lib->vba.VReadyOffsetPix);
 
-unsigned int get_vstartup_calculated(
-               struct display_mode_lib *mode_lib,
-               const display_e2e_pipe_params_st *pipes,
-               unsigned int num_pipes,
-               unsigned int which_pipe)
-{
-       unsigned int which_plane;
-
-       recalculate_params(mode_lib, pipes, num_pipes);
-       which_plane = mode_lib->vba.pipe_plane[which_pipe];
-       return mode_lib->vba.VStartup[which_plane];
-}
-
 double get_total_immediate_flip_bytes(
                struct display_mode_lib *mode_lib,
                const display_e2e_pipe_params_st *pipes,
@@ -479,7 +467,8 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
                mode_lib->vba.AudioSampleLayout[mode_lib->vba.NumberOfActivePlanes] =
                        1;
                mode_lib->vba.DRAMClockChangeLatencyOverride = 0.0;
-               mode_lib->vba.DSCEnabled[mode_lib->vba.NumberOfActivePlanes] = dout->dsc_enable;
+               mode_lib->vba.DSCEnabled[mode_lib->vba.NumberOfActivePlanes] = dout->dsc_enable;
+               mode_lib->vba.DSCEnable[mode_lib->vba.NumberOfActivePlanes] = dout->dsc_enable;
                mode_lib->vba.NumberOfDSCSlices[mode_lib->vba.NumberOfActivePlanes] =
                                dout->dsc_slices;
                mode_lib->vba.DSCInputBitPerComponent[mode_lib->vba.NumberOfActivePlanes] =
index 756d8eb..21e5111 100644 (file)
@@ -98,16 +98,11 @@ dml_get_pipe_attr_decl(refcyc_per_meta_chunk_vblank_c_in_us);
 dml_get_pipe_attr_decl(refcyc_per_meta_chunk_flip_l_in_us);
 dml_get_pipe_attr_decl(refcyc_per_meta_chunk_flip_c_in_us);
 
+dml_get_pipe_attr_decl(vstartup);
 dml_get_pipe_attr_decl(vupdate_offset);
 dml_get_pipe_attr_decl(vupdate_width);
 dml_get_pipe_attr_decl(vready_offset);
 
-unsigned int get_vstartup_calculated(
-               struct display_mode_lib *mode_lib,
-               const display_e2e_pipe_params_st *pipes,
-               unsigned int num_pipes,
-               unsigned int which_pipe);
-
 double get_total_immediate_flip_bytes(
                struct display_mode_lib *mode_lib,
                const display_e2e_pipe_params_st *pipes,
index 4e6e18b..7274305 100644 (file)
@@ -71,8 +71,9 @@ enum dentist_divider_range {
 
 #define CTX \
        clk_mgr->base.ctx
+
 #define DC_LOGGER \
-       clk_mgr->ctx->logger
+       clk_mgr->base.ctx->logger
 
 
 
index 5915994..f520e13 100644 (file)
@@ -55,7 +55,7 @@ struct dsc_optc_config {
 struct dcn_dsc_state {
        uint32_t dsc_clock_en;
        uint32_t dsc_slice_width;
-       uint32_t dsc_bytes_per_pixel;
+       uint32_t dsc_bits_per_pixel;
        uint32_t dsc_slice_height;
        uint32_t dsc_pic_width;
        uint32_t dsc_pic_height;
index 720ce5e..3c98671 100644 (file)
@@ -116,6 +116,11 @@ struct hw_sequencer_funcs {
        void (*set_static_screen_control)(struct pipe_ctx **pipe_ctx,
                        int num_pipes,
                        const struct dc_static_screen_params *events);
+#ifndef TRIM_FSFT
+       bool (*optimize_timing_for_fsft)(struct dc *dc,
+                       struct dc_crtc_timing *timing,
+                       unsigned int max_input_rate_in_khz);
+#endif
 
        /* Stream Related */
        void (*enable_stream)(struct pipe_ctx *pipe_ctx);
index c30437a..21011ed 100644 (file)
@@ -101,6 +101,13 @@ enum bp_pipe_control_action {
        ASIC_PIPE_INIT
 };
 
+enum bp_lvtma_control_action {
+       LVTMA_CONTROL_LCD_BLOFF = 2,
+       LVTMA_CONTROL_LCD_BLON = 3,
+       LVTMA_CONTROL_POWER_ON = 12,
+       LVTMA_CONTROL_POWER_OFF = 13
+};
+
 struct bp_encoder_control {
        enum bp_encoder_control_action action;
        enum engine_id engine_id;
index 89ef9f6..16df2a4 100644 (file)
@@ -431,6 +431,9 @@ struct fixed31_32 dc_fixpt_log(struct fixed31_32 arg);
  */
 static inline struct fixed31_32 dc_fixpt_pow(struct fixed31_32 arg1, struct fixed31_32 arg2)
 {
+       if (arg1.value == 0)
+               return arg2.value == 0 ? dc_fixpt_one : dc_fixpt_zero;
+
        return dc_fixpt_exp(
                dc_fixpt_mul(
                        dc_fixpt_log(arg1),
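
dc_fixpt_pow() computes arg1^arg2 as exp(arg2 * log(arg1)), so the new guard handles arg1 == 0 before log() is ever evaluated, treating 0^0 as 1 and 0^y as 0. The same guard in ordinary floating point, purely as an illustration of the identity rather than the driver's fixed-point math:

    #include <math.h>
    #include <stdio.h>

    /* pow(x, y) via exp(y * log(x)), with the x == 0 special cases handled
     * up front, mirroring the fixed-point version. */
    static double pow_via_exp_log(double x, double y)
    {
            if (x == 0.0)
                    return (y == 0.0) ? 1.0 : 0.0;

            return exp(y * log(x));
    }

    int main(void)
    {
            printf("%g %g %g\n",
                   pow_via_exp_log(0.0, 0.0),   /* 1 */
                   pow_via_exp_log(0.0, 2.2),   /* 0, without evaluating log(0) */
                   pow_via_exp_log(2.0, 10.0)); /* 1024 */
            return 0;
    }
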
index 4869d45..550f46e 100644 (file)
@@ -66,6 +66,8 @@ enum link_training_result {
        /* other failure during EQ step */
        LINK_TRAINING_EQ_FAIL_EQ,
        LINK_TRAINING_LQA_FAIL,
+       /* one of the CR,EQ or symbol lock is dropped */
+       LINK_TRAINING_LINK_LOSS,
 };
 
 struct link_training_settings {
index 7a2500f..d988533 100644 (file)
@@ -324,22 +324,44 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
 
                /* Choose number of frames to insert based on how close it
                 * can get to the mid point of the variable range.
+                *  - Delta for CEIL: delta_from_mid_point_in_us_1
+                *  - Delta for FLOOR: delta_from_mid_point_in_us_2
                 */
-               if ((frame_time_in_us / mid_point_frames_ceil) > in_out_vrr->min_duration_in_us &&
-                               (delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2 ||
-                                               mid_point_frames_floor < 2)) {
+               if ((last_render_time_in_us / mid_point_frames_ceil) < in_out_vrr->min_duration_in_us) {
+                       /* Check for out of range.
+                        * If using CEIL produces a value that is out of range,
+                        * then we are forced to use FLOOR.
+                        */
+                       frames_to_insert = mid_point_frames_floor;
+               } else if (mid_point_frames_floor < 2) {
+                       /* Check if FLOOR would result in non-LFC. In this case
+                        * choose to use CEIL
+                        */
+                       frames_to_insert = mid_point_frames_ceil;
+               } else if (delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2) {
+                       /* If choosing CEIL results in a frame duration that is
+                        * closer to the mid point of the range.
+                        * Choose CEIL
+                        */
                        frames_to_insert = mid_point_frames_ceil;
-                       delta_from_mid_point_delta_in_us = delta_from_mid_point_in_us_2 -
-                                       delta_from_mid_point_in_us_1;
                } else {
+                       /* If choosing FLOOR results in a frame duration that is
+                        * closer to the mid point of the range.
+                        * Choose FLOOR
+                        */
                        frames_to_insert = mid_point_frames_floor;
-                       delta_from_mid_point_delta_in_us = delta_from_mid_point_in_us_1 -
-                                       delta_from_mid_point_in_us_2;
                }
 
                /* Prefer current frame multiplier when BTR is enabled unless it drifts
                 * too far from the midpoint
                 */
+               if (delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2) {
+                       delta_from_mid_point_delta_in_us = delta_from_mid_point_in_us_2 -
+                                       delta_from_mid_point_in_us_1;
+               } else {
+                       delta_from_mid_point_delta_in_us = delta_from_mid_point_in_us_1 -
+                                       delta_from_mid_point_in_us_2;
+               }
                if (in_out_vrr->btr.frames_to_insert != 0 &&
                                delta_from_mid_point_delta_in_us < BTR_DRIFT_MARGIN) {
                        if (((last_render_time_in_us / in_out_vrr->btr.frames_to_insert) <
@@ -829,10 +851,13 @@ void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync,
        switch (packet_type) {
        case PACKET_TYPE_FS_V3:
 #ifndef TRIM_FSFT
+               // always populate with pixel rate.
                build_vrr_infopacket_v3(
                                stream->signal, vrr,
                                stream->timing.flags.FAST_TRANSPORT,
-                               stream->timing.fast_transport_output_rate_100hz,
+                               (stream->timing.flags.FAST_TRANSPORT) ?
+                                               stream->timing.fast_transport_output_rate_100hz :
+                                               stream->timing.pix_clk_100hz,
                                app_tf, infopacket);
 #else
                build_vrr_infopacket_v3(stream->signal, vrr, app_tf, infopacket);
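
The below-the-range rework above picks the frame-insertion count by comparing the CEIL and FLOOR candidates against the midpoint of the variable refresh range, with two overriding cases: CEIL loses if it would push the per-frame duration under min_duration_in_us, and FLOOR loses if it would leave fewer than two inserted frames. A compact standalone rendering of that decision ladder, with the midpoint handling simplified, all names hypothetical, and the below-the-range precondition (last_render_time_us > max_duration_us) assumed:

    #include <stdio.h>

    static unsigned int pick_frames_to_insert(unsigned int last_render_time_us,
                                              unsigned int min_duration_us,
                                              unsigned int max_duration_us)
    {
            unsigned int mid_point_us = (min_duration_us + max_duration_us) / 2;
            unsigned int frames_floor = last_render_time_us / mid_point_us;
            unsigned int frames_ceil =
                    (last_render_time_us + mid_point_us - 1) / mid_point_us;
            unsigned int dur_ceil = last_render_time_us / frames_ceil;
            unsigned int dur_floor = last_render_time_us / frames_floor;
            unsigned int delta_ceil = (dur_ceil > mid_point_us) ?
                    dur_ceil - mid_point_us : mid_point_us - dur_ceil;
            unsigned int delta_floor = (dur_floor > mid_point_us) ?
                    dur_floor - mid_point_us : mid_point_us - dur_floor;

            if (dur_ceil < min_duration_us)
                    return frames_floor;    /* CEIL would leave the range */
            if (frames_floor < 2)
                    return frames_ceil;     /* FLOOR would break LFC */
            return (delta_ceil < delta_floor) ? frames_ceil : frames_floor;
    }

    int main(void)
    {
            /* 48-144 Hz panel: roughly 6944-20833 us per frame. A 30 ms render
             * gets 2 inserted frames (15 ms flips, closest to the midpoint). */
            printf("%u\n", pick_frames_to_insert(30000, 6944, 20833)); /* 2 */
            return 0;
    }
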
index c2544c8..3e526c3 100644 (file)
@@ -941,7 +941,6 @@ struct atom_display_controller_info_v4_1
   uint8_t  reserved3[8];
 };
 
-
 struct atom_display_controller_info_v4_2
 {
   struct  atom_common_table_header  table_header;
@@ -976,6 +975,59 @@ struct atom_display_controller_info_v4_2
   uint8_t  reserved3[8];
 };
 
+struct atom_display_controller_info_v4_4 {
+       struct atom_common_table_header table_header;
+       uint32_t display_caps;
+       uint32_t bootup_dispclk_10khz;
+       uint16_t dce_refclk_10khz;
+       uint16_t i2c_engine_refclk_10khz;
+       uint16_t dvi_ss_percentage;      // in unit of 0.001%
+       uint16_t dvi_ss_rate_10hz;
+       uint16_t hdmi_ss_percentage;     // in unit of 0.001%
+       uint16_t hdmi_ss_rate_10hz;
+       uint16_t dp_ss_percentage;       // in unit of 0.001%
+       uint16_t dp_ss_rate_10hz;
+       uint8_t dvi_ss_mode;             // enum of atom_spread_spectrum_mode
+       uint8_t hdmi_ss_mode;            // enum of atom_spread_spectrum_mode
+       uint8_t dp_ss_mode;              // enum of atom_spread_spectrum_mode
+       uint8_t ss_reserved;
+       uint8_t dfp_hardcode_mode_num;   // DFP hardcode mode number defined in StandardVESA_TimingTable when EDID is not available
+       uint8_t dfp_hardcode_refreshrate;// DFP hardcode mode refreshrate defined in StandardVESA_TimingTable when EDID is not available
+       uint8_t vga_hardcode_mode_num;   // VGA hardcode mode number defined in StandardVESA_TimingTable when EDID is not available
+       uint8_t vga_hardcode_refreshrate;// VGA hardcode mode refreshrate defined in StandardVESA_TimingTable when EDID is not available
+       uint16_t dpphy_refclk_10khz;
+       uint16_t hw_chip_id;
+       uint8_t dcnip_min_ver;
+       uint8_t dcnip_max_ver;
+       uint8_t max_disp_pipe_num;
+       uint8_t max_vbios_active_disp_pipum;
+       uint8_t max_ppll_num;
+       uint8_t max_disp_phy_num;
+       uint8_t max_aux_pairs;
+       uint8_t remotedisplayconfig;
+       uint32_t dispclk_pll_vco_freq;
+       uint32_t dp_ref_clk_freq;
+       uint32_t max_mclk_chg_lat;       // Worst case blackout duration for a memory clock frequency (p-state) change, units of 100s of ns (0.1 us)
+       uint32_t max_sr_exit_lat;        // Worst case memory self refresh exit time, units of 100s of ns (0.1 us)
+       uint32_t max_sr_enter_exit_lat;  // Worst case memory self refresh entry followed by immediate exit time, units of 100s of ns (0.1 us)
+       uint16_t dc_golden_table_offset; // point of struct of atom_dc_golden_table_vxx
+       uint16_t dc_golden_table_ver;
+       uint32_t reserved3[3];
+};
+
+struct atom_dc_golden_table_v1
+{
+       uint32_t aux_dphy_rx_control0_val;
+       uint32_t aux_dphy_tx_control_val;
+       uint32_t aux_dphy_rx_control1_val;
+       uint32_t dc_gpio_aux_ctrl_0_val;
+       uint32_t dc_gpio_aux_ctrl_1_val;
+       uint32_t dc_gpio_aux_ctrl_2_val;
+       uint32_t dc_gpio_aux_ctrl_3_val;
+       uint32_t dc_gpio_aux_ctrl_4_val;
+       uint32_t dc_gpio_aux_ctrl_5_val;
+       uint32_t reserved[23];
+};
 
 enum dce_info_caps_def
 {
index 838a369..0826625 100644 (file)
@@ -133,6 +133,78 @@ int smu_get_dpm_freq_range(struct smu_context *smu,
        return ret;
 }
 
+static int smu_dpm_set_vcn_enable_locked(struct smu_context *smu,
+                                        bool enable)
+{
+       struct smu_power_context *smu_power = &smu->smu_power;
+       struct smu_power_gate *power_gate = &smu_power->power_gate;
+       int ret = 0;
+
+       if (!smu->ppt_funcs->dpm_set_vcn_enable)
+               return 0;
+
+       if (atomic_read(&power_gate->vcn_gated) ^ enable)
+               return 0;
+
+       ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable);
+       if (!ret)
+               atomic_set(&power_gate->vcn_gated, !enable);
+
+       return ret;
+}
+
+static int smu_dpm_set_vcn_enable(struct smu_context *smu,
+                                 bool enable)
+{
+       struct smu_power_context *smu_power = &smu->smu_power;
+       struct smu_power_gate *power_gate = &smu_power->power_gate;
+       int ret = 0;
+
+       mutex_lock(&power_gate->vcn_gate_lock);
+
+       ret = smu_dpm_set_vcn_enable_locked(smu, enable);
+
+       mutex_unlock(&power_gate->vcn_gate_lock);
+
+       return ret;
+}
+
+static int smu_dpm_set_jpeg_enable_locked(struct smu_context *smu,
+                                         bool enable)
+{
+       struct smu_power_context *smu_power = &smu->smu_power;
+       struct smu_power_gate *power_gate = &smu_power->power_gate;
+       int ret = 0;
+
+       if (!smu->ppt_funcs->dpm_set_jpeg_enable)
+               return 0;
+
+       if (atomic_read(&power_gate->jpeg_gated) ^ enable)
+               return 0;
+
+       ret = smu->ppt_funcs->dpm_set_jpeg_enable(smu, enable);
+       if (!ret)
+               atomic_set(&power_gate->jpeg_gated, !enable);
+
+       return ret;
+}
+
+static int smu_dpm_set_jpeg_enable(struct smu_context *smu,
+                                  bool enable)
+{
+       struct smu_power_context *smu_power = &smu->smu_power;
+       struct smu_power_gate *power_gate = &smu_power->power_gate;
+       int ret = 0;
+
+       mutex_lock(&power_gate->jpeg_gate_lock);
+
+       ret = smu_dpm_set_jpeg_enable_locked(smu, enable);
+
+       mutex_unlock(&power_gate->jpeg_gate_lock);
+
+       return ret;
+}
+
 /**
  * smu_dpm_set_power_gate - power gate/ungate the specific IP block
  *
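
The VCN/JPEG gating state becomes an atomic flag protected by a per-block mutex, with a *_locked helper that returns early when the block is already in the requested state (the atomic_read() ^ enable test) and only updates the cached flag after the SMU message succeeds. A generic user-space sketch of that pattern with C11 atomics and pthreads; fake_smu_set_enable() is a placeholder for the real firmware call:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct power_gate {
            atomic_int gated;        /* 1 = block is powered down */
            pthread_mutex_t lock;
    };

    static int fake_smu_set_enable(bool enable)
    {
            printf("SMU: %s block\n", enable ? "power up" : "power down");
            return 0;
    }

    /* Caller holds gate->lock. Skip the SMU call if the block is already in
     * the requested state; only flip the cached flag when the call worked. */
    static int set_enable_locked(struct power_gate *gate, bool enable)
    {
            int ret;

            if (atomic_load(&gate->gated) ^ enable)
                    return 0;

            ret = fake_smu_set_enable(enable);
            if (!ret)
                    atomic_store(&gate->gated, !enable);
            return ret;
    }

    static int set_enable(struct power_gate *gate, bool enable)
    {
            int ret;

            pthread_mutex_lock(&gate->lock);
            ret = set_enable_locked(gate, enable);
            pthread_mutex_unlock(&gate->lock);
            return ret;
    }

    int main(void)
    {
            struct power_gate gate = { 1, PTHREAD_MUTEX_INITIALIZER };

            set_enable(&gate, true);   /* powers up */
            set_enable(&gate, true);   /* no-op: already up */
            set_enable(&gate, false);  /* powers down */
            return 0;
    }
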
@@ -353,6 +425,45 @@ static int smu_early_init(void *handle)
        return smu_set_funcs(adev);
 }
 
+static int smu_set_default_dpm_table(struct smu_context *smu)
+{
+       struct smu_power_context *smu_power = &smu->smu_power;
+       struct smu_power_gate *power_gate = &smu_power->power_gate;
+       int vcn_gate, jpeg_gate;
+       int ret = 0;
+
+       if (!smu->ppt_funcs->set_default_dpm_table)
+               return 0;
+
+       mutex_lock(&power_gate->vcn_gate_lock);
+       mutex_lock(&power_gate->jpeg_gate_lock);
+
+       vcn_gate = atomic_read(&power_gate->vcn_gated);
+       jpeg_gate = atomic_read(&power_gate->jpeg_gated);
+
+       ret = smu_dpm_set_vcn_enable_locked(smu, true);
+       if (ret)
+               goto err0_out;
+
+       ret = smu_dpm_set_jpeg_enable_locked(smu, true);
+       if (ret)
+               goto err1_out;
+
+       ret = smu->ppt_funcs->set_default_dpm_table(smu);
+       if (ret)
+               dev_err(smu->adev->dev,
+                       "Failed to setup default dpm clock tables!\n");
+
+       smu_dpm_set_jpeg_enable_locked(smu, !jpeg_gate);
+err1_out:
+       smu_dpm_set_vcn_enable_locked(smu, !vcn_gate);
+err0_out:
+       mutex_unlock(&power_gate->jpeg_gate_lock);
+       mutex_unlock(&power_gate->vcn_gate_lock);
+
+       return ret;
+}
+
 static int smu_late_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
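
smu_set_default_dpm_table() needs VCN and JPEG running while the default DPM tables are read back, so it snapshots the gate state under both locks, forces both blocks on, performs the table setup, and then restores the snapshot through the usual goto unwind on error. A stripped-down sketch of that save/force/restore ordering (locks omitted, everything here is placeholder logic):

    #include <stdbool.h>
    #include <stdio.h>

    static bool vcn_gated = true, jpeg_gated = true;   /* cached gate state */

    static int set_vcn(bool enable)  { vcn_gated = !enable;  return 0; }
    static int set_jpeg(bool enable) { jpeg_gated = !enable; return 0; }

    static int build_default_dpm_table(void)
    {
            printf("reading DPM tables with VCN/JPEG forced on\n");
            return 0;
    }

    /* Snapshot the gate state, force both blocks on for the table read,
     * then restore the snapshot; errors unwind through the goto labels. */
    static int set_default_dpm_table(void)
    {
            bool vcn_was_gated = vcn_gated;
            bool jpeg_was_gated = jpeg_gated;
            int ret;

            ret = set_vcn(true);
            if (ret)
                    goto err0_out;
            ret = set_jpeg(true);
            if (ret)
                    goto err1_out;

            ret = build_default_dpm_table();

            set_jpeg(!jpeg_was_gated);
    err1_out:
            set_vcn(!vcn_was_gated);
    err0_out:
            return ret;
    }

    int main(void)
    {
            return set_default_dpm_table();
    }
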
@@ -579,6 +690,10 @@ static int smu_smc_table_sw_init(struct smu_context *smu)
        if (ret)
                return ret;
 
+       ret = smu_i2c_init(smu, &smu->adev->pm.smu_i2c);
+       if (ret)
+               return ret;
+
        return 0;
 }
 
@@ -586,6 +701,8 @@ static int smu_smc_table_sw_fini(struct smu_context *smu)
 {
        int ret;
 
+       smu_i2c_fini(smu, &smu->adev->pm.smu_i2c);
+
        ret = smu_free_memory_pool(smu);
        if (ret)
                return ret;
@@ -643,6 +760,11 @@ static int smu_sw_init(void *handle)
        smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
        smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
 
+       atomic_set(&smu->smu_power.power_gate.vcn_gated, 1);
+       atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1);
+       mutex_init(&smu->smu_power.power_gate.vcn_gate_lock);
+       mutex_init(&smu->smu_power.power_gate.jpeg_gate_lock);
+
        smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
        smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
        smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
@@ -734,7 +856,7 @@ static int smu_smc_hw_setup(struct smu_context *smu)
        uint32_t pcie_gen = 0, pcie_width = 0;
        int ret;
 
-       if (smu_is_dpm_running(smu) && adev->in_suspend) {
+       if (adev->in_suspend && smu_is_dpm_running(smu)) {
                dev_info(adev->dev, "dpm has been enabled\n");
                return 0;
        }
@@ -844,10 +966,6 @@ static int smu_smc_hw_setup(struct smu_context *smu)
                return ret;
        }
 
-       ret = smu_i2c_init(smu, &adev->pm.smu_i2c);
-       if (ret)
-               return ret;
-
        ret = smu_disable_umc_cdr_12gbps_workaround(smu);
        if (ret) {
                dev_err(adev->dev, "Workaround failed to disable UMC CDR feature on 12Gbps SKU!\n");
@@ -1046,8 +1164,6 @@ static int smu_smc_hw_cleanup(struct smu_context *smu)
        struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
-       smu_i2c_fini(smu, &adev->pm.smu_i2c);
-
        cancel_work_sync(&smu->throttling_logging_work);
 
        ret = smu_disable_thermal_alert(smu);
@@ -1590,6 +1706,9 @@ int smu_set_mp1_state(struct smu_context *smu,
        }
 
        ret = smu_send_smc_msg(smu, msg, NULL);
+       /* some asics may not support those messages */
+       if (ret == -EINVAL)
+               ret = 0;
        if (ret)
                dev_err(smu->adev->dev, "[PrepareMp1] Failed!\n");
 
@@ -1944,6 +2063,10 @@ int smu_read_sensor(struct smu_context *smu,
 
        mutex_lock(&smu->mutex);
 
+       if (smu->ppt_funcs->read_sensor)
+               if (!smu->ppt_funcs->read_sensor(smu, sensor, data, size))
+                       goto unlock;
+
        switch (sensor) {
        case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
                *((uint32_t *)data) = pstate_table->gfxclk_pstate.standard * 100;
@@ -1966,7 +2089,7 @@ int smu_read_sensor(struct smu_context *smu,
                *size = 4;
                break;
        case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
-               *(uint32_t *)data = smu->smu_power.power_gate.vcn_gated ? 0 : 1;
+               *(uint32_t *)data = atomic_read(&smu->smu_power.power_gate.vcn_gated) ? 0: 1;
                *size = 4;
                break;
        case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
@@ -1974,11 +2097,12 @@ int smu_read_sensor(struct smu_context *smu,
                *size = 4;
                break;
        default:
-               if (smu->ppt_funcs->read_sensor)
-                       ret = smu->ppt_funcs->read_sensor(smu, sensor, data, size);
+               *size = 0;
+               ret = -EOPNOTSUPP;
                break;
        }
 
+unlock:
        mutex_unlock(&smu->mutex);
 
        return ret;
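
smu_read_sensor() is reordered to try the ASIC-specific read_sensor hook first and to fall back to the generic switch only for the sensors common code can answer, returning -EOPNOTSUPP with *size zeroed when nothing matches. The same hook-then-fallback shape as a standalone sketch with made-up sensor IDs:

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    enum sensor { SENSOR_GFX_CLK, SENSOR_VCN_POWER_STATE, SENSOR_UNKNOWN };

    /* Stand-in for smu->ppt_funcs->read_sensor: knows only the GFX clock. */
    static int asic_read_sensor(enum sensor s, uint32_t *data, uint32_t *size)
    {
            if (s != SENSOR_GFX_CLK)
                    return -EINVAL;
            *data = 1850;
            *size = 4;
            return 0;
    }

    static int read_sensor(enum sensor s, uint32_t *data, uint32_t *size)
    {
            int ret = 0;

            /* ASIC hook first: if it handles the sensor we are done. */
            if (!asic_read_sensor(s, data, size))
                    return 0;

            /* Generic fallback for sensors the common code can answer. */
            switch (s) {
            case SENSOR_VCN_POWER_STATE:
                    *data = 1;
                    *size = 4;
                    break;
            default:
                    *size = 0;
                    ret = -EOPNOTSUPP;
                    break;
            }
            return ret;
    }

    int main(void)
    {
            uint32_t v, sz;

            printf("%d\n", read_sensor(SENSOR_GFX_CLK, &v, &sz));         /* 0 */
            printf("%d\n", read_sensor(SENSOR_VCN_POWER_STATE, &v, &sz)); /* 0 */
            printf("%d\n", read_sensor(SENSOR_UNKNOWN, &v, &sz));         /* -EOPNOTSUPP */
            return 0;
    }
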
index 3b9182c..fb962b9 100644 (file)
@@ -1849,8 +1849,6 @@ static bool arcturus_is_dpm_running(struct smu_context *smu)
 
 static int arcturus_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
 {
-       struct smu_power_context *smu_power = &smu->smu_power;
-       struct smu_power_gate *power_gate = &smu_power->power_gate;
        int ret = 0;
 
        if (enable) {
@@ -1861,7 +1859,6 @@ static int arcturus_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
                                return ret;
                        }
                }
-               power_gate->vcn_gated = false;
        } else {
                if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
                        ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_VCN_PG_BIT, 0);
@@ -1870,7 +1867,6 @@ static int arcturus_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
                                return ret;
                        }
                }
-               power_gate->vcn_gated = true;
        }
 
        return ret;
@@ -2080,22 +2076,11 @@ static const struct i2c_algorithm arcturus_i2c_algo = {
        .functionality = arcturus_i2c_func,
 };
 
-static bool arcturus_i2c_adapter_is_added(struct i2c_adapter *control)
-{
-       struct amdgpu_device *adev = to_amdgpu_device(control);
-
-       return control->dev.parent == &adev->pdev->dev;
-}
-
 static int arcturus_i2c_control_init(struct smu_context *smu, struct i2c_adapter *control)
 {
        struct amdgpu_device *adev = to_amdgpu_device(control);
        int res;
 
-       /* smu_i2c_eeprom_init may be called twice in sriov */
-       if (arcturus_i2c_adapter_is_added(control))
-               return 0;
-
        control->owner = THIS_MODULE;
        control->class = I2C_CLASS_SPD;
        control->dev.parent = &adev->pdev->dev;
@@ -2111,9 +2096,6 @@ static int arcturus_i2c_control_init(struct smu_context *smu, struct i2c_adapter
 
 static void arcturus_i2c_control_fini(struct smu_context *smu, struct i2c_adapter *control)
 {
-       if (!arcturus_i2c_adapter_is_added(control))
-               return;
-
        i2c_del_adapter(control);
 }
 
@@ -2222,14 +2204,17 @@ static const struct throttling_logging_label {
 };
 static void arcturus_log_thermal_throttling_event(struct smu_context *smu)
 {
+       int ret;
        int throttler_idx, throtting_events = 0, buf_idx = 0;
        struct amdgpu_device *adev = smu->adev;
        uint32_t throttler_status;
        char log_buf[256];
 
-       arcturus_get_smu_metrics_data(smu,
-                                     METRICS_THROTTLER_STATUS,
-                                     &throttler_status);
+       ret = arcturus_get_smu_metrics_data(smu,
+                                           METRICS_THROTTLER_STATUS,
+                                           &throttler_status);
+       if (ret)
+               return;
 
        memset(log_buf, 0, sizeof(log_buf));
        for (throttler_idx = 0; throttler_idx < ARRAY_SIZE(logging_label);
index 3b88396..ea70d73 100644 (file)
@@ -979,10 +979,7 @@ static int vega20_disable_all_smu_features(struct pp_hwmgr *hwmgr)
 {
        struct vega20_hwmgr *data =
                        (struct vega20_hwmgr *)(hwmgr->backend);
-       uint64_t features_enabled;
-       int i;
-       bool enabled;
-       int ret = 0;
+       int i, ret = 0;
 
        PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr,
                        PPSMC_MSG_DisableAllSmuFeatures,
@@ -990,17 +987,8 @@ static int vega20_disable_all_smu_features(struct pp_hwmgr *hwmgr)
                        "[DisableAllSMUFeatures] Failed to disable all smu features!",
                        return ret);
 
-       ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled);
-       PP_ASSERT_WITH_CODE(!ret,
-                       "[DisableAllSMUFeatures] Failed to get enabled smc features!",
-                       return ret);
-
-       for (i = 0; i < GNLD_FEATURES_MAX; i++) {
-               enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ?
-                       true : false;
-               data->smu_features[i].enabled = enabled;
-               data->smu_features[i].supported = enabled;
-       }
+       for (i = 0; i < GNLD_FEATURES_MAX; i++)
+               data->smu_features[i].enabled = 0;
 
        return 0;
 }
@@ -1652,12 +1640,6 @@ static void vega20_init_powergate_state(struct pp_hwmgr *hwmgr)
 
        data->uvd_power_gated = true;
        data->vce_power_gated = true;
-
-       if (data->smu_features[GNLD_DPM_UVD].enabled)
-               data->uvd_power_gated = false;
-
-       if (data->smu_features[GNLD_DPM_VCE].enabled)
-               data->vce_power_gated = false;
 }
 
 static int vega20_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
@@ -3230,10 +3212,11 @@ static int vega20_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf)
 
 static int vega20_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfeature_masks)
 {
-       uint64_t features_enabled;
-       uint64_t features_to_enable;
-       uint64_t features_to_disable;
-       int ret = 0;
+       struct vega20_hwmgr *data =
+                       (struct vega20_hwmgr *)(hwmgr->backend);
+       uint64_t features_enabled, features_to_enable, features_to_disable;
+       int i, ret = 0;
+       bool enabled;
 
        if (new_ppfeature_masks >= (1ULL << GNLD_FEATURES_MAX))
                return -EINVAL;
@@ -3262,6 +3245,17 @@ static int vega20_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfe
                        return ret;
        }
 
+       /* Update the cached feature enablement state */
+       ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled);
+       if (ret)
+               return ret;
+
+       for (i = 0; i < GNLD_FEATURES_MAX; i++) {
+               enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ?
+                       true : false;
+               data->smu_features[i].enabled = enabled;
+       }
+
        return 0;
 }
 
index 28312d6..074458e 100644 (file)
@@ -292,8 +292,10 @@ struct smu_dpm_context {
 struct smu_power_gate {
        bool uvd_gated;
        bool vce_gated;
-       bool vcn_gated;
-       bool jpeg_gated;
+       atomic_t vcn_gated;
+       atomic_t jpeg_gated;
+       struct mutex vcn_gate_lock;
+       struct mutex jpeg_gate_lock;
 };
 
 struct smu_power_context {
index b2232e2..aa2708f 100644 (file)
@@ -27,7 +27,7 @@
 // *** IMPORTANT ***
 // SMU TEAM: Always increment the interface version if 
 // any structure is changed in this file
-#define SMU11_DRIVER_IF_VERSION 0x33
+#define SMU11_DRIVER_IF_VERSION 0x34
 
 #define PPTABLE_Sienna_Cichlid_SMU_VERSION 5
 
@@ -968,9 +968,15 @@ typedef struct {
 
 typedef struct {
   uint32_t CurrClock[PPCLK_COUNT];
-  uint16_t AverageGfxclkFrequency;
-  uint16_t AverageFclkFrequency;
-  uint16_t AverageUclkFrequency  ;
+
+  uint16_t AverageGfxclkFrequencyPreDs;
+  uint16_t AverageGfxclkFrequencyPostDs;
+  uint16_t AverageFclkFrequencyPreDs;
+  uint16_t AverageFclkFrequencyPostDs;
+  uint16_t AverageUclkFrequencyPreDs  ;
+  uint16_t AverageUclkFrequencyPostDs  ;
+
+  
   uint16_t AverageGfxActivity    ;
   uint16_t AverageUclkActivity   ;
   uint8_t  CurrSocVoltageOffset  ;
@@ -988,6 +994,7 @@ typedef struct {
   uint16_t TemperatureLiquid0    ;
   uint16_t TemperatureLiquid1    ;  
   uint16_t TemperaturePlx        ;
+  uint16_t Padding16             ;
   uint32_t ThrottlerStatus       ; 
  
   uint8_t  LinkDpmLevel;
@@ -1006,8 +1013,10 @@ typedef struct {
   uint16_t AverageDclk0Frequency  ;  
   uint16_t AverageVclk1Frequency  ;
   uint16_t AverageDclk1Frequency  ;  
-  uint16_t VcnActivityPercentage ; //place holder, David N. to provide full sequence
-  uint16_t padding16_2;
+  uint16_t VcnActivityPercentage  ; //place holder, David N. to provide full sequence
+  uint8_t  PcieRate               ;
+  uint8_t  PcieWidth              ;
+
 } SmuMetrics_t;
 
 typedef struct {
index 429f5aa..6a42331 100644 (file)
@@ -30,8 +30,8 @@
 #define SMU11_DRIVER_IF_VERSION_NV10 0x36
 #define SMU11_DRIVER_IF_VERSION_NV12 0x33
 #define SMU11_DRIVER_IF_VERSION_NV14 0x36
-#define SMU11_DRIVER_IF_VERSION_Sienna_Cichlid 0x33
-#define SMU11_DRIVER_IF_VERSION_Navy_Flounder 0x2
+#define SMU11_DRIVER_IF_VERSION_Sienna_Cichlid 0x34
+#define SMU11_DRIVER_IF_VERSION_Navy_Flounder 0x3
 
 /* MP Apertures */
 #define MP0_Public                     0x03800000
index 6aaf483..9f62af9 100644 (file)
@@ -785,8 +785,6 @@ static int navi10_set_default_dpm_table(struct smu_context *smu)
 
 static int navi10_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
 {
-       struct smu_power_context *smu_power = &smu->smu_power;
-       struct smu_power_gate *power_gate = &smu_power->power_gate;
        int ret = 0;
 
        if (enable) {
@@ -796,14 +794,12 @@ static int navi10_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
                        if (ret)
                                return ret;
                }
-               power_gate->vcn_gated = false;
        } else {
                if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
                        ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PowerDownVcn, NULL);
                        if (ret)
                                return ret;
                }
-               power_gate->vcn_gated = true;
        }
 
        return ret;
@@ -811,8 +807,6 @@ static int navi10_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
 
 static int navi10_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
 {
-       struct smu_power_context *smu_power = &smu->smu_power;
-       struct smu_power_gate *power_gate = &smu_power->power_gate;
        int ret = 0;
 
        if (enable) {
@@ -821,14 +815,12 @@ static int navi10_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
                        if (ret)
                                return ret;
                }
-               power_gate->jpeg_gated = false;
        } else {
                if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
                        ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PowerDownJpeg, NULL);
                        if (ret)
                                return ret;
                }
-               power_gate->jpeg_gated = true;
        }
 
        return ret;
@@ -2457,22 +2449,11 @@ static const struct i2c_algorithm navi10_i2c_algo = {
        .functionality = navi10_i2c_func,
 };
 
-static bool navi10_i2c_adapter_is_added(struct i2c_adapter *control)
-{
-       struct amdgpu_device *adev = to_amdgpu_device(control);
-
-       return control->dev.parent == &adev->pdev->dev;
-}
-
 static int navi10_i2c_control_init(struct smu_context *smu, struct i2c_adapter *control)
 {
        struct amdgpu_device *adev = to_amdgpu_device(control);
        int res;
 
-       /* smu_i2c_eeprom_init may be called twice in sriov */
-       if (navi10_i2c_adapter_is_added(control))
-               return 0;
-
        control->owner = THIS_MODULE;
        control->class = I2C_CLASS_SPD;
        control->dev.parent = &adev->pdev->dev;
@@ -2488,9 +2469,6 @@ static int navi10_i2c_control_init(struct smu_context *smu, struct i2c_adapter *
 
 static void navi10_i2c_control_fini(struct smu_context *smu, struct i2c_adapter *control)
 {
-       if (!navi10_i2c_adapter_is_added(control))
-               return;
-
        i2c_del_adapter(control);
 }
 
index 575ae4b..dbb676c 100644 (file)
@@ -459,8 +459,6 @@ static enum amd_pm_state_type renoir_get_current_power_state(struct smu_context
 
 static int renoir_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
 {
-       struct smu_power_context *smu_power = &smu->smu_power;
-       struct smu_power_gate *power_gate = &smu_power->power_gate;
        int ret = 0;
 
        if (enable) {
@@ -470,14 +468,12 @@ static int renoir_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
                        if (ret)
                                return ret;
                }
-               power_gate->vcn_gated = false;
        } else {
                if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
                        ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PowerDownVcn, NULL);
                        if (ret)
                                return ret;
                }
-               power_gate->vcn_gated = true;
        }
 
        return ret;
@@ -485,8 +481,6 @@ static int renoir_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
 
 static int renoir_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
 {
-       struct smu_power_context *smu_power = &smu->smu_power;
-       struct smu_power_gate *power_gate = &smu_power->power_gate;
        int ret = 0;
 
        if (enable) {
@@ -495,14 +489,12 @@ static int renoir_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
                        if (ret)
                                return ret;
                }
-               power_gate->jpeg_gated = false;
        } else {
                if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
                        ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0, NULL);
                        if (ret)
                                return ret;
                }
-               power_gate->jpeg_gated = true;
        }
 
        return ret;
index 59da3ca..3865dbe 100644 (file)
        FEATURE_MASK(FEATURE_DPM_FCLK_BIT)       | \
        FEATURE_MASK(FEATURE_DPM_DCEFCLK_BIT))
 
+#define SMU_11_0_7_GFX_BUSY_THRESHOLD 15
+
 static struct cmn2asic_msg_mapping sienna_cichlid_message_map[SMU_MSG_MAX_COUNT] = {
        MSG_MAP(TestMessage,                    PPSMC_MSG_TestMessage,                 1),
        MSG_MAP(GetSmuVersion,                  PPSMC_MSG_GetSmuVersion,               1),
        MSG_MAP(GetDriverIfVersion,             PPSMC_MSG_GetDriverIfVersion,          1),
-       MSG_MAP(SetAllowedFeaturesMaskLow,      PPSMC_MSG_SetAllowedFeaturesMaskLow,   1),
-       MSG_MAP(SetAllowedFeaturesMaskHigh,     PPSMC_MSG_SetAllowedFeaturesMaskHigh,  1),
-       MSG_MAP(EnableAllSmuFeatures,           PPSMC_MSG_EnableAllSmuFeatures,        1),
-       MSG_MAP(DisableAllSmuFeatures,          PPSMC_MSG_DisableAllSmuFeatures,       1),
+       MSG_MAP(SetAllowedFeaturesMaskLow,      PPSMC_MSG_SetAllowedFeaturesMaskLow,   0),
+       MSG_MAP(SetAllowedFeaturesMaskHigh,     PPSMC_MSG_SetAllowedFeaturesMaskHigh,  0),
+       MSG_MAP(EnableAllSmuFeatures,           PPSMC_MSG_EnableAllSmuFeatures,        0),
+       MSG_MAP(DisableAllSmuFeatures,          PPSMC_MSG_DisableAllSmuFeatures,       0),
        MSG_MAP(EnableSmuFeaturesLow,           PPSMC_MSG_EnableSmuFeaturesLow,        1),
        MSG_MAP(EnableSmuFeaturesHigh,          PPSMC_MSG_EnableSmuFeaturesHigh,       1),
        MSG_MAP(DisableSmuFeaturesLow,          PPSMC_MSG_DisableSmuFeaturesLow,       1),
@@ -85,42 +87,43 @@ static struct cmn2asic_msg_mapping sienna_cichlid_message_map[SMU_MSG_MAX_COUNT]
        MSG_MAP(GetEnabledSmuFeaturesLow,       PPSMC_MSG_GetRunningSmuFeaturesLow,    1),
        MSG_MAP(GetEnabledSmuFeaturesHigh,      PPSMC_MSG_GetRunningSmuFeaturesHigh,   1),
        MSG_MAP(SetWorkloadMask,                PPSMC_MSG_SetWorkloadMask,             1),
-       MSG_MAP(SetPptLimit,                    PPSMC_MSG_SetPptLimit,                 1),
-       MSG_MAP(SetDriverDramAddrHigh,          PPSMC_MSG_SetDriverDramAddrHigh,       1),
-       MSG_MAP(SetDriverDramAddrLow,           PPSMC_MSG_SetDriverDramAddrLow,        1),
-       MSG_MAP(SetToolsDramAddrHigh,           PPSMC_MSG_SetToolsDramAddrHigh,        1),
-       MSG_MAP(SetToolsDramAddrLow,            PPSMC_MSG_SetToolsDramAddrLow,         1),
-       MSG_MAP(TransferTableSmu2Dram,          PPSMC_MSG_TransferTableSmu2Dram,       1),
-       MSG_MAP(TransferTableDram2Smu,          PPSMC_MSG_TransferTableDram2Smu,       1),
-       MSG_MAP(UseDefaultPPTable,              PPSMC_MSG_UseDefaultPPTable,           1),
-       MSG_MAP(EnterBaco,                      PPSMC_MSG_EnterBaco,                   1),
-       MSG_MAP(SetSoftMinByFreq,               PPSMC_MSG_SetSoftMinByFreq,            1),
-       MSG_MAP(SetSoftMaxByFreq,               PPSMC_MSG_SetSoftMaxByFreq,            1),
+       MSG_MAP(SetPptLimit,                    PPSMC_MSG_SetPptLimit,                 0),
+       MSG_MAP(SetDriverDramAddrHigh,          PPSMC_MSG_SetDriverDramAddrHigh,       0),
+       MSG_MAP(SetDriverDramAddrLow,           PPSMC_MSG_SetDriverDramAddrLow,        0),
+       MSG_MAP(SetToolsDramAddrHigh,           PPSMC_MSG_SetToolsDramAddrHigh,        0),
+       MSG_MAP(SetToolsDramAddrLow,            PPSMC_MSG_SetToolsDramAddrLow,         0),
+       MSG_MAP(TransferTableSmu2Dram,          PPSMC_MSG_TransferTableSmu2Dram,       0),
+       MSG_MAP(TransferTableDram2Smu,          PPSMC_MSG_TransferTableDram2Smu,       0),
+       MSG_MAP(UseDefaultPPTable,              PPSMC_MSG_UseDefaultPPTable,           0),
+       MSG_MAP(EnterBaco,                      PPSMC_MSG_EnterBaco,                   0),
+       MSG_MAP(SetSoftMinByFreq,               PPSMC_MSG_SetSoftMinByFreq,            0),
+       MSG_MAP(SetSoftMaxByFreq,               PPSMC_MSG_SetSoftMaxByFreq,            0),
        MSG_MAP(SetHardMinByFreq,               PPSMC_MSG_SetHardMinByFreq,            1),
-       MSG_MAP(SetHardMaxByFreq,               PPSMC_MSG_SetHardMaxByFreq,            1),
+       MSG_MAP(SetHardMaxByFreq,               PPSMC_MSG_SetHardMaxByFreq,            0),
        MSG_MAP(GetMinDpmFreq,                  PPSMC_MSG_GetMinDpmFreq,               1),
        MSG_MAP(GetMaxDpmFreq,                  PPSMC_MSG_GetMaxDpmFreq,               1),
        MSG_MAP(GetDpmFreqByIndex,              PPSMC_MSG_GetDpmFreqByIndex,           1),
-       MSG_MAP(SetGeminiMode,                  PPSMC_MSG_SetGeminiMode,               1),
-       MSG_MAP(SetGeminiApertureHigh,          PPSMC_MSG_SetGeminiApertureHigh,       1),
-       MSG_MAP(SetGeminiApertureLow,           PPSMC_MSG_SetGeminiApertureLow,        1),
-       MSG_MAP(OverridePcieParameters,         PPSMC_MSG_OverridePcieParameters,      1),
-       MSG_MAP(ReenableAcDcInterrupt,          PPSMC_MSG_ReenableAcDcInterrupt,       1),
-       MSG_MAP(NotifyPowerSource,              PPSMC_MSG_NotifyPowerSource,           1),
-       MSG_MAP(SetUclkFastSwitch,              PPSMC_MSG_SetUclkFastSwitch,           1),
-       MSG_MAP(SetVideoFps,                    PPSMC_MSG_SetVideoFps,                 1),
+       MSG_MAP(SetGeminiMode,                  PPSMC_MSG_SetGeminiMode,               0),
+       MSG_MAP(SetGeminiApertureHigh,          PPSMC_MSG_SetGeminiApertureHigh,       0),
+       MSG_MAP(SetGeminiApertureLow,           PPSMC_MSG_SetGeminiApertureLow,        0),
+       MSG_MAP(OverridePcieParameters,         PPSMC_MSG_OverridePcieParameters,      0),
+       MSG_MAP(ReenableAcDcInterrupt,          PPSMC_MSG_ReenableAcDcInterrupt,       0),
+       MSG_MAP(NotifyPowerSource,              PPSMC_MSG_NotifyPowerSource,           0),
+       MSG_MAP(SetUclkFastSwitch,              PPSMC_MSG_SetUclkFastSwitch,           0),
+       MSG_MAP(SetVideoFps,                    PPSMC_MSG_SetVideoFps,                 0),
        MSG_MAP(PrepareMp1ForUnload,            PPSMC_MSG_PrepareMp1ForUnload,         1),
-       MSG_MAP(AllowGfxOff,                    PPSMC_MSG_AllowGfxOff,                 1),
-       MSG_MAP(DisallowGfxOff,                 PPSMC_MSG_DisallowGfxOff,              1),
-       MSG_MAP(GetPptLimit,                    PPSMC_MSG_GetPptLimit,                 1),
+       MSG_MAP(AllowGfxOff,                    PPSMC_MSG_AllowGfxOff,                 0),
+       MSG_MAP(DisallowGfxOff,                 PPSMC_MSG_DisallowGfxOff,              0),
+       MSG_MAP(GetPptLimit,                    PPSMC_MSG_GetPptLimit,                 0),
        MSG_MAP(GetDcModeMaxDpmFreq,            PPSMC_MSG_GetDcModeMaxDpmFreq,         1),
-       MSG_MAP(ExitBaco,                       PPSMC_MSG_ExitBaco,                    1),
-       MSG_MAP(PowerUpVcn,                     PPSMC_MSG_PowerUpVcn,                  1),
-       MSG_MAP(PowerDownVcn,                   PPSMC_MSG_PowerDownVcn,                1),
-       MSG_MAP(PowerUpJpeg,                    PPSMC_MSG_PowerUpJpeg,                 1),
-       MSG_MAP(PowerDownJpeg,                  PPSMC_MSG_PowerDownJpeg,               1),
-       MSG_MAP(BacoAudioD3PME,                 PPSMC_MSG_BacoAudioD3PME,              1),
-       MSG_MAP(ArmD3,                          PPSMC_MSG_ArmD3,                       1),
+       MSG_MAP(ExitBaco,                       PPSMC_MSG_ExitBaco,                    0),
+       MSG_MAP(PowerUpVcn,                     PPSMC_MSG_PowerUpVcn,                  0),
+       MSG_MAP(PowerDownVcn,                   PPSMC_MSG_PowerDownVcn,                0),
+       MSG_MAP(PowerUpJpeg,                    PPSMC_MSG_PowerUpJpeg,                 0),
+       MSG_MAP(PowerDownJpeg,                  PPSMC_MSG_PowerDownJpeg,               0),
+       MSG_MAP(BacoAudioD3PME,                 PPSMC_MSG_BacoAudioD3PME,              0),
+       MSG_MAP(ArmD3,                          PPSMC_MSG_ArmD3,                       0),
+       MSG_MAP(Mode1Reset,                     PPSMC_MSG_Mode1Reset,                  0),
 };
 
 static struct cmn2asic_mapping sienna_cichlid_clk_map[SMU_CLK_COUNT] = {
@@ -442,13 +445,16 @@ static int sienna_cichlid_get_smu_metrics_data(struct smu_context *smu,
                *value = metrics->CurrClock[PPCLK_DCEFCLK];
                break;
        case METRICS_AVERAGE_GFXCLK:
-               *value = metrics->AverageGfxclkFrequency;
+               if (metrics->AverageGfxActivity <= SMU_11_0_7_GFX_BUSY_THRESHOLD)
+                       *value = metrics->AverageGfxclkFrequencyPostDs;
+               else
+                       *value = metrics->AverageGfxclkFrequencyPreDs;
                break;
        case METRICS_AVERAGE_FCLK:
-               *value = metrics->AverageFclkFrequency;
+               *value = metrics->AverageFclkFrequencyPostDs;
                break;
        case METRICS_AVERAGE_UCLK:
-               *value = metrics->AverageUclkFrequency;
+               *value = metrics->AverageUclkFrequencyPostDs;
                break;
        case METRICS_AVERAGE_GFXACTIVITY:
                *value = metrics->AverageGfxActivity;
@@ -760,10 +766,7 @@ static int sienna_cichlid_set_default_dpm_table(struct smu_context *smu)
 
 static int sienna_cichlid_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
 {
-       struct smu_power_context *smu_power = &smu->smu_power;
-       struct smu_power_gate *power_gate = &smu_power->power_gate;
        struct amdgpu_device *adev = smu->adev;
-
        int ret = 0;
 
        if (enable) {
@@ -779,7 +782,6 @@ static int sienna_cichlid_dpm_set_vcn_enable(struct smu_context *smu, bool enabl
                                        return ret;
                        }
                }
-               power_gate->vcn_gated = false;
        } else {
                if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) {
                        ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownVcn, 0, NULL);
@@ -792,7 +794,6 @@ static int sienna_cichlid_dpm_set_vcn_enable(struct smu_context *smu, bool enabl
                                        return ret;
                        }
                }
-               power_gate->vcn_gated = true;
        }
 
        return ret;
@@ -800,8 +801,6 @@ static int sienna_cichlid_dpm_set_vcn_enable(struct smu_context *smu, bool enabl
 
 static int sienna_cichlid_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
 {
-       struct smu_power_context *smu_power = &smu->smu_power;
-       struct smu_power_gate *power_gate = &smu_power->power_gate;
        int ret = 0;
 
        if (enable) {
@@ -810,14 +809,12 @@ static int sienna_cichlid_dpm_set_jpeg_enable(struct smu_context *smu, bool enab
                        if (ret)
                                return ret;
                }
-               power_gate->jpeg_gated = false;
        } else {
                if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) {
                        ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0, NULL);
                        if (ret)
                                return ret;
                }
-               power_gate->jpeg_gated = true;
        }
 
        return ret;
@@ -2624,22 +2621,11 @@ static const struct i2c_algorithm sienna_cichlid_i2c_algo = {
        .functionality = sienna_cichlid_i2c_func,
 };
 
-static bool sienna_cichlid_i2c_adapter_is_added(struct i2c_adapter *control)
-{
-       struct amdgpu_device *adev = to_amdgpu_device(control);
-
-       return control->dev.parent == &adev->pdev->dev;
-}
-
 static int sienna_cichlid_i2c_control_init(struct smu_context *smu, struct i2c_adapter *control)
 {
        struct amdgpu_device *adev = to_amdgpu_device(control);
        int res;
 
-       /* smu_i2c_eeprom_init may be called twice in sriov */
-       if (sienna_cichlid_i2c_adapter_is_added(control))
-               return 0;
-
        control->owner = THIS_MODULE;
        control->class = I2C_CLASS_SPD;
        control->dev.parent = &adev->pdev->dev;
@@ -2655,9 +2641,6 @@ static int sienna_cichlid_i2c_control_init(struct smu_context *smu, struct i2c_a
 
 static void sienna_cichlid_i2c_control_fini(struct smu_context *smu, struct i2c_adapter *control)
 {
-       if (!sienna_cichlid_i2c_adapter_is_added(control))
-               return;
-
        i2c_del_adapter(control);
 }
 
index be4b678..5c23c44 100644 (file)
@@ -166,7 +166,7 @@ int smu_cmn_to_asic_specific_index(struct smu_context *smu,
 
        switch (type) {
        case CMN2ASIC_MAPPING_MSG:
-               if (index > SMU_MSG_MAX_COUNT ||
+               if (index >= SMU_MSG_MAX_COUNT ||
                    !smu->message_map)
                        return -EINVAL;
 
@@ -181,7 +181,7 @@ int smu_cmn_to_asic_specific_index(struct smu_context *smu,
                return msg_mapping.map_to;
 
        case CMN2ASIC_MAPPING_CLK:
-               if (index > SMU_CLK_COUNT ||
+               if (index >= SMU_CLK_COUNT ||
                    !smu->clock_map)
                        return -EINVAL;
 
@@ -192,7 +192,7 @@ int smu_cmn_to_asic_specific_index(struct smu_context *smu,
                return mapping.map_to;
 
        case CMN2ASIC_MAPPING_FEATURE:
-               if (index > SMU_FEATURE_COUNT ||
+               if (index >= SMU_FEATURE_COUNT ||
                    !smu->feature_map)
                        return -EINVAL;
 
@@ -203,7 +203,7 @@ int smu_cmn_to_asic_specific_index(struct smu_context *smu,
                return mapping.map_to;
 
        case CMN2ASIC_MAPPING_TABLE:
-               if (index > SMU_TABLE_COUNT ||
+               if (index >= SMU_TABLE_COUNT ||
                    !smu->table_map)
                        return -EINVAL;
 
@@ -214,7 +214,7 @@ int smu_cmn_to_asic_specific_index(struct smu_context *smu,
                return mapping.map_to;
 
        case CMN2ASIC_MAPPING_PWR:
-               if (index > SMU_POWER_SOURCE_COUNT ||
+               if (index >= SMU_POWER_SOURCE_COUNT ||
                    !smu->pwr_src_map)
                        return -EINVAL;
 
index d0deaef..264073d 100644 (file)
@@ -60,7 +60,6 @@
 #define smu_disable_all_features_with_exception(smu, mask)             smu_ppt_funcs(disable_all_features_with_exception, 0, smu, mask)
 #define smu_is_dpm_running(smu)                                                smu_ppt_funcs(is_dpm_running, 0 , smu)
 #define smu_notify_display_change(smu)                                 smu_ppt_funcs(notify_display_change, 0, smu)
-#define smu_set_default_dpm_table(smu)                                 smu_ppt_funcs(set_default_dpm_table, 0, smu)
 #define smu_populate_umd_state_clk(smu)                                        smu_ppt_funcs(populate_umd_state_clk, 0, smu)
 #define smu_set_default_od8_settings(smu)                              smu_ppt_funcs(set_default_od8_settings, 0, smu)
 #define smu_enable_thermal_alert(smu)                                  smu_ppt_funcs(enable_thermal_alert, 0, smu)
@@ -77,8 +76,6 @@
 #define smu_get_dal_power_level(smu, clocks)                           smu_ppt_funcs(get_dal_power_level, 0, smu, clocks)
 #define smu_get_perf_level(smu, designation, level)                    smu_ppt_funcs(get_perf_level, 0, smu, designation, level)
 #define smu_get_current_shallow_sleep_clocks(smu, clocks)              smu_ppt_funcs(get_current_shallow_sleep_clocks, 0, smu, clocks)
-#define smu_dpm_set_vcn_enable(smu, enable)                            smu_ppt_funcs(dpm_set_vcn_enable, 0, smu, enable)
-#define smu_dpm_set_jpeg_enable(smu, enable)                           smu_ppt_funcs(dpm_set_jpeg_enable, 0, smu, enable)
 #define smu_set_watermarks_table(smu, clock_ranges)                    smu_ppt_funcs(set_watermarks_table, 0, smu, clock_ranges)
 #define smu_thermal_temperature_range_update(smu, range, rw)           smu_ppt_funcs(thermal_temperature_range_update, 0, smu, range, rw)
 #define smu_register_irq_handler(smu)                                  smu_ppt_funcs(register_irq_handler, 0, smu)
index fd82402..7b950a5 100644 (file)
@@ -1029,6 +1029,7 @@ int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
        case CHIP_NAVI14:
        case CHIP_NAVI12:
        case CHIP_SIENNA_CICHLID:
+       case CHIP_NAVY_FLOUNDER:
                if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
                        return 0;
                if (enable)
index 02159ca..c18169a 100644 (file)
@@ -2725,7 +2725,10 @@ static int ci_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
 
 static bool ci_is_dpm_running(struct pp_hwmgr *hwmgr)
 {
-       return ci_is_smc_ram_running(hwmgr);
+       return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
+                                            CGS_IND_REG__SMC, FEATURE_STATUS,
+                                            VOLTAGE_CONTROLLER_ON))
+               ? true : false;
 }
 
 static int ci_smu_init(struct pp_hwmgr *hwmgr)
index 09b3228..b23cb2f 100644 (file)
@@ -4308,11 +4308,11 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
 {
        int ret;
 
-       port = drm_dp_mst_topology_get_port_validated(mgr, port);
-       if (!port)
+       if (slots < 0)
                return false;
 
-       if (slots < 0)
+       port = drm_dp_mst_topology_get_port_validated(mgr, port);
+       if (!port)
                return false;
 
        if (port->vcpi.vcpi > 0) {
@@ -4328,6 +4328,7 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
        if (ret) {
                DRM_DEBUG_KMS("failed to init vcpi slots=%d max=63 ret=%d\n",
                              DIV_ROUND_UP(pbn, mgr->pbn_div), ret);
+               drm_dp_mst_topology_put_port(port);
                goto out;
        }
        DRM_DEBUG_KMS("initing vcpi for pbn=%d slots=%d\n",
index bc38322..13068fd 100644 (file)
@@ -815,8 +815,7 @@ static void drm_dev_release(struct kref *ref)
 
        drm_managed_release(dev);
 
-       if (dev->managed.final_kfree)
-               kfree(dev->managed.final_kfree);
+       kfree(dev->managed.final_kfree);
 }
 
 /**
index d4e7c83..19d7386 100644 (file)
@@ -879,6 +879,9 @@ err:
  * @file_priv: drm file-private structure
  *
  * Open an object using the global name, returning a handle and the size.
+ *
+ * This handle (of course) holds a reference to the object, so the object
+ * will not go away until the handle is deleted.
  */
 int
 drm_gem_open_ioctl(struct drm_device *dev, void *data,
index d00ea38..58f5dc2 100644 (file)
@@ -121,6 +121,12 @@ static const struct dmi_system_id orientation_data[] = {
                  DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T101HA"),
                },
                .driver_data = (void *)&lcd800x1280_rightside_up,
+       }, {    /* Asus T103HAF */
+               .matches = {
+                 DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+                 DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T103HAF"),
+               },
+               .driver_data = (void *)&lcd800x1280_rightside_up,
        }, {    /* GPD MicroPC (generic strings, also match on bios date) */
                .matches = {
                  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Default string"),
index 729ec6e..b2ec3a5 100644 (file)
@@ -14930,7 +14930,7 @@ static int intel_atomic_check(struct drm_device *dev,
        if (any_ms && !check_digital_port_conflicts(state)) {
                drm_dbg_kms(&dev_priv->drm,
                            "rejecting conflicting digital port configuration\n");
-               ret = EINVAL;
+               ret = -EINVAL;
                goto fail;
        }
 
index 3644752..5a5cfe2 100644 (file)
@@ -2044,9 +2044,12 @@ DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
 static int i915_lpsp_capability_show(struct seq_file *m, void *data)
 {
        struct drm_connector *connector = m->private;
-       struct intel_encoder *encoder =
-                       intel_attached_encoder(to_intel_connector(connector));
        struct drm_i915_private *i915 = to_i915(connector->dev);
+       struct intel_encoder *encoder;
+
+       encoder = intel_attached_encoder(to_intel_connector(connector));
+       if (!encoder)
+               return -ENODEV;
 
        if (connector->status != connector_status_connected)
                return -ENODEV;
index 0c713e8..e0fcb89 100644 (file)
@@ -4147,6 +4147,12 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
                },
        },
        {
+               .name = "TC cold off",
+               .domains = TGL_TC_COLD_OFF_POWER_DOMAINS,
+               .ops = &tgl_tc_cold_off_ops,
+               .id = DISP_PW_ID_NONE,
+       },
+       {
                .name = "AUX A",
                .domains = TGL_AUX_A_IO_POWER_DOMAINS,
                .ops = &icl_aux_power_well_ops,
@@ -4332,12 +4338,6 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
                        .hsw.irq_pipe_mask = BIT(PIPE_D),
                },
        },
-       {
-               .name = "TC cold off",
-               .domains = TGL_TC_COLD_OFF_POWER_DOMAINS,
-               .ops = &tgl_tc_cold_off_ops,
-               .id = DISP_PW_ID_NONE,
-       },
 };
 
 static const struct i915_power_well_desc rkl_power_wells[] = {
@@ -5240,10 +5240,10 @@ struct buddy_page_mask {
 };
 
 static const struct buddy_page_mask tgl_buddy_page_masks[] = {
-       { .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0xE },
        { .num_channels = 1, .type = INTEL_DRAM_DDR4,   .page_mask = 0xF },
        { .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1C },
        { .num_channels = 2, .type = INTEL_DRAM_DDR4,   .page_mask = 0x1F },
+       { .num_channels = 4, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x38 },
        {}
 };
 
index aaed9eb..bbde3b1 100644 (file)
@@ -1929,7 +1929,7 @@ static int pwm_setup_backlight(struct intel_connector *connector,
                return retval;
        }
 
-       level = DIV_ROUND_UP(pwm_get_duty_cycle(panel->backlight.pwm) * 100,
+       level = DIV_ROUND_UP_ULL(pwm_get_duty_cycle(panel->backlight.pwm) * 100,
                             CRC_PMIC_PWM_PERIOD_NS);
        panel->backlight.level =
                intel_panel_compute_brightness(connector, level);
index e946032..2c2bf24 100644 (file)
@@ -469,7 +469,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
                                        locked = 1;
                                }
                                ret = pin_user_pages_remote
-                                       (work->task, mm,
+                                       (mm,
                                         obj->userptr.ptr + pinned * PAGE_SIZE,
                                         npages - pinned,
                                         flags,
index 072725a..ad86c5e 100644 (file)
@@ -70,6 +70,7 @@ static void vgpu_pci_cfg_mem_write(struct intel_vgpu *vgpu, unsigned int off,
 {
        u8 *cfg_base = vgpu_cfg_space(vgpu);
        u8 mask, new, old;
+       pci_power_t pwr;
        int i = 0;
 
        for (; i < bytes && (off + i < sizeof(pci_cfg_space_rw_bmp)); i++) {
@@ -91,6 +92,15 @@ static void vgpu_pci_cfg_mem_write(struct intel_vgpu *vgpu, unsigned int off,
        /* For other configuration space directly copy as it is. */
        if (i < bytes)
                memcpy(cfg_base + off + i, src + i, bytes - i);
+
+       if (off == vgpu->cfg_space.pmcsr_off && vgpu->cfg_space.pmcsr_off) {
+               pwr = (pci_power_t __force)(*(u16 *)(&vgpu_cfg_space(vgpu)[off])
+                       & PCI_PM_CTRL_STATE_MASK);
+               if (pwr == PCI_D3hot)
+                       vgpu->d3_entered = true;
+               gvt_dbg_core("vgpu-%d power status changed to %d\n",
+                            vgpu->id, pwr);
+       }
 }
 
 /**
@@ -366,6 +376,7 @@ void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
        struct intel_gvt *gvt = vgpu->gvt;
        const struct intel_gvt_device_info *info = &gvt->device_info;
        u16 *gmch_ctl;
+       u8 next;
 
        memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space,
               info->cfg_space_size);
@@ -401,6 +412,19 @@ void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
                pci_resource_len(gvt->gt->i915->drm.pdev, 2);
 
        memset(vgpu_cfg_space(vgpu) + PCI_ROM_ADDRESS, 0, 4);
+
+       /* PM Support */
+       vgpu->cfg_space.pmcsr_off = 0;
+       if (vgpu_cfg_space(vgpu)[PCI_STATUS] & PCI_STATUS_CAP_LIST) {
+               next = vgpu_cfg_space(vgpu)[PCI_CAPABILITY_LIST];
+               do {
+                       if (vgpu_cfg_space(vgpu)[next + PCI_CAP_LIST_ID] == PCI_CAP_ID_PM) {
+                               vgpu->cfg_space.pmcsr_off = next + PCI_PM_CTRL;
+                               break;
+                       }
+                       next = vgpu_cfg_space(vgpu)[next + PCI_CAP_LIST_NEXT];
+               } while (next);
+       }
 }
 
 /**
index 2100161..a3a4305 100644 (file)
@@ -2501,7 +2501,7 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
        return create_scratch_page_tree(vgpu);
 }
 
-static void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu)
+void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu)
 {
        struct list_head *pos, *n;
        struct intel_vgpu_mm *mm;
index 320b8d6..52d0d88 100644 (file)
@@ -279,4 +279,6 @@ int intel_vgpu_emulate_ggtt_mmio_read(struct intel_vgpu *vgpu,
 int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu,
        unsigned int off, void *p_data, unsigned int bytes);
 
+void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu);
+
 #endif /* _GVT_GTT_H_ */
index a4a6db6..ff7f251 100644 (file)
@@ -106,6 +106,7 @@ struct intel_vgpu_pci_bar {
 struct intel_vgpu_cfg_space {
        unsigned char virtual_cfg_space[PCI_CFG_SPACE_EXP_SIZE];
        struct intel_vgpu_pci_bar bar[INTEL_GVT_MAX_BAR_NUM];
+       u32 pmcsr_off;
 };
 
 #define vgpu_cfg_space(vgpu) ((vgpu)->cfg_space.virtual_cfg_space)
@@ -198,6 +199,8 @@ struct intel_vgpu {
        struct intel_vgpu_submission submission;
        struct radix_tree_root page_track_tree;
        u32 hws_pga[I915_NUM_ENGINES];
+       /* Set on PCI_D3, reset on DMLR, not reflecting the actual PM state */
+       bool d3_entered;
 
        struct dentry *debugfs;
 
index 7d36162..8fa9b31 100644 (file)
@@ -257,6 +257,7 @@ void intel_gvt_release_vgpu(struct intel_vgpu *vgpu)
        intel_gvt_deactivate_vgpu(vgpu);
 
        mutex_lock(&vgpu->vgpu_lock);
+       vgpu->d3_entered = false;
        intel_vgpu_clean_workloads(vgpu, ALL_ENGINES);
        intel_vgpu_dmabuf_cleanup(vgpu);
        mutex_unlock(&vgpu->vgpu_lock);
@@ -393,6 +394,7 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
        INIT_RADIX_TREE(&vgpu->page_track_tree, GFP_KERNEL);
        idr_init(&vgpu->object_idr);
        intel_vgpu_init_cfg_space(vgpu, param->primary);
+       vgpu->d3_entered = false;
 
        ret = intel_vgpu_init_mmio(vgpu);
        if (ret)
@@ -557,10 +559,15 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
        /* full GPU reset or device model level reset */
        if (engine_mask == ALL_ENGINES || dmlr) {
                intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
-               intel_vgpu_invalidate_ppgtt(vgpu);
+               if (engine_mask == ALL_ENGINES)
+                       intel_vgpu_invalidate_ppgtt(vgpu);
                /*fence will not be reset during virtual reset */
                if (dmlr) {
-                       intel_vgpu_reset_gtt(vgpu);
+                       if (!vgpu->d3_entered) {
+                               intel_vgpu_invalidate_ppgtt(vgpu);
+                               intel_vgpu_destroy_all_ppgtt_mm(vgpu);
+                       }
+                       intel_vgpu_reset_ggtt(vgpu, true);
                        intel_vgpu_reset_resource(vgpu);
                }
 
@@ -572,7 +579,14 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
                        intel_vgpu_reset_cfg_space(vgpu);
                        /* only reset the failsafe mode when dmlr reset */
                        vgpu->failsafe = false;
-                       vgpu->pv_notified = false;
+                       /*
+                        * PCI_D0 is set before dmlr, so reset d3_entered here
+                        * after it has been used.
+                        */
+                       if (vgpu->d3_entered)
+                               vgpu->d3_entered = false;
+                       else
+                               vgpu->pv_notified = false;
                }
        }
 
index 28bc5f1..0569942 100644 (file)
@@ -445,8 +445,6 @@ static void i915_pmu_event_destroy(struct perf_event *event)
                container_of(event->pmu, typeof(*i915), pmu.base);
 
        drm_WARN_ON(&i915->drm, event->parent);
-
-       module_put(THIS_MODULE);
 }
 
 static int
@@ -538,10 +536,8 @@ static int i915_pmu_event_init(struct perf_event *event)
        if (ret)
                return ret;
 
-       if (!event->parent) {
-               __module_get(THIS_MODULE);
+       if (!event->parent)
                event->destroy = i915_pmu_event_destroy;
-       }
 
        return 0;
 }
@@ -1130,6 +1126,7 @@ void i915_pmu_register(struct drm_i915_private *i915)
        if (!pmu->base.attr_groups)
                goto err_attr;
 
+       pmu->base.module        = THIS_MODULE;
        pmu->base.task_ctx_nr   = perf_invalid_context;
        pmu->base.event_init    = i915_pmu_event_init;
        pmu->base.add           = i915_pmu_event_add;
index 939a6ca..632b912 100644 (file)
@@ -8,8 +8,6 @@
 #include "../i915_selftest.h"
 #include "i915_random.h"
 
-#define SZ_8G (1ULL << 33)
-
 static void __igt_dump_block(struct i915_buddy_mm *mm,
                             struct i915_buddy_block *block,
                             bool buddy)
@@ -281,18 +279,22 @@ static int igt_check_mm(struct i915_buddy_mm *mm)
 static void igt_mm_config(u64 *size, u64 *chunk_size)
 {
        I915_RND_STATE(prng);
-       u64 s, ms;
+       u32 s, ms;
 
        /* Nothing fancy, just try to get an interesting bit pattern */
 
        prandom_seed_state(&prng, i915_selftest.random_seed);
 
-       s = i915_prandom_u64_state(&prng) & (SZ_8G - 1);
-       ms = BIT_ULL(12 + (prandom_u32_state(&prng) % ilog2(s >> 12)));
-       s = max(s & -ms, ms);
+       /* Let size be a random number of pages up to 8 GB (2M pages) */
+       s = 1 + i915_prandom_u32_max_state((BIT(33 - 12)) - 1, &prng);
+       /* Let the chunk size be a random power of 2 less than size */
+       ms = BIT(i915_prandom_u32_max_state(ilog2(s), &prng));
+       /* Round size down to the chunk size */
+       s &= -ms;
 
-       *chunk_size = ms;
-       *size = s;
+       /* Convert from pages to bytes */
+       *chunk_size = (u64)ms << 12;
+       *size = (u64)s << 12;
 }
 
 static int igt_buddy_alloc_smoke(void *arg)
index 9a46be0..f127e63 100644 (file)
@@ -24,6 +24,7 @@
 
 #include <linux/pm_domain.h>
 #include <linux/pm_runtime.h>
+#include <linux/iommu.h>
 
 #include <drm/drm_managed.h>
 
@@ -77,6 +78,7 @@ static void mock_device_release(struct drm_device *dev)
        drm_mode_config_cleanup(&i915->drm);
 
 out:
+       i915_params_free(&i915->params);
        put_device(&i915->drm.pdev->dev);
        i915->drm.pdev = NULL;
 }
@@ -118,6 +120,9 @@ struct drm_i915_private *mock_gem_device(void)
 {
        struct drm_i915_private *i915;
        struct pci_dev *pdev;
+#if IS_ENABLED(CONFIG_IOMMU_API) && defined(CONFIG_INTEL_IOMMU)
+       struct dev_iommu iommu;
+#endif
        int err;
 
        pdev = kzalloc(sizeof(*pdev), GFP_KERNEL);
@@ -136,8 +141,10 @@ struct drm_i915_private *mock_gem_device(void)
        dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
 
 #if IS_ENABLED(CONFIG_IOMMU_API) && defined(CONFIG_INTEL_IOMMU)
-       /* hack to disable iommu for the fake device; force identity mapping */
-       pdev->dev.archdata.iommu = (void *)-1;
+       /* HACK HACK HACK to disable iommu for the fake device; force identity mapping */
+       memset(&iommu, 0, sizeof(iommu));
+       iommu.priv = (void *)-1;
+       pdev->dev.iommu = &iommu;
 #endif
 
        pci_set_drvdata(pdev, i915);
@@ -159,6 +166,8 @@ struct drm_i915_private *mock_gem_device(void)
        i915->drm.pdev = pdev;
        drmm_add_final_kfree(&i915->drm, i915);
 
+       i915_params_copy(&i915->params, &i915_modparams);
+
        intel_runtime_pm_init_early(&i915->runtime_pm);
 
        /* Using the global GTT may ask questions about KMS users, so prepare */
index 6639ee9..4859393 100644 (file)
@@ -4915,6 +4915,7 @@ static int dispc_runtime_resume(struct device *dev)
 static const struct dev_pm_ops dispc_pm_ops = {
        .runtime_suspend = dispc_runtime_suspend,
        .runtime_resume = dispc_runtime_resume,
+       SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
 };
 
 struct platform_driver omap_dispchw_driver = {
index 79ddfbf..eeccf40 100644 (file)
@@ -5467,6 +5467,7 @@ static int dsi_runtime_resume(struct device *dev)
 static const struct dev_pm_ops dsi_pm_ops = {
        .runtime_suspend = dsi_runtime_suspend,
        .runtime_resume = dsi_runtime_resume,
+       SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
 };
 
 struct platform_driver omap_dsihw_driver = {
index 4d5739f..6ccbc29 100644 (file)
@@ -1614,6 +1614,7 @@ static int dss_runtime_resume(struct device *dev)
 static const struct dev_pm_ops dss_pm_ops = {
        .runtime_suspend = dss_runtime_suspend,
        .runtime_resume = dss_runtime_resume,
+       SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
 };
 
 struct platform_driver omap_dsshw_driver = {
index 4406ce2..e081793 100644 (file)
@@ -903,6 +903,7 @@ static int venc_runtime_resume(struct device *dev)
 static const struct dev_pm_ops venc_pm_ops = {
        .runtime_suspend = venc_runtime_suspend,
        .runtime_resume = venc_runtime_resume,
+       SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
 };
 
 static const struct of_device_id venc_of_match[] = {
index 5287645..de95dc1 100644 (file)
@@ -89,7 +89,7 @@ static enum drm_mode_status omap_connector_mode_valid(struct drm_connector *conn
                                 struct drm_display_mode *mode)
 {
        struct omap_connector *omap_connector = to_omap_connector(connector);
-       struct drm_display_mode new_mode = { { 0 } };
+       struct drm_display_mode new_mode = {};
        enum drm_mode_status status;
 
        status = omap_connector_mode_fixup(omap_connector->output, mode,
index 1a49e61..e8f7b11 100644 (file)
@@ -262,7 +262,7 @@ static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
                while (len) {
                        size_t pgsize = get_pgsize(iova | paddr, len);
 
-                       ops->map(ops, iova, paddr, pgsize, prot);
+                       ops->map(ops, iova, paddr, pgsize, prot, GFP_KERNEL);
                        iova += pgsize;
                        paddr += pgsize;
                        len -= pgsize;
index 808c8af..09485c7 100644 (file)
@@ -154,7 +154,7 @@ static int tidss_dispc_modeset_init(struct tidss_device *tidss)
                                break;
                        case DISPC_VP_DPI:
                                enc_type = DRM_MODE_ENCODER_DPI;
-                               conn_type = DRM_MODE_CONNECTOR_LVDS;
+                               conn_type = DRM_MODE_CONNECTOR_DPI;
                                break;
                        default:
                                WARN_ON(1);
index f297fd5..cc6a4e7 100644 (file)
@@ -287,11 +287,12 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
         */
 
        if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
-               bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
-
-               ret = ttm_tt_create(bo, zero);
-               if (ret)
-                       goto out_err;
+               if (bo->ttm == NULL) {
+                       bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
+                       ret = ttm_tt_create(bo, zero);
+                       if (ret)
+                               goto out_err;
+               }
 
                ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
                if (ret)
@@ -652,8 +653,13 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo,
        placement.num_busy_placement = 0;
        bdev->driver->evict_flags(bo, &placement);
 
-       if (!placement.num_placement && !placement.num_busy_placement)
-               return ttm_bo_pipeline_gutting(bo);
+       if (!placement.num_placement && !placement.num_busy_placement) {
+               ret = ttm_bo_pipeline_gutting(bo);
+               if (ret)
+                       return ret;
+
+               return ttm_tt_create(bo, false);
+       }
 
        evict_mem = bo->mem;
        evict_mem.mm_node = NULL;
@@ -1192,8 +1198,13 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
        /*
         * Remove the backing store if no placement is given.
         */
-       if (!placement->num_placement && !placement->num_busy_placement)
-               return ttm_bo_pipeline_gutting(bo);
+       if (!placement->num_placement && !placement->num_busy_placement) {
+               ret = ttm_bo_pipeline_gutting(bo);
+               if (ret)
+                       return ret;
+
+               return ttm_tt_create(bo, false);
+       }
 
        /*
         * Check whether we need to move buffer.
@@ -1210,6 +1221,14 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
                ttm_flag_masked(&bo->mem.placement, new_flags,
                                ~TTM_PL_MASK_MEMTYPE);
        }
+       /*
+        * We might need to add a TTM.
+        */
+       if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
+               ret = ttm_tt_create(bo, true);
+               if (ret)
+                       return ret;
+       }
        return 0;
 }
 EXPORT_SYMBOL(ttm_bo_validate);
index 7fb3e0b..e6c8bd2 100644 (file)
@@ -531,15 +531,12 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
                .interruptible = false,
                .no_wait_gpu = false
        };
-       struct ttm_tt *ttm;
+       struct ttm_tt *ttm = bo->ttm;
        pgprot_t prot;
        int ret;
 
-       ret = ttm_tt_create(bo, true);
-       if (ret)
-               return ret;
+       BUG_ON(!ttm);
 
-       ttm = bo->ttm;
        ret = ttm_tt_populate(ttm, &ctx);
        if (ret)
                return ret;
index d7a6537..33526c5 100644 (file)
@@ -351,11 +351,6 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
 
                };
 
-               if (ttm_tt_create(bo, true)) {
-                       ret = VM_FAULT_OOM;
-                       goto out_io_unlock;
-               }
-
                ttm = bo->ttm;
                if (ttm_tt_populate(bo->ttm, &ctx)) {
                        ret = VM_FAULT_OOM;
@@ -510,8 +505,10 @@ static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo,
 int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
                     void *buf, int len, int write)
 {
-       unsigned long offset = (addr) - vma->vm_start;
        struct ttm_buffer_object *bo = vma->vm_private_data;
+       unsigned long offset = (addr) - vma->vm_start +
+               ((vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node))
+                << PAGE_SHIFT);
        int ret;
 
        if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->num_pages)
index 9d1c717..3437711 100644 (file)
@@ -50,9 +50,6 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
 
        dma_resv_assert_held(bo->base.resv);
 
-       if (bo->ttm)
-               return 0;
-
        if (bdev->need_dma32)
                page_flags |= TTM_PAGE_FLAG_DMA32;
 
@@ -70,6 +67,7 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
                page_flags |= TTM_PAGE_FLAG_SG;
                break;
        default:
+               bo->ttm = NULL;
                pr_err("Illegal buffer object type\n");
                return -EINVAL;
        }
index 7a2430e..c8da7ad 100644 (file)
@@ -179,6 +179,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
 
        virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
                              vfpriv->ctx_id, buflist, out_fence);
+       dma_fence_put(&out_fence->f);
        virtio_gpu_notify(vgdev);
        return 0;
 
index 0a5c8cf..4d944a0 100644 (file)
@@ -39,8 +39,8 @@ static void virtio_gpu_config_changed_work_func(struct work_struct *work)
        u32 events_read, events_clear = 0;
 
        /* read the config space */
-       virtio_cread(vgdev->vdev, struct virtio_gpu_config,
-                    events_read, &events_read);
+       virtio_cread_le(vgdev->vdev, struct virtio_gpu_config,
+                       events_read, &events_read);
        if (events_read & VIRTIO_GPU_EVENT_DISPLAY) {
                if (vgdev->has_edid)
                        virtio_gpu_cmd_get_edids(vgdev);
@@ -49,8 +49,8 @@ static void virtio_gpu_config_changed_work_func(struct work_struct *work)
                drm_helper_hpd_irq_event(vgdev->ddev);
                events_clear |= VIRTIO_GPU_EVENT_DISPLAY;
        }
-       virtio_cwrite(vgdev->vdev, struct virtio_gpu_config,
-                     events_clear, &events_clear);
+       virtio_cwrite_le(vgdev->vdev, struct virtio_gpu_config,
+                        events_clear, &events_clear);
 }
 
 static void virtio_gpu_init_vq(struct virtio_gpu_queue *vgvq,
@@ -165,8 +165,8 @@ int virtio_gpu_init(struct drm_device *dev)
        }
 
        /* get display info */
-       virtio_cread(vgdev->vdev, struct virtio_gpu_config,
-                    num_scanouts, &num_scanouts);
+       virtio_cread_le(vgdev->vdev, struct virtio_gpu_config,
+                       num_scanouts, &num_scanouts);
        vgdev->num_scanouts = min_t(uint32_t, num_scanouts,
                                    VIRTIO_GPU_MAX_SCANOUTS);
        if (!vgdev->num_scanouts) {
@@ -176,8 +176,8 @@ int virtio_gpu_init(struct drm_device *dev)
        }
        DRM_INFO("number of scanouts: %d\n", num_scanouts);
 
-       virtio_cread(vgdev->vdev, struct virtio_gpu_config,
-                    num_capsets, &num_capsets);
+       virtio_cread_le(vgdev->vdev, struct virtio_gpu_config,
+                       num_capsets, &num_capsets);
        DRM_INFO("number of cap sets: %d\n", num_capsets);
 
        virtio_gpu_modeset_init(vgdev);
index 346cef5..e83651b 100644 (file)
@@ -79,6 +79,7 @@ void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
                        }
 
                        sg_free_table(shmem->pages);
+                       kfree(shmem->pages);
                        shmem->pages = NULL;
                        drm_gem_shmem_unpin(&bo->base.base);
                }
@@ -141,7 +142,7 @@ static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
                                        struct virtio_gpu_mem_entry **ents,
                                        unsigned int *nents)
 {
-       bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
+       bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
        struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
        struct scatterlist *sg;
        int si, ret;
index 9e663a5..53af60d 100644 (file)
@@ -599,7 +599,7 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
        struct virtio_gpu_transfer_to_host_2d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
-       bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
+       bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
        struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
 
        if (use_dma_api)
@@ -1015,7 +1015,7 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
        struct virtio_gpu_transfer_host_3d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
-       bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
+       bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
        struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
 
        if (use_dma_api)
index 4284c4b..e67e2e8 100644 (file)
@@ -3037,7 +3037,7 @@ static int vmw_cmd_dx_bind_streamoutput(struct vmw_private *dev_priv,
        res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
                                         cmd->body.soid);
        if (IS_ERR(res)) {
-               DRM_ERROR("Cound not find streamoutput to bind.\n");
+               DRM_ERROR("Could not find streamoutput to bind.\n");
                return PTR_ERR(res);
        }
 
index bbce45d..312ed08 100644 (file)
@@ -186,7 +186,7 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
                /* TODO handle none page aligned offsets */
                /* TODO handle more dst & src != 0 */
                /* TODO handle more then one copy */
-               DRM_ERROR("Cant snoop dma request for cursor!\n");
+               DRM_ERROR("Can't snoop dma request for cursor!\n");
                DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
                          box->srcx, box->srcy, box->srcz,
                          box->x, box->y, box->z,
@@ -2575,7 +2575,7 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
                ++i;
        }
 
-       if (i != unit) {
+       if (&con->head == &dev_priv->dev->mode_config.connector_list) {
                DRM_ERROR("Could not find initial display unit.\n");
                ret = -EINVAL;
                goto out_unlock;
@@ -2599,13 +2599,13 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
                        break;
        }
 
-       if (mode->type & DRM_MODE_TYPE_PREFERRED)
-               *p_mode = mode;
-       else {
+       if (&mode->head == &con->modes) {
                WARN_ONCE(true, "Could not find initial preferred mode.\n");
                *p_mode = list_first_entry(&con->modes,
                                           struct drm_display_mode,
                                           head);
+       } else {
+               *p_mode = mode;
        }
 
  out_unlock:
index 16dafff..c4017c7 100644 (file)
@@ -81,7 +81,7 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
        struct vmw_legacy_display_unit *entry;
        struct drm_framebuffer *fb = NULL;
        struct drm_crtc *crtc = NULL;
-       int i = 0;
+       int i;
 
        /* If there is no display topology the host just assumes
         * that the guest will set the same layout as the host.
@@ -92,12 +92,11 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
                        crtc = &entry->base.crtc;
                        w = max(w, crtc->x + crtc->mode.hdisplay);
                        h = max(h, crtc->y + crtc->mode.vdisplay);
-                       i++;
                }
 
                if (crtc == NULL)
                        return 0;
-               fb = entry->base.crtc.primary->state->fb;
+               fb = crtc->primary->state->fb;
 
                return vmw_kms_write_svga(dev_priv, w, h, fb->pitches[0],
                                          fb->format->cpp[0] * 8,
@@ -388,8 +387,6 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
        ldu->base.is_implicit = true;
 
        /* Initialize primary plane */
-       vmw_du_plane_reset(primary);
-
        ret = drm_universal_plane_init(dev, &ldu->base.primary,
                                       0, &vmw_ldu_plane_funcs,
                                       vmw_primary_plane_formats,
@@ -403,8 +400,6 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
        drm_plane_helper_add(primary, &vmw_ldu_primary_plane_helper_funcs);
 
        /* Initialize cursor plane */
-       vmw_du_plane_reset(cursor);
-
        ret = drm_universal_plane_init(dev, &ldu->base.cursor,
                        0, &vmw_ldu_cursor_funcs,
                        vmw_cursor_plane_formats,
@@ -418,7 +413,6 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
 
        drm_plane_helper_add(cursor, &vmw_ldu_cursor_plane_helper_funcs);
 
-       vmw_du_connector_reset(connector);
        ret = drm_connector_init(dev, connector, &vmw_legacy_connector_funcs,
                                 DRM_MODE_CONNECTOR_VIRTUAL);
        if (ret) {
@@ -446,7 +440,6 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
                goto err_free_encoder;
        }
 
-       vmw_du_crtc_reset(crtc);
        ret = drm_crtc_init_with_planes(dev, crtc, &ldu->base.primary,
                                        &ldu->base.cursor,
                                        &vmw_legacy_crtc_funcs, NULL);
@@ -521,6 +514,8 @@ int vmw_kms_ldu_init_display(struct vmw_private *dev_priv)
 
        dev_priv->active_display_unit = vmw_du_legacy;
 
+       drm_mode_config_reset(dev);
+
        DRM_INFO("Legacy Display Unit initialized\n");
 
        return 0;
index 32a22e4..4bf0f5e 100644 (file)
@@ -859,8 +859,6 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
        sou->base.is_implicit = false;
 
        /* Initialize primary plane */
-       vmw_du_plane_reset(primary);
-
        ret = drm_universal_plane_init(dev, &sou->base.primary,
                                       0, &vmw_sou_plane_funcs,
                                       vmw_primary_plane_formats,
@@ -875,8 +873,6 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
        drm_plane_enable_fb_damage_clips(primary);
 
        /* Initialize cursor plane */
-       vmw_du_plane_reset(cursor);
-
        ret = drm_universal_plane_init(dev, &sou->base.cursor,
                        0, &vmw_sou_cursor_funcs,
                        vmw_cursor_plane_formats,
@@ -890,7 +886,6 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
 
        drm_plane_helper_add(cursor, &vmw_sou_cursor_plane_helper_funcs);
 
-       vmw_du_connector_reset(connector);
        ret = drm_connector_init(dev, connector, &vmw_sou_connector_funcs,
                                 DRM_MODE_CONNECTOR_VIRTUAL);
        if (ret) {
@@ -918,8 +913,6 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
                goto err_free_encoder;
        }
 
-
-       vmw_du_crtc_reset(crtc);
        ret = drm_crtc_init_with_planes(dev, crtc, &sou->base.primary,
                                        &sou->base.cursor,
                                        &vmw_screen_object_crtc_funcs, NULL);
@@ -973,6 +966,8 @@ int vmw_kms_sou_init_display(struct vmw_private *dev_priv)
 
        dev_priv->active_display_unit = vmw_du_screen_object;
 
+       drm_mode_config_reset(dev);
+
        DRM_INFO("Screen Objects Display Unit initialized\n");
 
        return 0;
index 16b3856..cf3aafd 100644 (file)
@@ -1738,8 +1738,6 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
        stdu->base.is_implicit = false;
 
        /* Initialize primary plane */
-       vmw_du_plane_reset(primary);
-
        ret = drm_universal_plane_init(dev, primary,
                                       0, &vmw_stdu_plane_funcs,
                                       vmw_primary_plane_formats,
@@ -1754,8 +1752,6 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
        drm_plane_enable_fb_damage_clips(primary);
 
        /* Initialize cursor plane */
-       vmw_du_plane_reset(cursor);
-
        ret = drm_universal_plane_init(dev, cursor,
                        0, &vmw_stdu_cursor_funcs,
                        vmw_cursor_plane_formats,
@@ -1769,8 +1765,6 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
 
        drm_plane_helper_add(cursor, &vmw_stdu_cursor_plane_helper_funcs);
 
-       vmw_du_connector_reset(connector);
-
        ret = drm_connector_init(dev, connector, &vmw_stdu_connector_funcs,
                                 DRM_MODE_CONNECTOR_VIRTUAL);
        if (ret) {
@@ -1798,7 +1792,6 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
                goto err_free_encoder;
        }
 
-       vmw_du_crtc_reset(crtc);
        ret = drm_crtc_init_with_planes(dev, crtc, &stdu->base.primary,
                                        &stdu->base.cursor,
                                        &vmw_stdu_crtc_funcs, NULL);
@@ -1894,6 +1887,8 @@ int vmw_kms_stdu_init_display(struct vmw_private *dev_priv)
                }
        }
 
+       drm_mode_config_reset(dev);
+
        DRM_INFO("Screen Target Display device initialized\n");
 
        return 0;
index 126f93c..3914bfe 100644 (file)
@@ -1969,7 +1969,7 @@ static int vmw_surface_dirty_alloc(struct vmw_resource *res)
                num_mip = 1;
 
        num_subres = num_layers * num_mip;
-       dirty_size = sizeof(*dirty) + num_subres * sizeof(dirty->boxes[0]);
+       dirty_size = struct_size(dirty, boxes, num_subres);
        acc_size = ttm_round_pot(dirty_size);
        ret = ttm_mem_global_alloc(vmw_mem_glob(res->dev_priv),
                                   acc_size, &ctx);
index 3e660fb..013c9e0 100644 (file)
@@ -157,7 +157,8 @@ int xen_drm_front_mode_set(struct xen_drm_front_drm_pipeline *pipeline,
 
 int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info,
                              u64 dbuf_cookie, u32 width, u32 height,
-                             u32 bpp, u64 size, struct page **pages)
+                             u32 bpp, u64 size, u32 offset,
+                             struct page **pages)
 {
        struct xen_drm_front_evtchnl *evtchnl;
        struct xen_drm_front_dbuf *dbuf;
@@ -194,6 +195,7 @@ int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info,
        req->op.dbuf_create.gref_directory =
                        xen_front_pgdir_shbuf_get_dir_start(&dbuf->shbuf);
        req->op.dbuf_create.buffer_sz = size;
+       req->op.dbuf_create.data_ofs = offset;
        req->op.dbuf_create.dbuf_cookie = dbuf_cookie;
        req->op.dbuf_create.width = width;
        req->op.dbuf_create.height = height;
@@ -400,15 +402,15 @@ static int xen_drm_drv_dumb_create(struct drm_file *filp,
        args->size = args->pitch * args->height;
 
        obj = xen_drm_front_gem_create(dev, args->size);
-       if (IS_ERR_OR_NULL(obj)) {
-               ret = PTR_ERR_OR_ZERO(obj);
+       if (IS_ERR(obj)) {
+               ret = PTR_ERR(obj);
                goto fail;
        }
 
        ret = xen_drm_front_dbuf_create(drm_info->front_info,
                                        xen_drm_front_dbuf_to_cookie(obj),
                                        args->width, args->height, args->bpp,
-                                       args->size,
+                                       args->size, 0,
                                        xen_drm_front_gem_get_pages(obj));
        if (ret)
                goto fail_backend;
index f92c258..54486d8 100644 (file)
@@ -145,7 +145,7 @@ int xen_drm_front_mode_set(struct xen_drm_front_drm_pipeline *pipeline,
 
 int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info,
                              u64 dbuf_cookie, u32 width, u32 height,
-                             u32 bpp, u64 size, struct page **pages);
+                             u32 bpp, u64 size, u32 offset, struct page **pages);
 
 int xen_drm_front_fb_attach(struct xen_drm_front_info *front_info,
                            u64 dbuf_cookie, u64 fb_cookie, u32 width,
index 459702f..44f1f70 100644 (file)
@@ -33,6 +33,7 @@ static const u32 plane_formats[] = {
        DRM_FORMAT_ARGB4444,
        DRM_FORMAT_XRGB1555,
        DRM_FORMAT_ARGB1555,
+       DRM_FORMAT_YUYV,
 };
 
 const u32 *xen_drm_front_conn_get_formats(int *format_count)
index f0b85e0..39ff95b 100644 (file)
@@ -83,7 +83,7 @@ static struct xen_gem_object *gem_create(struct drm_device *dev, size_t size)
 
        size = round_up(size, PAGE_SIZE);
        xen_obj = gem_create_obj(dev, size);
-       if (IS_ERR_OR_NULL(xen_obj))
+       if (IS_ERR(xen_obj))
                return xen_obj;
 
        if (drm_info->front_info->cfg.be_alloc) {
@@ -117,7 +117,7 @@ static struct xen_gem_object *gem_create(struct drm_device *dev, size_t size)
         */
        xen_obj->num_pages = DIV_ROUND_UP(size, PAGE_SIZE);
        xen_obj->pages = drm_gem_get_pages(&xen_obj->base);
-       if (IS_ERR_OR_NULL(xen_obj->pages)) {
+       if (IS_ERR(xen_obj->pages)) {
                ret = PTR_ERR(xen_obj->pages);
                xen_obj->pages = NULL;
                goto fail;
@@ -136,7 +136,7 @@ struct drm_gem_object *xen_drm_front_gem_create(struct drm_device *dev,
        struct xen_gem_object *xen_obj;
 
        xen_obj = gem_create(dev, size);
-       if (IS_ERR_OR_NULL(xen_obj))
+       if (IS_ERR(xen_obj))
                return ERR_CAST(xen_obj);
 
        return &xen_obj->base;
@@ -194,7 +194,7 @@ xen_drm_front_gem_import_sg_table(struct drm_device *dev,
 
        size = attach->dmabuf->size;
        xen_obj = gem_create_obj(dev, size);
-       if (IS_ERR_OR_NULL(xen_obj))
+       if (IS_ERR(xen_obj))
                return ERR_CAST(xen_obj);
 
        ret = gem_alloc_pages_array(xen_obj, size);
@@ -210,7 +210,8 @@ xen_drm_front_gem_import_sg_table(struct drm_device *dev,
 
        ret = xen_drm_front_dbuf_create(drm_info->front_info,
                                        xen_drm_front_dbuf_to_cookie(&xen_obj->base),
-                                       0, 0, 0, size, xen_obj->pages);
+                                       0, 0, 0, size, sgt->sgl->offset,
+                                       xen_obj->pages);
        if (ret < 0)
                return ERR_PTR(ret);
 
index 78096bb..ef11b1e 100644 (file)
@@ -60,7 +60,7 @@ fb_create(struct drm_device *dev, struct drm_file *filp,
        int ret;
 
        fb = drm_gem_fb_create_with_funcs(dev, filp, mode_cmd, &fb_funcs);
-       if (IS_ERR_OR_NULL(fb))
+       if (IS_ERR(fb))
                return fb;
 
        gem_obj = fb->obj[0];
index 821f7a7..99158ee 100644 (file)
@@ -44,7 +44,7 @@ MODULE_PARM_DESC(aux_timeout_ms, "DP aux timeout value in msec (default: 50)");
  */
 static uint zynqmp_dp_power_on_delay_ms = 4;
 module_param_named(power_on_delay_ms, zynqmp_dp_power_on_delay_ms, uint, 0444);
-MODULE_PARM_DESC(aux_timeout_ms, "DP power on delay in msec (default: 4)");
+MODULE_PARM_DESC(power_on_delay_ms, "DP power on delay in msec (default: 4)");
 
 /* Link configuration registers */
 #define ZYNQMP_DP_LINK_BW_SET                          0x0
@@ -567,34 +567,37 @@ static int zynqmp_dp_mode_configure(struct zynqmp_dp *dp, int pclock,
                                    u8 current_bw)
 {
        int max_rate = dp->link_config.max_rate;
-       u8 bws[3] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7, DP_LINK_BW_5_4 };
+       u8 bw_code;
        u8 max_lanes = dp->link_config.max_lanes;
        u8 max_link_rate_code = drm_dp_link_rate_to_bw_code(max_rate);
        u8 bpp = dp->config.bpp;
        u8 lane_cnt;
-       s8 i;
 
-       if (current_bw == DP_LINK_BW_1_62) {
+       /* Downshift from current bandwidth */
+       switch (current_bw) {
+       case DP_LINK_BW_5_4:
+               bw_code = DP_LINK_BW_2_7;
+               break;
+       case DP_LINK_BW_2_7:
+               bw_code = DP_LINK_BW_1_62;
+               break;
+       case DP_LINK_BW_1_62:
                dev_err(dp->dev, "can't downshift. already lowest link rate\n");
                return -EINVAL;
-       }
-
-       for (i = ARRAY_SIZE(bws) - 1; i >= 0; i--) {
-               if (current_bw && bws[i] >= current_bw)
-                       continue;
-
-               if (bws[i] <= max_link_rate_code)
-                       break;
+       default:
+               /* If not given, start with max supported */
+               bw_code = max_link_rate_code;
+               break;
        }
 
        for (lane_cnt = 1; lane_cnt <= max_lanes; lane_cnt <<= 1) {
                int bw;
                u32 rate;
 
-               bw = drm_dp_bw_code_to_link_rate(bws[i]);
+               bw = drm_dp_bw_code_to_link_rate(bw_code);
                rate = zynqmp_dp_max_rate(bw, lane_cnt, bpp);
                if (pclock <= rate) {
-                       dp->mode.bw_code = bws[i];
+                       dp->mode.bw_code = bw_code;
                        dp->mode.lane_cnt = lane_cnt;
                        dp->mode.pclock = pclock;
                        return dp->mode.bw_code;
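
Editorial note: the rewritten zynqmp_dp_mode_configure() steps down the fixed ladder of DisplayPort bandwidth codes one notch per call instead of scanning an array. A standalone sketch of the same downshift pattern; dp_downshift() is a hypothetical helper, and the DP_LINK_BW_* values are the standard DisplayPort bandwidth codes:

#include <stdio.h>

#define DP_LINK_BW_1_62 0x06    /* 1.62 Gbps per lane */
#define DP_LINK_BW_2_7  0x0a    /* 2.70 Gbps per lane */
#define DP_LINK_BW_5_4  0x14    /* 5.40 Gbps per lane */

/* Return the next lower bandwidth code, or -1 if already at the bottom */
static int dp_downshift(unsigned char current_bw, unsigned char max_bw)
{
        switch (current_bw) {
        case DP_LINK_BW_5_4:
                return DP_LINK_BW_2_7;
        case DP_LINK_BW_2_7:
                return DP_LINK_BW_1_62;
        case DP_LINK_BW_1_62:
                return -1;              /* can't downshift any further */
        default:
                return max_bw;          /* no current rate: start at max */
        }
}

int main(void)
{
        int bw = dp_downshift(0, DP_LINK_BW_5_4);

        while (bw > 0) {
                printf("trying bw code 0x%02x\n", bw);
                bw = dp_downshift(bw, DP_LINK_BW_5_4);
        }
        return 0;
}
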
@@ -1308,7 +1311,7 @@ zynqmp_dp_connector_detect(struct drm_connector *connector, bool force)
                ret = drm_dp_dpcd_read(&dp->aux, 0x0, dp->dpcd,
                                       sizeof(dp->dpcd));
                if (ret < 0) {
-                       dev_dbg(dp->dev, "DPCD read failes");
+                       dev_dbg(dp->dev, "DPCD read failed");
                        goto disconnected;
                }
 
index f2f3ef8..5180c56 100644 (file)
@@ -529,7 +529,7 @@ EXPORT_SYMBOL(vga_get);
  *
  * 0 on success, negative error code on failure.
  */
-int vga_tryget(struct pci_dev *pdev, unsigned int rsrc)
+static int vga_tryget(struct pci_dev *pdev, unsigned int rsrc)
 {
        struct vga_device *vgadev;
        unsigned long flags;
@@ -554,7 +554,6 @@ bail:
        spin_unlock_irqrestore(&vga_lock, flags);
        return rc;
 }
-EXPORT_SYMBOL(vga_tryget);
 
 /**
  * vga_put - release lock on legacy VGA resources
index 45e87dc..05315b4 100644 (file)
@@ -20,7 +20,7 @@ config HID
          removed from the HID bus by the transport-layer drivers, such as
          usbhid (USB_HID) and hidp (BT_HIDP).
 
-         For docs and specs, see http://www.usb.org/developers/hidpage/
+         For docs and specs, see https://www.usb.org/developers/hidpage/
 
          If unsure, say Y.
 
index db1b55d..f64517b 100644 (file)
@@ -11,7 +11,7 @@
  * host communicates with the CP2112 via raw HID reports.
  *
  * Data Sheet:
- *   http://www.silabs.com/Support%20Documents/TechnicalDocs/CP2112.pdf
+ *   https://www.silabs.com/Support%20Documents/TechnicalDocs/CP2112.pdf
  * Programming Interface Specification:
  *   https://www.silabs.com/documents/public/application-notes/an495-cp2112-interface-specification.pdf
  */
index 6f370e0..6221888 100644 (file)
 #define USB_DEVICE_ID_LENOVO_CUSBKBD   0x6047
 #define USB_DEVICE_ID_LENOVO_CBTKBD    0x6048
 #define USB_DEVICE_ID_LENOVO_SCROLLPOINT_OPTICAL       0x6049
+#define USB_DEVICE_ID_LENOVO_TP10UBKBD 0x6062
 #define USB_DEVICE_ID_LENOVO_TPPRODOCK 0x6067
 #define USB_DEVICE_ID_LENOVO_X1_COVER  0x6085
 #define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_608D     0x608d
 #define USB_DEVICE_ID_LOGITECH_G27_WHEEL       0xc29b
 #define USB_DEVICE_ID_LOGITECH_WII_WHEEL       0xc29c
 #define USB_DEVICE_ID_LOGITECH_ELITE_KBD       0xc30a
+#define USB_DEVICE_ID_LOGITECH_GROUP_AUDIO     0x0882
 #define USB_DEVICE_ID_S510_RECEIVER    0xc50c
 #define USB_DEVICE_ID_S510_RECEIVER_2  0xc517
 #define USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500  0xc512
index dea9cc6..b8eabf2 100644 (file)
@@ -350,13 +350,13 @@ static int hidinput_query_battery_capacity(struct hid_device *dev)
        u8 *buf;
        int ret;
 
-       buf = kmalloc(2, GFP_KERNEL);
+       buf = kmalloc(4, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;
 
-       ret = hid_hw_raw_request(dev, dev->battery_report_id, buf, 2,
+       ret = hid_hw_raw_request(dev, dev->battery_report_id, buf, 4,
                                 dev->battery_report_type, HID_REQ_GET_REPORT);
-       if (ret != 2) {
+       if (ret < 2) {
                kfree(buf);
                return -ENODATA;
        }
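
Editorial note: the capacity query now allocates a little headroom and accepts any reply of at least two bytes, since some devices answer with more data than the two bytes the driver actually parses. A toy sketch of that defensive-read pattern; fake_raw_request() is a made-up stand-in for hid_hw_raw_request():

#include <stdio.h>
#include <string.h>

#define BUF_LEN 4       /* headroom: some devices answer with extra bytes */
#define MIN_LEN 2       /* we only ever parse the first two bytes */

/* Stand-in for hid_hw_raw_request(): copies a fake 3-byte report */
static int fake_raw_request(unsigned char *buf, int len)
{
        static const unsigned char report[] = { 0x02, 0x55, 0xaa };
        int n = len < (int)sizeof(report) ? len : (int)sizeof(report);

        memcpy(buf, report, n);
        return n;
}

int main(void)
{
        unsigned char buf[BUF_LEN];
        int ret = fake_raw_request(buf, sizeof(buf));

        if (ret < MIN_LEN) {
                fprintf(stderr, "short report (%d bytes)\n", ret);
                return 1;
        }
        printf("capacity byte = %u\n", buf[1]);
        return 0;
}
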
@@ -1560,21 +1560,12 @@ static bool __hidinput_change_resolution_multipliers(struct hid_device *hid,
 {
        struct hid_usage *usage;
        bool update_needed = false;
+       bool get_report_completed = false;
        int i, j;
 
        if (report->maxfield == 0)
                return false;
 
-       /*
-        * If we have more than one feature within this report we
-        * need to fill in the bits from the others before we can
-        * overwrite the ones for the Resolution Multiplier.
-        */
-       if (report->maxfield > 1) {
-               hid_hw_request(hid, report, HID_REQ_GET_REPORT);
-               hid_hw_wait(hid);
-       }
-
        for (i = 0; i < report->maxfield; i++) {
                __s32 value = use_logical_max ?
                              report->field[i]->logical_maximum :
@@ -1593,6 +1584,25 @@ static bool __hidinput_change_resolution_multipliers(struct hid_device *hid,
                        if (usage->hid != HID_GD_RESOLUTION_MULTIPLIER)
                                continue;
 
+                       /*
+                        * If we have more than one feature within this
+                        * report we need to fill in the bits from the
+                        * others before we can overwrite the ones for the
+                        * Resolution Multiplier.
+                        *
+                        * But if we're not allowed to read from the device,
+                        * we just bail. Such a device should not exist
+                        * anyway.
+                        */
+                       if (!get_report_completed && report->maxfield > 1) {
+                               if (hid->quirks & HID_QUIRK_NO_INIT_REPORTS)
+                                       return update_needed;
+
+                               hid_hw_request(hid, report, HID_REQ_GET_REPORT);
+                               hid_hw_wait(hid);
+                               get_report_completed = true;
+                       }
+
                        report->field[i]->value[j] = value;
                        update_needed = true;
                }
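
Editorial note: the GET_REPORT is now issued lazily: only once, only when a Resolution Multiplier usage is actually present in a multi-field report, and never for HID_QUIRK_NO_INIT_REPORTS devices. A self-contained toy version of that fetch-once-before-first-write pattern; toy_fetch()/toy_update() are illustrative names, not driver code:

#include <stdbool.h>
#include <stdio.h>

#define NUM_FIELDS 3

struct toy_report {
        int field[NUM_FIELDS];
        int fetch_count;        /* how many times we "read the device" */
};

/* Stand-in for hid_hw_request(GET_REPORT) + hid_hw_wait() */
static void toy_fetch(struct toy_report *r)
{
        r->fetch_count++;
}

/*
 * Write `value` into every field, fetching the report at most once,
 * right before the first write, and only if reads are allowed at all.
 */
static bool toy_update(struct toy_report *r, int value, bool reads_allowed)
{
        bool fetched = false;
        bool updated = false;

        for (int i = 0; i < NUM_FIELDS; i++) {
                if (!fetched && NUM_FIELDS > 1) {
                        if (!reads_allowed)
                                return updated;
                        toy_fetch(r);
                        fetched = true;
                }
                r->field[i] = value;
                updated = true;
        }
        return updated;
}

int main(void)
{
        struct toy_report r = { 0 };

        toy_update(&r, 2, true);
        printf("fetched %d time(s)\n", r.fetch_count);  /* prints 1 */
        return 0;
}
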
index 96fa2a2..c6c8e20 100644 (file)
 #include <linux/hid.h>
 #include <linux/input.h>
 #include <linux/leds.h>
+#include <linux/workqueue.h>
 
 #include "hid-ids.h"
 
-struct lenovo_drvdata_tpkbd {
+struct lenovo_drvdata {
+       u8 led_report[3]; /* Must be first for proper alignment */
        int led_state;
+       struct mutex led_report_mutex;
        struct led_classdev led_mute;
        struct led_classdev led_micmute;
+       struct work_struct fn_lock_sync_work;
+       struct hid_device *hdev;
        int press_to_select;
        int dragging;
        int release_to_select;
        int select_right;
        int sensitivity;
        int press_speed;
-};
-
-struct lenovo_drvdata_cptkbd {
        u8 middlebutton_state; /* 0:Up, 1:Down (undecided), 2:Scrolling */
        bool fn_lock;
-       int sensitivity;
 };
 
 #define map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, EV_KEY, (c))
 
+#define TP10UBKBD_LED_OUTPUT_REPORT    9
+
+#define TP10UBKBD_FN_LOCK_LED          0x54
+#define TP10UBKBD_MUTE_LED             0x64
+#define TP10UBKBD_MICMUTE_LED          0x74
+
+#define TP10UBKBD_LED_OFF              1
+#define TP10UBKBD_LED_ON               2
+
+static void lenovo_led_set_tp10ubkbd(struct hid_device *hdev, u8 led_code,
+                                    enum led_brightness value)
+{
+       struct lenovo_drvdata *data = hid_get_drvdata(hdev);
+       int ret;
+
+       mutex_lock(&data->led_report_mutex);
+
+       data->led_report[0] = TP10UBKBD_LED_OUTPUT_REPORT;
+       data->led_report[1] = led_code;
+       data->led_report[2] = value ? TP10UBKBD_LED_ON : TP10UBKBD_LED_OFF;
+       ret = hid_hw_raw_request(hdev, data->led_report[0], data->led_report, 3,
+                                HID_OUTPUT_REPORT, HID_REQ_SET_REPORT);
+       if (ret)
+               hid_err(hdev, "Set LED output report error: %d\n", ret);
+
+       mutex_unlock(&data->led_report_mutex);
+}
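
Editorial note: the LED control above is a three-byte SET_REPORT: report ID 9, an LED selector (0x54 Fn-lock, 0x64 mute, 0x74 mic-mute) and 1 for off or 2 for on. A hedged user-space illustration of the same byte layout written through a hidraw node; the /dev/hidraw0 path is an assumption, and a real tool would first have to locate the correct node and might need the control-endpoint SET_REPORT path instead of a plain write():

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        /* report ID 9, Fn-lock LED (0x54), 2 = on (1 would be off) */
        unsigned char report[3] = { 0x09, 0x54, 0x02 };
        int fd = open("/dev/hidraw0", O_WRONLY);        /* assumed node */

        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (write(fd, report, sizeof(report)) != sizeof(report))
                perror("write");

        close(fd);
        return 0;
}
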
+
+static void lenovo_tp10ubkbd_sync_fn_lock(struct work_struct *work)
+{
+       struct lenovo_drvdata *data =
+               container_of(work, struct lenovo_drvdata, fn_lock_sync_work);
+
+       lenovo_led_set_tp10ubkbd(data->hdev, TP10UBKBD_FN_LOCK_LED,
+                                data->fn_lock);
+}
+
 static const __u8 lenovo_pro_dock_need_fixup_collection[] = {
        0x05, 0x88,             /* Usage Page (Vendor Usage Page 0x88)  */
        0x09, 0x01,             /* Usage (Vendor Usage 0x01)            */
@@ -179,6 +217,44 @@ static int lenovo_input_mapping_scrollpoint(struct hid_device *hdev,
        return 0;
 }
 
+static int lenovo_input_mapping_tp10_ultrabook_kbd(struct hid_device *hdev,
+               struct hid_input *hi, struct hid_field *field,
+               struct hid_usage *usage, unsigned long **bit, int *max)
+{
+       /*
+        * The ThinkPad 10 Ultrabook Keyboard uses 0x000c0001 usage for
+        * a bunch of keys which have no standard consumer page code.
+        */
+       if (usage->hid == 0x000c0001) {
+               switch (usage->usage_index) {
+               case 8: /* Fn-Esc: Fn-lock toggle */
+                       map_key_clear(KEY_FN_ESC);
+                       return 1;
+               case 9: /* Fn-F4: Mic mute */
+                       map_key_clear(KEY_MICMUTE);
+                       return 1;
+               case 10: /* Fn-F7: Control panel */
+                       map_key_clear(KEY_CONFIG);
+                       return 1;
+               case 11: /* Fn-F8: Search (magnifying glass) */
+                       map_key_clear(KEY_SEARCH);
+                       return 1;
+               case 12: /* Fn-F10: Open My computer (6 boxes) */
+                       map_key_clear(KEY_FILE);
+                       return 1;
+               }
+       }
+
+       /*
+        * The Ultrabook Keyboard sends a spurious F23 key-press when resuming
+        * from suspend; it does not actually have an F23 key, so ignore it.
+        */
+       if (usage->hid == 0x00070072)
+               return -1;
+
+       return 0;
+}
+
 static int lenovo_input_mapping(struct hid_device *hdev,
                struct hid_input *hi, struct hid_field *field,
                struct hid_usage *usage, unsigned long **bit, int *max)
@@ -199,6 +275,9 @@ static int lenovo_input_mapping(struct hid_device *hdev,
        case USB_DEVICE_ID_LENOVO_SCROLLPOINT_OPTICAL:
                return lenovo_input_mapping_scrollpoint(hdev, hi, field,
                                                        usage, bit, max);
+       case USB_DEVICE_ID_LENOVO_TP10UBKBD:
+               return lenovo_input_mapping_tp10_ultrabook_kbd(hdev, hi, field,
+                                                              usage, bit, max);
        default:
                return 0;
        }
@@ -242,7 +321,7 @@ static int lenovo_send_cmd_cptkbd(struct hid_device *hdev,
 static void lenovo_features_set_cptkbd(struct hid_device *hdev)
 {
        int ret;
-       struct lenovo_drvdata_cptkbd *cptkbd_data = hid_get_drvdata(hdev);
+       struct lenovo_drvdata *cptkbd_data = hid_get_drvdata(hdev);
 
        ret = lenovo_send_cmd_cptkbd(hdev, 0x05, cptkbd_data->fn_lock);
        if (ret)
@@ -253,23 +332,23 @@ static void lenovo_features_set_cptkbd(struct hid_device *hdev)
                hid_err(hdev, "Sensitivity setting failed: %d\n", ret);
 }
 
-static ssize_t attr_fn_lock_show_cptkbd(struct device *dev,
+static ssize_t attr_fn_lock_show(struct device *dev,
                struct device_attribute *attr,
                char *buf)
 {
        struct hid_device *hdev = to_hid_device(dev);
-       struct lenovo_drvdata_cptkbd *cptkbd_data = hid_get_drvdata(hdev);
+       struct lenovo_drvdata *data = hid_get_drvdata(hdev);
 
-       return snprintf(buf, PAGE_SIZE, "%u\n", cptkbd_data->fn_lock);
+       return snprintf(buf, PAGE_SIZE, "%u\n", data->fn_lock);
 }
 
-static ssize_t attr_fn_lock_store_cptkbd(struct device *dev,
+static ssize_t attr_fn_lock_store(struct device *dev,
                struct device_attribute *attr,
                const char *buf,
                size_t count)
 {
        struct hid_device *hdev = to_hid_device(dev);
-       struct lenovo_drvdata_cptkbd *cptkbd_data = hid_get_drvdata(hdev);
+       struct lenovo_drvdata *data = hid_get_drvdata(hdev);
        int value;
 
        if (kstrtoint(buf, 10, &value))
@@ -277,8 +356,17 @@ static ssize_t attr_fn_lock_store_cptkbd(struct device *dev,
        if (value < 0 || value > 1)
                return -EINVAL;
 
-       cptkbd_data->fn_lock = !!value;
-       lenovo_features_set_cptkbd(hdev);
+       data->fn_lock = !!value;
+
+       switch (hdev->product) {
+       case USB_DEVICE_ID_LENOVO_CUSBKBD:
+       case USB_DEVICE_ID_LENOVO_CBTKBD:
+               lenovo_features_set_cptkbd(hdev);
+               break;
+       case USB_DEVICE_ID_LENOVO_TP10UBKBD:
+               lenovo_led_set_tp10ubkbd(hdev, TP10UBKBD_FN_LOCK_LED, value);
+               break;
+       }
 
        return count;
 }
@@ -288,7 +376,7 @@ static ssize_t attr_sensitivity_show_cptkbd(struct device *dev,
                char *buf)
 {
        struct hid_device *hdev = to_hid_device(dev);
-       struct lenovo_drvdata_cptkbd *cptkbd_data = hid_get_drvdata(hdev);
+       struct lenovo_drvdata *cptkbd_data = hid_get_drvdata(hdev);
 
        return snprintf(buf, PAGE_SIZE, "%u\n",
                cptkbd_data->sensitivity);
@@ -300,7 +388,7 @@ static ssize_t attr_sensitivity_store_cptkbd(struct device *dev,
                size_t count)
 {
        struct hid_device *hdev = to_hid_device(dev);
-       struct lenovo_drvdata_cptkbd *cptkbd_data = hid_get_drvdata(hdev);
+       struct lenovo_drvdata *cptkbd_data = hid_get_drvdata(hdev);
        int value;
 
        if (kstrtoint(buf, 10, &value) || value < 1 || value > 255)
@@ -313,10 +401,10 @@ static ssize_t attr_sensitivity_store_cptkbd(struct device *dev,
 }
 
 
-static struct device_attribute dev_attr_fn_lock_cptkbd =
+static struct device_attribute dev_attr_fn_lock =
        __ATTR(fn_lock, S_IWUSR | S_IRUGO,
-                       attr_fn_lock_show_cptkbd,
-                       attr_fn_lock_store_cptkbd);
+                       attr_fn_lock_show,
+                       attr_fn_lock_store);
 
 static struct device_attribute dev_attr_sensitivity_cptkbd =
        __ATTR(sensitivity, S_IWUSR | S_IRUGO,
@@ -325,7 +413,7 @@ static struct device_attribute dev_attr_sensitivity_cptkbd =
 
 
 static struct attribute *lenovo_attributes_cptkbd[] = {
-       &dev_attr_fn_lock_cptkbd.attr,
+       &dev_attr_fn_lock.attr,
        &dev_attr_sensitivity_cptkbd.attr,
        NULL
 };
@@ -354,10 +442,28 @@ static int lenovo_raw_event(struct hid_device *hdev,
        return 0;
 }
 
+static int lenovo_event_tp10ubkbd(struct hid_device *hdev,
+               struct hid_field *field, struct hid_usage *usage, __s32 value)
+{
+       struct lenovo_drvdata *data = hid_get_drvdata(hdev);
+
+       if (usage->type == EV_KEY && usage->code == KEY_FN_ESC && value == 1) {
+               /*
+                * The user has toggled the Fn-lock state. Toggle our own
+                * cached value of it and sync our value to the keyboard to
+                * ensure things are in sync (the syncing should be a no-op).
+                */
+               data->fn_lock = !data->fn_lock;
+               schedule_work(&data->fn_lock_sync_work);
+       }
+
+       return 0;
+}
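
Editorial note: lenovo_event_tp10ubkbd() runs in the HID input path, where sleeping is generally not allowed, so the LED write (a mutex plus hid_hw_raw_request()) is pushed to a work item rather than done inline. A minimal kernel-style sketch of that defer-to-workqueue pattern, written as a hypothetical demo module rather than the driver itself:

#include <linux/module.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct demo_data {
        bool fn_lock;
        struct work_struct sync_work;
};

static struct demo_data demo;

/* Runs in process context, so it may sleep (e.g. do HID/USB I/O) */
static void demo_sync_work(struct work_struct *work)
{
        struct demo_data *data = container_of(work, struct demo_data, sync_work);

        pr_info("syncing fn_lock=%d to the hardware\n", data->fn_lock);
}

/* Imagine this being called from atomic (interrupt) context */
static void demo_toggle_event(void)
{
        demo.fn_lock = !demo.fn_lock;
        schedule_work(&demo.sync_work);         /* defer the sleeping part */
}

static int __init demo_init(void)
{
        INIT_WORK(&demo.sync_work, demo_sync_work);
        demo_toggle_event();
        return 0;
}

static void __exit demo_exit(void)
{
        cancel_work_sync(&demo.sync_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
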
+
 static int lenovo_event_cptkbd(struct hid_device *hdev,
                struct hid_field *field, struct hid_usage *usage, __s32 value)
 {
-       struct lenovo_drvdata_cptkbd *cptkbd_data = hid_get_drvdata(hdev);
+       struct lenovo_drvdata *cptkbd_data = hid_get_drvdata(hdev);
 
        /* "wheel" scroll events */
        if (usage->type == EV_REL && (usage->code == REL_WHEEL ||
@@ -396,6 +502,8 @@ static int lenovo_event(struct hid_device *hdev, struct hid_field *field,
        case USB_DEVICE_ID_LENOVO_CUSBKBD:
        case USB_DEVICE_ID_LENOVO_CBTKBD:
                return lenovo_event_cptkbd(hdev, field, usage, value);
+       case USB_DEVICE_ID_LENOVO_TP10UBKBD:
+               return lenovo_event_tp10ubkbd(hdev, field, usage, value);
        default:
                return 0;
        }
@@ -404,7 +512,7 @@ static int lenovo_event(struct hid_device *hdev, struct hid_field *field,
 static int lenovo_features_set_tpkbd(struct hid_device *hdev)
 {
        struct hid_report *report;
-       struct lenovo_drvdata_tpkbd *data_pointer = hid_get_drvdata(hdev);
+       struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev);
 
        report = hdev->report_enum[HID_FEATURE_REPORT].report_id_hash[4];
 
@@ -425,7 +533,7 @@ static ssize_t attr_press_to_select_show_tpkbd(struct device *dev,
                char *buf)
 {
        struct hid_device *hdev = to_hid_device(dev);
-       struct lenovo_drvdata_tpkbd *data_pointer = hid_get_drvdata(hdev);
+       struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev);
 
        return snprintf(buf, PAGE_SIZE, "%u\n", data_pointer->press_to_select);
 }
@@ -436,7 +544,7 @@ static ssize_t attr_press_to_select_store_tpkbd(struct device *dev,
                size_t count)
 {
        struct hid_device *hdev = to_hid_device(dev);
-       struct lenovo_drvdata_tpkbd *data_pointer = hid_get_drvdata(hdev);
+       struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev);
        int value;
 
        if (kstrtoint(buf, 10, &value))
@@ -455,7 +563,7 @@ static ssize_t attr_dragging_show_tpkbd(struct device *dev,
                char *buf)
 {
        struct hid_device *hdev = to_hid_device(dev);
-       struct lenovo_drvdata_tpkbd *data_pointer = hid_get_drvdata(hdev);
+       struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev);
 
        return snprintf(buf, PAGE_SIZE, "%u\n", data_pointer->dragging);
 }
@@ -466,7 +574,7 @@ static ssize_t attr_dragging_store_tpkbd(struct device *dev,
                size_t count)
 {
        struct hid_device *hdev = to_hid_device(dev);
-       struct lenovo_drvdata_tpkbd *data_pointer = hid_get_drvdata(hdev);
+       struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev);
        int value;
 
        if (kstrtoint(buf, 10, &value))
@@ -485,7 +593,7 @@ static ssize_t attr_release_to_select_show_tpkbd(struct device *dev,
                char *buf)
 {
        struct hid_device *hdev = to_hid_device(dev);
-       struct lenovo_drvdata_tpkbd *data_pointer = hid_get_drvdata(hdev);
+       struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev);
 
        return snprintf(buf, PAGE_SIZE, "%u\n", data_pointer->release_to_select);
 }
@@ -496,7 +604,7 @@ static ssize_t attr_release_to_select_store_tpkbd(struct device *dev,
                size_t count)
 {
        struct hid_device *hdev = to_hid_device(dev);
-       struct lenovo_drvdata_tpkbd *data_pointer = hid_get_drvdata(hdev);
+       struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev);
        int value;
 
        if (kstrtoint(buf, 10, &value))
@@ -515,7 +623,7 @@ static ssize_t attr_select_right_show_tpkbd(struct device *dev,
                char *buf)
 {
        struct hid_device *hdev = to_hid_device(dev);
-       struct lenovo_drvdata_tpkbd *data_pointer = hid_get_drvdata(hdev);
+       struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev);
 
        return snprintf(buf, PAGE_SIZE, "%u\n", data_pointer->select_right);
 }
@@ -526,7 +634,7 @@ static ssize_t attr_select_right_store_tpkbd(struct device *dev,
                size_t count)
 {
        struct hid_device *hdev = to_hid_device(dev);
-       struct lenovo_drvdata_tpkbd *data_pointer = hid_get_drvdata(hdev);
+       struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev);
        int value;
 
        if (kstrtoint(buf, 10, &value))
@@ -545,7 +653,7 @@ static ssize_t attr_sensitivity_show_tpkbd(struct device *dev,
                char *buf)
 {
        struct hid_device *hdev = to_hid_device(dev);
-       struct lenovo_drvdata_tpkbd *data_pointer = hid_get_drvdata(hdev);
+       struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev);
 
        return snprintf(buf, PAGE_SIZE, "%u\n",
                data_pointer->sensitivity);
@@ -557,7 +665,7 @@ static ssize_t attr_sensitivity_store_tpkbd(struct device *dev,
                size_t count)
 {
        struct hid_device *hdev = to_hid_device(dev);
-       struct lenovo_drvdata_tpkbd *data_pointer = hid_get_drvdata(hdev);
+       struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev);
        int value;
 
        if (kstrtoint(buf, 10, &value) || value < 1 || value > 255)
@@ -574,7 +682,7 @@ static ssize_t attr_press_speed_show_tpkbd(struct device *dev,
                char *buf)
 {
        struct hid_device *hdev = to_hid_device(dev);
-       struct lenovo_drvdata_tpkbd *data_pointer = hid_get_drvdata(hdev);
+       struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev);
 
        return snprintf(buf, PAGE_SIZE, "%u\n",
                data_pointer->press_speed);
@@ -586,7 +694,7 @@ static ssize_t attr_press_speed_store_tpkbd(struct device *dev,
                size_t count)
 {
        struct hid_device *hdev = to_hid_device(dev);
-       struct lenovo_drvdata_tpkbd *data_pointer = hid_get_drvdata(hdev);
+       struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev);
        int value;
 
        if (kstrtoint(buf, 10, &value) || value < 1 || value > 255)
@@ -642,12 +750,23 @@ static const struct attribute_group lenovo_attr_group_tpkbd = {
        .attrs = lenovo_attributes_tpkbd,
 };
 
-static enum led_brightness lenovo_led_brightness_get_tpkbd(
+static void lenovo_led_set_tpkbd(struct hid_device *hdev)
+{
+       struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev);
+       struct hid_report *report;
+
+       report = hdev->report_enum[HID_OUTPUT_REPORT].report_id_hash[3];
+       report->field[0]->value[0] = (data_pointer->led_state >> 0) & 1;
+       report->field[0]->value[1] = (data_pointer->led_state >> 1) & 1;
+       hid_hw_request(hdev, report, HID_REQ_SET_REPORT);
+}
+
+static enum led_brightness lenovo_led_brightness_get(
                        struct led_classdev *led_cdev)
 {
        struct device *dev = led_cdev->dev->parent;
        struct hid_device *hdev = to_hid_device(dev);
-       struct lenovo_drvdata_tpkbd *data_pointer = hid_get_drvdata(hdev);
+       struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev);
        int led_nr = 0;
 
        if (led_cdev == &data_pointer->led_micmute)
@@ -658,13 +777,13 @@ static enum led_brightness lenovo_led_brightness_get_tpkbd(
                                : LED_OFF;
 }
 
-static void lenovo_led_brightness_set_tpkbd(struct led_classdev *led_cdev,
+static void lenovo_led_brightness_set(struct led_classdev *led_cdev,
                        enum led_brightness value)
 {
        struct device *dev = led_cdev->dev->parent;
        struct hid_device *hdev = to_hid_device(dev);
-       struct lenovo_drvdata_tpkbd *data_pointer = hid_get_drvdata(hdev);
-       struct hid_report *report;
+       struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev);
+       u8 tp10ubkbd_led[] = { TP10UBKBD_MUTE_LED, TP10UBKBD_MICMUTE_LED };
        int led_nr = 0;
 
        if (led_cdev == &data_pointer->led_micmute)
@@ -675,21 +794,58 @@ static void lenovo_led_brightness_set_tpkbd(struct led_classdev *led_cdev,
        else
                data_pointer->led_state |= 1 << led_nr;
 
-       report = hdev->report_enum[HID_OUTPUT_REPORT].report_id_hash[3];
-       report->field[0]->value[0] = (data_pointer->led_state >> 0) & 1;
-       report->field[0]->value[1] = (data_pointer->led_state >> 1) & 1;
-       hid_hw_request(hdev, report, HID_REQ_SET_REPORT);
+       switch (hdev->product) {
+       case USB_DEVICE_ID_LENOVO_TPKBD:
+               lenovo_led_set_tpkbd(hdev);
+               break;
+       case USB_DEVICE_ID_LENOVO_TP10UBKBD:
+               lenovo_led_set_tp10ubkbd(hdev, tp10ubkbd_led[led_nr], value);
+               break;
+       }
 }
 
-static int lenovo_probe_tpkbd(struct hid_device *hdev)
+static int lenovo_register_leds(struct hid_device *hdev)
 {
-       struct device *dev = &hdev->dev;
-       struct lenovo_drvdata_tpkbd *data_pointer;
-       size_t name_sz = strlen(dev_name(dev)) + 16;
-       char *name_mute, *name_micmute;
-       int i;
+       struct lenovo_drvdata *data = hid_get_drvdata(hdev);
+       size_t name_sz = strlen(dev_name(&hdev->dev)) + 16;
+       char *name_mute, *name_micm;
        int ret;
 
+       name_mute = devm_kzalloc(&hdev->dev, name_sz, GFP_KERNEL);
+       name_micm = devm_kzalloc(&hdev->dev, name_sz, GFP_KERNEL);
+       if (name_mute == NULL || name_micm == NULL) {
+               hid_err(hdev, "Could not allocate memory for led data\n");
+               return -ENOMEM;
+       }
+       snprintf(name_mute, name_sz, "%s:amber:mute", dev_name(&hdev->dev));
+       snprintf(name_micm, name_sz, "%s:amber:micmute", dev_name(&hdev->dev));
+
+       data->led_mute.name = name_mute;
+       data->led_mute.brightness_get = lenovo_led_brightness_get;
+       data->led_mute.brightness_set = lenovo_led_brightness_set;
+       data->led_mute.dev = &hdev->dev;
+       ret = led_classdev_register(&hdev->dev, &data->led_mute);
+       if (ret < 0)
+               return ret;
+
+       data->led_micmute.name = name_micm;
+       data->led_micmute.brightness_get = lenovo_led_brightness_get;
+       data->led_micmute.brightness_set = lenovo_led_brightness_set;
+       data->led_micmute.dev = &hdev->dev;
+       ret = led_classdev_register(&hdev->dev, &data->led_micmute);
+       if (ret < 0) {
+               led_classdev_unregister(&data->led_mute);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int lenovo_probe_tpkbd(struct hid_device *hdev)
+{
+       struct lenovo_drvdata *data_pointer;
+       int i, ret;
+
        /*
         * Only register extra settings against subdevice where input_mapping
         * set drvdata to 1, i.e. the trackpoint.
@@ -712,7 +868,7 @@ static int lenovo_probe_tpkbd(struct hid_device *hdev)
                hid_warn(hdev, "Could not create sysfs group: %d\n", ret);
 
        data_pointer = devm_kzalloc(&hdev->dev,
-                                   sizeof(struct lenovo_drvdata_tpkbd),
+                                   sizeof(struct lenovo_drvdata),
                                    GFP_KERNEL);
        if (data_pointer == NULL) {
                hid_err(hdev, "Could not allocate memory for driver data\n");
@@ -724,37 +880,11 @@ static int lenovo_probe_tpkbd(struct hid_device *hdev)
        data_pointer->sensitivity = 0xa0;
        data_pointer->press_speed = 0x38;
 
-       name_mute = devm_kzalloc(&hdev->dev, name_sz, GFP_KERNEL);
-       name_micmute = devm_kzalloc(&hdev->dev, name_sz, GFP_KERNEL);
-       if (name_mute == NULL || name_micmute == NULL) {
-               hid_err(hdev, "Could not allocate memory for led data\n");
-               ret = -ENOMEM;
-               goto err;
-       }
-       snprintf(name_mute, name_sz, "%s:amber:mute", dev_name(dev));
-       snprintf(name_micmute, name_sz, "%s:amber:micmute", dev_name(dev));
-
        hid_set_drvdata(hdev, data_pointer);
 
-       data_pointer->led_mute.name = name_mute;
-       data_pointer->led_mute.brightness_get = lenovo_led_brightness_get_tpkbd;
-       data_pointer->led_mute.brightness_set = lenovo_led_brightness_set_tpkbd;
-       data_pointer->led_mute.dev = dev;
-       ret = led_classdev_register(dev, &data_pointer->led_mute);
-       if (ret < 0)
-               goto err;
-
-       data_pointer->led_micmute.name = name_micmute;
-       data_pointer->led_micmute.brightness_get =
-               lenovo_led_brightness_get_tpkbd;
-       data_pointer->led_micmute.brightness_set =
-               lenovo_led_brightness_set_tpkbd;
-       data_pointer->led_micmute.dev = dev;
-       ret = led_classdev_register(dev, &data_pointer->led_micmute);
-       if (ret < 0) {
-               led_classdev_unregister(&data_pointer->led_mute);
+       ret = lenovo_register_leds(hdev);
+       if (ret)
                goto err;
-       }
 
        lenovo_features_set_tpkbd(hdev);
 
@@ -767,7 +897,7 @@ err:
 static int lenovo_probe_cptkbd(struct hid_device *hdev)
 {
        int ret;
-       struct lenovo_drvdata_cptkbd *cptkbd_data;
+       struct lenovo_drvdata *cptkbd_data;
 
        /* All the custom action happens on the USBMOUSE device for USB */
        if (hdev->product == USB_DEVICE_ID_LENOVO_CUSBKBD
@@ -811,6 +941,57 @@ static int lenovo_probe_cptkbd(struct hid_device *hdev)
        return 0;
 }
 
+static struct attribute *lenovo_attributes_tp10ubkbd[] = {
+       &dev_attr_fn_lock.attr,
+       NULL
+};
+
+static const struct attribute_group lenovo_attr_group_tp10ubkbd = {
+       .attrs = lenovo_attributes_tp10ubkbd,
+};
+
+static int lenovo_probe_tp10ubkbd(struct hid_device *hdev)
+{
+       struct lenovo_drvdata *data;
+       int ret;
+
+       /* All the custom action happens on the USBMOUSE device for USB */
+       if (hdev->type != HID_TYPE_USBMOUSE)
+               return 0;
+
+       data = devm_kzalloc(&hdev->dev, sizeof(*data), GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
+
+       mutex_init(&data->led_report_mutex);
+       INIT_WORK(&data->fn_lock_sync_work, lenovo_tp10ubkbd_sync_fn_lock);
+       data->hdev = hdev;
+
+       hid_set_drvdata(hdev, data);
+
+       /*
+        * The Thinkpad 10 ultrabook USB kbd dock's Fn-lock defaults to on.
+        * We cannot read the state, only set it, so we force it to on here
+        * (which should be a no-op) to make sure that our state matches the
+        * keyboard's Fn-lock state. This is the same as what Windows does.
+        */
+       data->fn_lock = true;
+       lenovo_led_set_tp10ubkbd(hdev, TP10UBKBD_FN_LOCK_LED, data->fn_lock);
+
+       ret = sysfs_create_group(&hdev->dev.kobj, &lenovo_attr_group_tp10ubkbd);
+       if (ret)
+               return ret;
+
+       ret = lenovo_register_leds(hdev);
+       if (ret)
+               goto err;
+
+       return 0;
+err:
+       sysfs_remove_group(&hdev->dev.kobj, &lenovo_attr_group_tp10ubkbd);
+       return ret;
+}
+
 static int lenovo_probe(struct hid_device *hdev,
                const struct hid_device_id *id)
 {
@@ -836,6 +1017,9 @@ static int lenovo_probe(struct hid_device *hdev,
        case USB_DEVICE_ID_LENOVO_CBTKBD:
                ret = lenovo_probe_cptkbd(hdev);
                break;
+       case USB_DEVICE_ID_LENOVO_TP10UBKBD:
+               ret = lenovo_probe_tp10ubkbd(hdev);
+               break;
        default:
                ret = 0;
                break;
@@ -852,7 +1036,7 @@ err:
 
 static void lenovo_remove_tpkbd(struct hid_device *hdev)
 {
-       struct lenovo_drvdata_tpkbd *data_pointer = hid_get_drvdata(hdev);
+       struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev);
 
        /*
         * Only the trackpoint half of the keyboard has drvdata and stuff that
@@ -874,6 +1058,20 @@ static void lenovo_remove_cptkbd(struct hid_device *hdev)
                        &lenovo_attr_group_cptkbd);
 }
 
+static void lenovo_remove_tp10ubkbd(struct hid_device *hdev)
+{
+       struct lenovo_drvdata *data = hid_get_drvdata(hdev);
+
+       if (data == NULL)
+               return;
+
+       led_classdev_unregister(&data->led_micmute);
+       led_classdev_unregister(&data->led_mute);
+
+       sysfs_remove_group(&hdev->dev.kobj, &lenovo_attr_group_tp10ubkbd);
+       cancel_work_sync(&data->fn_lock_sync_work);
+}
+
 static void lenovo_remove(struct hid_device *hdev)
 {
        switch (hdev->product) {
@@ -884,6 +1082,9 @@ static void lenovo_remove(struct hid_device *hdev)
        case USB_DEVICE_ID_LENOVO_CBTKBD:
                lenovo_remove_cptkbd(hdev);
                break;
+       case USB_DEVICE_ID_LENOVO_TP10UBKBD:
+               lenovo_remove_tp10ubkbd(hdev);
+               break;
        }
 
        hid_hw_stop(hdev);
@@ -920,6 +1121,7 @@ static const struct hid_device_id lenovo_devices[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_IBM, USB_DEVICE_ID_IBM_SCROLLPOINT_800DPI_OPTICAL) },
        { HID_USB_DEVICE(USB_VENDOR_ID_IBM, USB_DEVICE_ID_IBM_SCROLLPOINT_800DPI_OPTICAL_PRO) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_SCROLLPOINT_OPTICAL) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TP10UBKBD) },
        { }
 };
 
index e1b93ce..0d27ccb 100644 (file)
@@ -4,7 +4,7 @@
  *
  * Copyright (c) 2020, Rishi Gupta <gupt21@gmail.com>
  *
- * Datasheet: http://ww1.microchip.com/downloads/en/DeviceDoc/20005565B.pdf
+ * Datasheet: https://ww1.microchip.com/downloads/en/DeviceDoc/20005565B.pdf
  */
 
 #include <linux/module.h>
index 934fc0a..c242150 100644 (file)
@@ -179,6 +179,7 @@ static const struct hid_device_id hid_quirks[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD2, USB_DEVICE_ID_SMARTJOY_DUAL_PLUS), HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT },
        { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_QUAD_USB_JOYPAD), HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT },
        { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE), HID_QUIRK_MULTI_INPUT },
+       { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_GROUP_AUDIO), HID_QUIRK_NOGET },
 
        { 0 }
 };
index b0fbd11..b2e17ef 100644 (file)
@@ -16,7 +16,7 @@ MODULE_LICENSE("GPL");
 
 /*
  * Protocol information from:
- * http://brandonw.net/udraw/
+ * https://brandonw.net/udraw/
  * and the source code of:
  * https://vvvv.org/contribution/udraw-hid
  */
index 92874db..679e142 100644 (file)
@@ -1870,6 +1870,11 @@ static const struct hid_device_id wiimote_hid_devices[] = {
                                USB_DEVICE_ID_NINTENDO_WIIMOTE2) },
        { }
 };
+
+bool wiimote_dpad_as_analog = false;
+module_param_named(dpad_as_analog, wiimote_dpad_as_analog, bool, 0644);
+MODULE_PARM_DESC(dpad_as_analog, "Use D-Pad as main analog input");
+
 MODULE_DEVICE_TABLE(hid, wiimote_hid_devices);
 
 static struct hid_driver wiimote_hid_driver = {
index 2c39253..213c58b 100644 (file)
@@ -1088,12 +1088,28 @@ static void wiimod_classic_in_ext(struct wiimote_data *wdata, const __u8 *ext)
         * is the same as before.
         */
 
+       static const s8 digital_to_analog[3] = {0x20, 0, -0x20};
+
        if (wdata->state.flags & WIIPROTO_FLAG_MP_ACTIVE) {
-               lx = ext[0] & 0x3e;
-               ly = ext[1] & 0x3e;
+               if (wiimote_dpad_as_analog) {
+                       lx = digital_to_analog[1 - !(ext[4] & 0x80)
+                               + !(ext[1] & 0x01)];
+                       ly = digital_to_analog[1 - !(ext[4] & 0x40)
+                               + !(ext[0] & 0x01)];
+               } else {
+                       lx = (ext[0] & 0x3e) - 0x20;
+                       ly = (ext[1] & 0x3e) - 0x20;
+               }
        } else {
-               lx = ext[0] & 0x3f;
-               ly = ext[1] & 0x3f;
+               if (wiimote_dpad_as_analog) {
+                       lx = digital_to_analog[1 - !(ext[4] & 0x80)
+                               + !(ext[5] & 0x02)];
+                       ly = digital_to_analog[1 - !(ext[4] & 0x40)
+                               + !(ext[5] & 0x01)];
+               } else {
+                       lx = (ext[0] & 0x3f) - 0x20;
+                       ly = (ext[1] & 0x3f) - 0x20;
+               }
        }
 
        rx = (ext[0] >> 3) & 0x18;
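
Editorial note: the digital_to_analog[] lookup turns two active-low d-pad bits into a centred axis value: start from index 1, subtract 1 if the positive-direction button is held, add 1 if the negative-direction one is held, so the table yields +0x20, 0 or -0x20. A standalone worked example of that index arithmetic; dpad_axis() is a hypothetical helper taking already-decoded button states:

#include <stdio.h>

/* +0x20 for the "positive" direction, -0x20 for the "negative" one */
static const signed char digital_to_analog[3] = { 0x20, 0, -0x20 };

/*
 * pos/neg are 1 when the corresponding d-pad button is pressed:
 * index 0 = positive only, 1 = none or both, 2 = negative only.
 */
static int dpad_axis(int pos_pressed, int neg_pressed)
{
        return digital_to_analog[1 - pos_pressed + neg_pressed];
}

int main(void)
{
        printf("right only: %d\n", dpad_axis(1, 0));    /*  32 */
        printf("neither:    %d\n", dpad_axis(0, 0));    /*   0 */
        printf("left only:  %d\n", dpad_axis(0, 1));    /* -32 */
        printf("both:       %d\n", dpad_axis(1, 1));    /*   0 */
        return 0;
}
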
@@ -1110,20 +1126,14 @@ static void wiimod_classic_in_ext(struct wiimote_data *wdata, const __u8 *ext)
        rt <<= 1;
        lt <<= 1;
 
-       input_report_abs(wdata->extension.input, ABS_HAT1X, lx - 0x20);
-       input_report_abs(wdata->extension.input, ABS_HAT1Y, ly - 0x20);
+       input_report_abs(wdata->extension.input, ABS_HAT1X, lx);
+       input_report_abs(wdata->extension.input, ABS_HAT1Y, ly);
        input_report_abs(wdata->extension.input, ABS_HAT2X, rx - 0x20);
        input_report_abs(wdata->extension.input, ABS_HAT2Y, ry - 0x20);
        input_report_abs(wdata->extension.input, ABS_HAT3X, rt);
        input_report_abs(wdata->extension.input, ABS_HAT3Y, lt);
 
        input_report_key(wdata->extension.input,
-                        wiimod_classic_map[WIIMOD_CLASSIC_KEY_RIGHT],
-                        !(ext[4] & 0x80));
-       input_report_key(wdata->extension.input,
-                        wiimod_classic_map[WIIMOD_CLASSIC_KEY_DOWN],
-                        !(ext[4] & 0x40));
-       input_report_key(wdata->extension.input,
                         wiimod_classic_map[WIIMOD_CLASSIC_KEY_LT],
                         !(ext[4] & 0x20));
        input_report_key(wdata->extension.input,
@@ -1157,20 +1167,29 @@ static void wiimod_classic_in_ext(struct wiimote_data *wdata, const __u8 *ext)
                         wiimod_classic_map[WIIMOD_CLASSIC_KEY_ZR],
                         !(ext[5] & 0x04));
 
-       if (wdata->state.flags & WIIPROTO_FLAG_MP_ACTIVE) {
-               input_report_key(wdata->extension.input,
-                        wiimod_classic_map[WIIMOD_CLASSIC_KEY_LEFT],
-                        !(ext[1] & 0x01));
-               input_report_key(wdata->extension.input,
-                        wiimod_classic_map[WIIMOD_CLASSIC_KEY_UP],
-                        !(ext[0] & 0x01));
-       } else {
+       if (!wiimote_dpad_as_analog) {
                input_report_key(wdata->extension.input,
-                        wiimod_classic_map[WIIMOD_CLASSIC_KEY_LEFT],
-                        !(ext[5] & 0x02));
+                                wiimod_classic_map[WIIMOD_CLASSIC_KEY_RIGHT],
+                                !(ext[4] & 0x80));
                input_report_key(wdata->extension.input,
-                        wiimod_classic_map[WIIMOD_CLASSIC_KEY_UP],
-                        !(ext[5] & 0x01));
+                                wiimod_classic_map[WIIMOD_CLASSIC_KEY_DOWN],
+                                !(ext[4] & 0x40));
+
+               if (wdata->state.flags & WIIPROTO_FLAG_MP_ACTIVE) {
+                       input_report_key(wdata->extension.input,
+                                wiimod_classic_map[WIIMOD_CLASSIC_KEY_LEFT],
+                                !(ext[1] & 0x01));
+                       input_report_key(wdata->extension.input,
+                                wiimod_classic_map[WIIMOD_CLASSIC_KEY_UP],
+                                !(ext[0] & 0x01));
+               } else {
+                       input_report_key(wdata->extension.input,
+                                wiimod_classic_map[WIIMOD_CLASSIC_KEY_LEFT],
+                                !(ext[5] & 0x02));
+                       input_report_key(wdata->extension.input,
+                                wiimod_classic_map[WIIMOD_CLASSIC_KEY_UP],
+                                !(ext[5] & 0x01));
+               }
        }
 
        input_sync(wdata->extension.input);
index b2a26a0..ad4ff83 100644 (file)
@@ -162,6 +162,8 @@ struct wiimote_data {
        struct work_struct init_worker;
 };
 
+extern bool wiimote_dpad_as_analog;
+
 /* wiimote modules */
 
 enum wiimod_module {
index f491d8b..c6d48a8 100644 (file)
@@ -106,6 +106,11 @@ static inline bool ish_should_enter_d0i3(struct pci_dev *pdev)
        return !pm_suspend_via_firmware() || pdev->device == CHV_DEVICE_ID;
 }
 
+static inline bool ish_should_leave_d0i3(struct pci_dev *pdev)
+{
+       return !pm_resume_via_firmware() || pdev->device == CHV_DEVICE_ID;
+}
+
 /**
  * ish_probe() - PCI driver probe callback
  * @pdev:      pci device
@@ -215,9 +220,7 @@ static void __maybe_unused ish_resume_handler(struct work_struct *work)
        struct ishtp_device *dev = pci_get_drvdata(pdev);
        int ret;
 
-       /* Check the NO_D3 flag to distinguish the resume paths */
-       if (pdev->dev_flags & PCI_DEV_FLAGS_NO_D3) {
-               pdev->dev_flags &= ~PCI_DEV_FLAGS_NO_D3;
+       if (ish_should_leave_d0i3(pdev) && !dev->suspend_flag) {
                disable_irq_wake(pdev->irq);
 
                ishtp_send_resume(dev);
@@ -281,8 +284,11 @@ static int __maybe_unused ish_suspend(struct device *device)
                         */
                        ish_disable_dma(dev);
                } else {
-                       /* Set the NO_D3 flag, the ISH would enter D0i3 */
-                       pdev->dev_flags |= PCI_DEV_FLAGS_NO_D3;
+                       /*
+                        * Save state so the PCI core keeps the device at D0;
+                        * the ISH will then enter D0i3.
+                        */
+                       pci_save_state(pdev);
 
                        enable_irq_wake(pdev->irq);
                }
index 17a638f..492dd64 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/wait.h>
 #include <linux/workqueue.h>
 #include <linux/string.h>
+#include <linux/timekeeping.h>
 
 #include <linux/usb.h>
 
@@ -95,6 +96,18 @@ static int hid_start_in(struct hid_device *hid)
                                set_bit(HID_NO_BANDWIDTH, &usbhid->iofl);
                } else {
                        clear_bit(HID_NO_BANDWIDTH, &usbhid->iofl);
+
+                       if (test_bit(HID_RESUME_RUNNING, &usbhid->iofl)) {
+                               /*
+                                * In case events are generated while nobody was
+                                * listening, some are released when the device
+                                * is re-opened. Wait 50 msec for the queue to
+                                * empty before allowing events to go through
+                                * hid.
+                                */
+                               usbhid->input_start_time =
+                                       ktime_add_ms(ktime_get_coarse(), 50);
+                       }
                }
        }
        spin_unlock_irqrestore(&usbhid->lock, flags);
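
Editorial note: rather than sleeping 50 ms in usbhid_open(), the driver now records a deadline with ktime_add_ms(ktime_get_coarse(), 50) when the input URB is restarted and, as the next hunk shows, drops URB completions that arrive before it. A user-space analogue of that quiet-period pattern using CLOCK_MONOTONIC; the helper names are invented for the example:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static struct timespec quiet_until;

static struct timespec now_plus_ms(long ms)
{
        struct timespec t;

        clock_gettime(CLOCK_MONOTONIC, &t);
        t.tv_nsec += (ms % 1000) * 1000000L;
        t.tv_sec += ms / 1000 + t.tv_nsec / 1000000000L;
        t.tv_nsec %= 1000000000L;
        return t;
}

static bool before(const struct timespec *a, const struct timespec *b)
{
        return a->tv_sec < b->tv_sec ||
               (a->tv_sec == b->tv_sec && a->tv_nsec < b->tv_nsec);
}

static void on_resume(void)
{
        quiet_until = now_plus_ms(50);  /* ignore stale events for 50 ms */
}

static void on_event(const char *ev)
{
        struct timespec now;

        clock_gettime(CLOCK_MONOTONIC, &now);
        if (before(&now, &quiet_until))
                return;                 /* still inside the quiet period */
        printf("handling %s\n", ev);
}

int main(void)
{
        on_resume();
        on_event("stale");              /* dropped */
        nanosleep(&(struct timespec){ .tv_sec = 0, .tv_nsec = 60000000 }, NULL);
        on_event("fresh");              /* handled */
        return 0;
}
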
@@ -280,20 +293,23 @@ static void hid_irq_in(struct urb *urb)
                if (!test_bit(HID_OPENED, &usbhid->iofl))
                        break;
                usbhid_mark_busy(usbhid);
-               if (!test_bit(HID_RESUME_RUNNING, &usbhid->iofl)) {
-                       hid_input_report(urb->context, HID_INPUT_REPORT,
-                                        urb->transfer_buffer,
-                                        urb->actual_length, 1);
-                       /*
-                        * autosuspend refused while keys are pressed
-                        * because most keyboards don't wake up when
-                        * a key is released
-                        */
-                       if (hid_check_keys_pressed(hid))
-                               set_bit(HID_KEYS_PRESSED, &usbhid->iofl);
-                       else
-                               clear_bit(HID_KEYS_PRESSED, &usbhid->iofl);
+               if (test_bit(HID_RESUME_RUNNING, &usbhid->iofl)) {
+                       if (ktime_before(ktime_get_coarse(),
+                                        usbhid->input_start_time))
+                               break;
+                       clear_bit(HID_RESUME_RUNNING, &usbhid->iofl);
                }
+               hid_input_report(urb->context, HID_INPUT_REPORT,
+                                urb->transfer_buffer, urb->actual_length, 1);
+               /*
+                * autosuspend refused while keys are pressed
+                * because most keyboards don't wake up when
+                * a key is released
+                */
+               if (hid_check_keys_pressed(hid))
+                       set_bit(HID_KEYS_PRESSED, &usbhid->iofl);
+               else
+                       clear_bit(HID_KEYS_PRESSED, &usbhid->iofl);
                break;
        case -EPIPE:            /* stall */
                usbhid_mark_busy(usbhid);
@@ -720,17 +736,6 @@ static int usbhid_open(struct hid_device *hid)
 
        usb_autopm_put_interface(usbhid->intf);
 
-       /*
-        * In case events are generated while nobody was listening,
-        * some are released when the device is re-opened.
-        * Wait 50 msec for the queue to empty before allowing events
-        * to go through hid.
-        */
-       if (res == 0)
-               msleep(50);
-
-       clear_bit(HID_RESUME_RUNNING, &usbhid->iofl);
-
  Done:
        mutex_unlock(&usbhid->mutex);
        return res;
@@ -1667,7 +1672,7 @@ struct usb_interface *usbhid_find_interface(int minor)
 
 static int __init hid_init(void)
 {
-       int retval = -ENOMEM;
+       int retval;
 
        retval = hid_quirks_init(quirks_param, BUS_USB, MAX_USBHID_BOOT_QUIRKS);
        if (retval)
index 75fe85d..c6ad684 100644 (file)
@@ -13,6 +13,7 @@
 
 #include <linux/types.h>
 #include <linux/slab.h>
+#include <linux/ktime.h>
 #include <linux/list.h>
 #include <linux/mutex.h>
 #include <linux/timer.h>
@@ -83,6 +84,7 @@ struct usbhid_device {
        struct mutex mutex;                                             /* start/stop/open/close */
        spinlock_t lock;                                                /* fifo spinlock */
        unsigned long iofl;                                             /* I/O flags (CTRL_RUNNING, OUT_RUNNING) */
+       ktime_t input_start_time;                                       /* When to start handling input */
        struct timer_list io_retry;                                     /* Retry timer */
        unsigned long stop_retry;                                       /* Time to give up, in jiffies */
        unsigned int retry_delay;                                       /* Delay length in ms */
index b50081c..910b6e9 100644 (file)
@@ -86,6 +86,10 @@ static int hyperv_die_event(struct notifier_block *nb, unsigned long val,
        struct die_args *die = (struct die_args *)args;
        struct pt_regs *regs = die->regs;
 
+       /* Don't notify Hyper-V unless the die event is an oops */
+       if (val != DIE_OOPS)
+               return NOTIFY_DONE;
+
        /*
         * Hyper-V should be notified only once about a panic.  If we will be
         * doing hyperv_report_panic_msg() later with kmsg data, don't do
index 30b7b3e..17bb642 100644 (file)
@@ -447,7 +447,7 @@ static int pwm_fan_resume(struct device *dev)
                return 0;
 
        pwm_get_args(ctx->pwm, &pargs);
-       duty = DIV_ROUND_UP(ctx->pwm_value * (pargs.period - 1), MAX_PWM);
+       duty = DIV_ROUND_UP_ULL(ctx->pwm_value * (pargs.period - 1), MAX_PWM);
        ret = pwm_config(ctx->pwm, duty, pargs.period);
        if (ret)
                return ret;
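
Editorial note: with struct pwm_args.period moving to a 64-bit type, a plain DIV_ROUND_UP() would open-code a 64-bit division, which 32-bit kernels cannot link; DIV_ROUND_UP_ULL() goes through the proper 64-bit division helpers instead. A small plain-C sketch of the round-up division itself on 64-bit operands (the numbers are illustrative, not taken from the driver):

#include <inttypes.h>
#include <stdio.h>

/* Round-up division on 64-bit operands, as DIV_ROUND_UP_ULL provides */
static uint64_t div_round_up_u64(uint64_t n, uint64_t d)
{
        return (n + d - 1) / d;
}

int main(void)
{
        uint64_t max_pwm = 255;
        uint64_t pwm_value = 128;
        uint64_t period_ns = 40000;     /* e.g. a 25 kHz PWM period */
        uint64_t duty = div_round_up_u64(pwm_value * (period_ns - 1), max_pwm);

        printf("duty = %" PRIu64 " ns\n", duty);
        return 0;
}
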
index 826a105..32cd263 100644 (file)
@@ -6,9 +6,10 @@
 menuconfig HWSPINLOCK
        bool "Hardware Spinlock drivers"
 
+if HWSPINLOCK
+
 config HWSPINLOCK_OMAP
        tristate "OMAP Hardware Spinlock device"
-       depends on HWSPINLOCK
        depends on ARCH_OMAP4 || SOC_OMAP5 || SOC_DRA7XX || SOC_AM33XX || SOC_AM43XX || ARCH_K3 || COMPILE_TEST
        help
          Say y here to support the OMAP Hardware Spinlock device (firstly
@@ -18,7 +19,6 @@ config HWSPINLOCK_OMAP
 
 config HWSPINLOCK_QCOM
        tristate "Qualcomm Hardware Spinlock device"
-       depends on HWSPINLOCK
        depends on ARCH_QCOM || COMPILE_TEST
        select MFD_SYSCON
        help
@@ -30,7 +30,6 @@ config HWSPINLOCK_QCOM
 
 config HWSPINLOCK_SIRF
        tristate "SIRF Hardware Spinlock device"
-       depends on HWSPINLOCK
        depends on ARCH_SIRF || COMPILE_TEST
        help
          Say y here to support the SIRF Hardware Spinlock device, which
@@ -43,7 +42,6 @@ config HWSPINLOCK_SIRF
 config HWSPINLOCK_SPRD
        tristate "SPRD Hardware Spinlock device"
        depends on ARCH_SPRD || COMPILE_TEST
-       depends on HWSPINLOCK
        help
          Say y here to support the SPRD Hardware Spinlock device.
 
@@ -52,7 +50,6 @@ config HWSPINLOCK_SPRD
 config HWSPINLOCK_STM32
        tristate "STM32 Hardware Spinlock device"
        depends on MACH_STM32MP157 || COMPILE_TEST
-       depends on HWSPINLOCK
        help
          Say y here to support the STM32 Hardware Spinlock device.
 
@@ -60,7 +57,6 @@ config HWSPINLOCK_STM32
 
 config HSEM_U8500
        tristate "STE Hardware Semaphore functionality"
-       depends on HWSPINLOCK
        depends on ARCH_U8500 || COMPILE_TEST
        help
          Say y here to support the STE Hardware Semaphore functionality, which
@@ -68,3 +64,5 @@ config HSEM_U8500
          SoC.
 
          If unsure, say N.
+
+endif # HWSPINLOCK
index f0da544..3647109 100644 (file)
@@ -70,41 +70,79 @@ static const struct of_device_id qcom_hwspinlock_of_match[] = {
 };
 MODULE_DEVICE_TABLE(of, qcom_hwspinlock_of_match);
 
-static int qcom_hwspinlock_probe(struct platform_device *pdev)
+static struct regmap *qcom_hwspinlock_probe_syscon(struct platform_device *pdev,
+                                                  u32 *base, u32 *stride)
 {
-       struct hwspinlock_device *bank;
        struct device_node *syscon;
-       struct reg_field field;
        struct regmap *regmap;
-       size_t array_size;
-       u32 stride;
-       u32 base;
        int ret;
-       int i;
 
        syscon = of_parse_phandle(pdev->dev.of_node, "syscon", 0);
-       if (!syscon) {
-               dev_err(&pdev->dev, "no syscon property\n");
-               return -ENODEV;
-       }
+       if (!syscon)
+               return ERR_PTR(-ENODEV);
 
        regmap = syscon_node_to_regmap(syscon);
        of_node_put(syscon);
        if (IS_ERR(regmap))
-               return PTR_ERR(regmap);
+               return regmap;
 
-       ret = of_property_read_u32_index(pdev->dev.of_node, "syscon", 1, &base);
+       ret = of_property_read_u32_index(pdev->dev.of_node, "syscon", 1, base);
        if (ret < 0) {
                dev_err(&pdev->dev, "no offset in syscon\n");
-               return -EINVAL;
+               return ERR_PTR(-EINVAL);
        }
 
-       ret = of_property_read_u32_index(pdev->dev.of_node, "syscon", 2, &stride);
+       ret = of_property_read_u32_index(pdev->dev.of_node, "syscon", 2, stride);
        if (ret < 0) {
                dev_err(&pdev->dev, "no stride syscon\n");
-               return -EINVAL;
+               return ERR_PTR(-EINVAL);
        }
 
+       return regmap;
+}
+
+static const struct regmap_config tcsr_mutex_config = {
+       .reg_bits               = 32,
+       .reg_stride             = 4,
+       .val_bits               = 32,
+       .max_register           = 0x40000,
+       .fast_io                = true,
+};
+
+static struct regmap *qcom_hwspinlock_probe_mmio(struct platform_device *pdev,
+                                                u32 *offset, u32 *stride)
+{
+       struct device *dev = &pdev->dev;
+       void __iomem *base;
+
+       /* All modern platforms have an offset of 0 and a stride of 4k */
+       *offset = 0;
+       *stride = 0x1000;
+
+       base = devm_platform_ioremap_resource(pdev, 0);
+       if (IS_ERR(base))
+               return ERR_CAST(base);
+
+       return devm_regmap_init_mmio(dev, base, &tcsr_mutex_config);
+}
+
+static int qcom_hwspinlock_probe(struct platform_device *pdev)
+{
+       struct hwspinlock_device *bank;
+       struct reg_field field;
+       struct regmap *regmap;
+       size_t array_size;
+       u32 stride;
+       u32 base;
+       int i;
+
+       regmap = qcom_hwspinlock_probe_syscon(pdev, &base, &stride);
+       if (IS_ERR(regmap) && PTR_ERR(regmap) == -ENODEV)
+               regmap = qcom_hwspinlock_probe_mmio(pdev, &base, &stride);
+
+       if (IS_ERR(regmap))
+               return PTR_ERR(regmap);
+
        array_size = QCOM_MUTEX_NUM_LOCKS * sizeof(struct hwspinlock);
        bank = devm_kzalloc(&pdev->dev, sizeof(*bank) + array_size, GFP_KERNEL);
        if (!bank)
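
Editorial note: the probe now tries the legacy syscon binding first and falls back to a plain MMIO regmap only when that binding is absent (-ENODEV), while any other error is still propagated. A tiny sketch of that try-then-fall-back pattern with hypothetical probe helpers returning plain error codes:

#include <errno.h>
#include <stdio.h>

/* Hypothetical probes: return 0 on success, -ENODEV if the binding is absent */
static int probe_syscon(void)
{
        return -ENODEV;         /* pretend there is no "syscon" phandle */
}

static int probe_mmio(void)
{
        return 0;               /* pretend a plain "reg" resource exists */
}

static int probe(void)
{
        int ret = probe_syscon();

        /* Only fall back when the legacy binding simply isn't there */
        if (ret == -ENODEV)
                ret = probe_mmio();

        return ret;
}

int main(void)
{
        printf("probe: %d\n", probe());
        return 0;
}
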
index 3889787..710fbef 100644 (file)
@@ -542,8 +542,8 @@ int i2c_pca_add_numbered_bus(struct i2c_adapter *adap)
 }
 EXPORT_SYMBOL(i2c_pca_add_numbered_bus);
 
-MODULE_AUTHOR("Ian Campbell <icampbell@arcom.com>, "
-       "Wolfram Sang <kernel@pengutronix.de>");
+MODULE_AUTHOR("Ian Campbell <icampbell@arcom.com>");
+MODULE_AUTHOR("Wolfram Sang <kernel@pengutronix.de>");
 MODULE_DESCRIPTION("I2C-Bus PCA9564/PCA9665 algorithm");
 MODULE_LICENSE("GPL");
 
index 88639e5..293e7a0 100644 (file)
@@ -146,6 +146,7 @@ config I2C_I801
            Elkhart Lake (PCH)
            Tiger Lake (PCH)
            Jasper Lake (SOC)
+           Emmitsburg (PCH)
 
          This driver can also be built as a module.  If so, the module
          will be called i2c-i801.
index a43deea..fb93152 100644 (file)
@@ -519,9 +519,9 @@ static struct pci_driver ali1535_driver = {
 
 module_pci_driver(ali1535_driver);
 
-MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl>, "
-             "Philip Edelbrock <phil@netroedge.com>, "
-             "Mark D. Studebaker <mdsxyz123@yahoo.com> "
-             "and Dan Eaton <dan.eaton@rocketlogix.com>");
+MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl>");
+MODULE_AUTHOR("Philip Edelbrock <phil@netroedge.com>");
+MODULE_AUTHOR("Mark D. Studebaker <mdsxyz123@yahoo.com>");
+MODULE_AUTHOR("Dan Eaton <dan.eaton@rocketlogix.com>");
 MODULE_DESCRIPTION("ALI1535 SMBus driver");
 MODULE_LICENSE("GPL");
index 02185a1..cc58fea 100644 (file)
@@ -502,8 +502,8 @@ static struct pci_driver ali15x3_driver = {
 
 module_pci_driver(ali15x3_driver);
 
-MODULE_AUTHOR ("Frodo Looijaard <frodol@dds.nl>, "
-               "Philip Edelbrock <phil@netroedge.com>, "
-               "and Mark D. Studebaker <mdsxyz123@yahoo.com>");
+MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl>");
+MODULE_AUTHOR("Philip Edelbrock <phil@netroedge.com>");
+MODULE_AUTHOR("Mark D. Studebaker <mdsxyz123@yahoo.com>");
 MODULE_DESCRIPTION("ALI15X3 SMBus driver");
 MODULE_LICENSE("GPL");
index 2b14fef..34862ad 100644 (file)
@@ -381,7 +381,7 @@ static s32 amd8111_access(struct i2c_adapter * adap, u16 addr,
                        if (status)
                                return status;
                        len = min_t(u8, len, I2C_SMBUS_BLOCK_MAX);
-                       /* fall through */
+                       fallthrough;
                case I2C_SMBUS_I2C_BLOCK_DATA:
                        for (i = 0; i < len; i++) {
                                status = amd_ec_read(smbus, AMD_SMB_DATA + i,
index f51702d..3126807 100644 (file)
@@ -504,7 +504,7 @@ static u32 aspeed_i2c_master_irq(struct aspeed_i2c_bus *bus, u32 irq_status)
                        goto error_and_stop;
                }
                irq_handled |= ASPEED_I2CD_INTR_TX_ACK;
-               /* fall through */
+               fallthrough;
        case ASPEED_I2C_MASTER_TX_FIRST:
                if (bus->buf_index < msg->len) {
                        bus->master_state = ASPEED_I2C_MASTER_TX;
@@ -520,7 +520,7 @@ static u32 aspeed_i2c_master_irq(struct aspeed_i2c_bus *bus, u32 irq_status)
                /* RX may not have completed yet (only address cycle) */
                if (!(irq_status & ASPEED_I2CD_INTR_RX_DONE))
                        goto out_no_complete;
-               /* fall through */
+               fallthrough;
        case ASPEED_I2C_MASTER_RX:
                if (unlikely(!(irq_status & ASPEED_I2CD_INTR_RX_DONE))) {
                        dev_err(bus->dev, "master failed to RX\n");
index 363d540..66864f9 100644 (file)
@@ -816,79 +816,16 @@ error:
        return ret;
 }
 
-static void at91_prepare_twi_recovery(struct i2c_adapter *adap)
-{
-       struct at91_twi_dev *dev = i2c_get_adapdata(adap);
-
-       pinctrl_select_state(dev->pinctrl, dev->pinctrl_pins_gpio);
-}
-
-static void at91_unprepare_twi_recovery(struct i2c_adapter *adap)
-{
-       struct at91_twi_dev *dev = i2c_get_adapdata(adap);
-
-       pinctrl_select_state(dev->pinctrl, dev->pinctrl_pins_default);
-}
-
 static int at91_init_twi_recovery_gpio(struct platform_device *pdev,
                                       struct at91_twi_dev *dev)
 {
        struct i2c_bus_recovery_info *rinfo = &dev->rinfo;
 
-       dev->pinctrl = devm_pinctrl_get(&pdev->dev);
-       if (!dev->pinctrl || IS_ERR(dev->pinctrl)) {
+       rinfo->pinctrl = devm_pinctrl_get(&pdev->dev);
+       if (!rinfo->pinctrl || IS_ERR(rinfo->pinctrl)) {
                dev_info(dev->dev, "can't get pinctrl, bus recovery not supported\n");
-               return PTR_ERR(dev->pinctrl);
+               return PTR_ERR(rinfo->pinctrl);
        }
-
-       dev->pinctrl_pins_default = pinctrl_lookup_state(dev->pinctrl,
-                                                        PINCTRL_STATE_DEFAULT);
-       dev->pinctrl_pins_gpio = pinctrl_lookup_state(dev->pinctrl,
-                                                     "gpio");
-       if (IS_ERR(dev->pinctrl_pins_default) ||
-           IS_ERR(dev->pinctrl_pins_gpio)) {
-               dev_info(&pdev->dev, "pinctrl states incomplete for recovery\n");
-               return -EINVAL;
-       }
-
-       /*
-        * pins will be taken as GPIO, so we might as well inform pinctrl about
-        * this and move the state to GPIO
-        */
-       pinctrl_select_state(dev->pinctrl, dev->pinctrl_pins_gpio);
-
-       rinfo->sda_gpiod = devm_gpiod_get(&pdev->dev, "sda", GPIOD_IN);
-       if (PTR_ERR(rinfo->sda_gpiod) == -EPROBE_DEFER)
-               return -EPROBE_DEFER;
-
-       rinfo->scl_gpiod = devm_gpiod_get(&pdev->dev, "scl",
-                                         GPIOD_OUT_HIGH_OPEN_DRAIN);
-       if (PTR_ERR(rinfo->scl_gpiod) == -EPROBE_DEFER)
-               return -EPROBE_DEFER;
-
-       if (IS_ERR(rinfo->sda_gpiod) ||
-           IS_ERR(rinfo->scl_gpiod)) {
-               dev_info(&pdev->dev, "recovery information incomplete\n");
-               if (!IS_ERR(rinfo->sda_gpiod)) {
-                       gpiod_put(rinfo->sda_gpiod);
-                       rinfo->sda_gpiod = NULL;
-               }
-               if (!IS_ERR(rinfo->scl_gpiod)) {
-                       gpiod_put(rinfo->scl_gpiod);
-                       rinfo->scl_gpiod = NULL;
-               }
-               pinctrl_select_state(dev->pinctrl, dev->pinctrl_pins_default);
-               return -EINVAL;
-       }
-
-       /* change the state of the pins back to their default state */
-       pinctrl_select_state(dev->pinctrl, dev->pinctrl_pins_default);
-
-       dev_info(&pdev->dev, "using scl, sda for recovery\n");
-
-       rinfo->prepare_recovery = at91_prepare_twi_recovery;
-       rinfo->unprepare_recovery = at91_unprepare_twi_recovery;
-       rinfo->recover_bus = i2c_generic_scl_recovery;
        dev->adapter.bus_recovery_info = rinfo;
 
        return 0;
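
The at91 conversion above is the driver-side half of the GPIO/pinctrl recovery rework that shows up later in this section in i2c-core-base.c: the driver now only hands the core a pinctrl handle inside its i2c_bus_recovery_info, and the core looks up the "default"/"gpio" pinctrl states and the SCL/SDA GPIOs itself. A condensed sketch of that driver-side contract, using hypothetical foo_* names:

static int foo_init_recovery_info(struct platform_device *pdev,
                                  struct foo_i2c_dev *dev)
{
        struct i2c_bus_recovery_info *rinfo = &dev->rinfo;

        rinfo->pinctrl = devm_pinctrl_get(&pdev->dev);
        if (IS_ERR(rinfo->pinctrl)) {
                dev_info(&pdev->dev, "no pinctrl, bus recovery not supported\n");
                return PTR_ERR(rinfo->pinctrl);
        }

        /* state lookup, GPIO lookup and recover_bus assignment are now
         * done by the core during adapter registration */
        dev->adapter.bus_recovery_info = rinfo;

        return 0;
}
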
index 7e7b495..eae673a 100644 (file)
@@ -157,9 +157,6 @@ struct at91_twi_dev {
        struct at91_twi_dma dma;
        bool slave_detected;
        struct i2c_bus_recovery_info rinfo;
-       struct pinctrl *pinctrl;
-       struct pinctrl_state *pinctrl_pins_default;
-       struct pinctrl_state *pinctrl_pins_gpio;
 #ifdef CONFIG_I2C_AT91_SLAVE_EXPERIMENTAL
        unsigned smr;
        struct i2c_client *slave;
index 8a3c988..688e928 100644 (file)
@@ -1078,7 +1078,7 @@ static int bcm_iproc_i2c_unreg_slave(struct i2c_client *slave)
        if (!iproc_i2c->slave)
                return -EINVAL;
 
-       iproc_i2c->slave = NULL;
+       disable_irq(iproc_i2c->irq);
 
        /* disable all slave interrupts */
        tmp = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET);
@@ -1091,6 +1091,17 @@ static int bcm_iproc_i2c_unreg_slave(struct i2c_client *slave)
        tmp &= ~BIT(S_CFG_EN_NIC_SMB_ADDR3_SHIFT);
        iproc_i2c_wr_reg(iproc_i2c, S_CFG_SMBUS_ADDR_OFFSET, tmp);
 
+       /* flush TX/RX FIFOs */
+       tmp = (BIT(S_FIFO_RX_FLUSH_SHIFT) | BIT(S_FIFO_TX_FLUSH_SHIFT));
+       iproc_i2c_wr_reg(iproc_i2c, S_FIFO_CTRL_OFFSET, tmp);
+
+       /* clear all pending slave interrupts */
+       iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, ISR_MASK_SLAVE);
+
+       iproc_i2c->slave = NULL;
+
+       enable_irq(iproc_i2c->irq);
+
        return 0;
 }
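
The bcm-iproc fix above reorders slave teardown so the interrupt handler can never see a half-torn-down slave: disable_irq() waits for any running handler to finish, the slave interrupt sources and FIFOs are quiesced, and only then is the pointer cleared. The same idea in a driver-agnostic sketch with hypothetical foo_* helpers:

static int foo_i2c_unreg_slave(struct i2c_client *slave)
{
        struct foo_i2c *bus = i2c_get_clientdata(slave); /* hypothetical lookup */

        if (!bus->slave)
                return -EINVAL;

        disable_irq(bus->irq);          /* also waits for a running handler */

        foo_mask_slave_irqs(bus);       /* hypothetical register writes */
        foo_flush_slave_fifos(bus);     /* hypothetical */

        bus->slave = NULL;

        enable_irq(bus->irq);

        return 0;
}
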
 
index d9b86fc..5dc5195 100644 (file)
@@ -392,7 +392,7 @@ static const struct i2c_algorithm bcm2835_i2c_algo = {
 
 /*
  * The BCM2835 was reported to have problems with clock stretching:
- * http://www.advamation.com/knowhow/raspberrypi/rpi-i2c-bug.html
+ * https://www.advamation.com/knowhow/raspberrypi/rpi-i2c-bug.html
  * https://www.raspberrypi.org/forums/viewtopic.php?p=146272
  */
 static const struct i2c_adapter_quirks bcm2835_i2c_quirks = {
index 8522134..55c83a7 100644 (file)
@@ -90,7 +90,7 @@ static int mfld_setup(struct pci_dev *pdev, struct dw_pci_controller *c)
        switch (pdev->device) {
        case 0x0817:
                dev->timings.bus_freq_hz = I2C_MAX_STANDARD_MODE_FREQ;
-               /* fall through */
+               fallthrough;
        case 0x0818:
        case 0x0819:
                c->bus_num = pdev->device - 0x817 + 3;
index a71bc58..0dfeb2d 100644 (file)
@@ -55,6 +55,7 @@ static const struct acpi_device_id dw_i2c_acpi_match[] = {
        { "HISI02A1", 0 },
        { "HISI02A2", 0 },
        { "HISI02A3", 0 },
+       { "HYGO0010", ACCESS_INTR_MASK },
        { }
 };
 MODULE_DEVICE_TABLE(acpi, dw_i2c_acpi_match);
index 332f004..f67639d 100644 (file)
@@ -187,7 +187,7 @@ static irqreturn_t dc_i2c_irq(int irq, void *dev_id)
                        break;
                }
                i2c->state = STATE_WRITE;
-               /* fall through */
+               fallthrough;
        case STATE_WRITE:
                if (i2c->msgbuf_ptr < i2c->msg->len)
                        dc_i2c_write_buf(i2c);
index 73f1396..843b31a 100644 (file)
@@ -846,11 +846,10 @@ static void pch_i2c_remove(struct pci_dev *pdev)
        kfree(adap_info);
 }
 
-#ifdef CONFIG_PM
-static int pch_i2c_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused pch_i2c_suspend(struct device *dev)
 {
-       int ret;
        int i;
+       struct pci_dev *pdev = to_pci_dev(dev);
        struct adapter_info *adap_info = pci_get_drvdata(pdev);
        void __iomem *p = adap_info->pch_data[0].pch_base_address;
 
@@ -872,34 +871,13 @@ static int pch_i2c_suspend(struct pci_dev *pdev, pm_message_t state)
                ioread32(p + PCH_I2CSR), ioread32(p + PCH_I2CBUFSTA),
                ioread32(p + PCH_I2CESRSTA));
 
-       ret = pci_save_state(pdev);
-
-       if (ret) {
-               pch_pci_err(pdev, "pci_save_state\n");
-               return ret;
-       }
-
-       pci_enable_wake(pdev, PCI_D3hot, 0);
-       pci_disable_device(pdev);
-       pci_set_power_state(pdev, pci_choose_state(pdev, state));
-
        return 0;
 }
 
-static int pch_i2c_resume(struct pci_dev *pdev)
+static int __maybe_unused pch_i2c_resume(struct device *dev)
 {
        int i;
-       struct adapter_info *adap_info = pci_get_drvdata(pdev);
-
-       pci_set_power_state(pdev, PCI_D0);
-       pci_restore_state(pdev);
-
-       if (pci_enable_device(pdev) < 0) {
-               pch_pci_err(pdev, "pch_i2c_resume:pci_enable_device FAILED\n");
-               return -EIO;
-       }
-
-       pci_enable_wake(pdev, PCI_D3hot, 0);
+       struct adapter_info *adap_info = dev_get_drvdata(dev);
 
        for (i = 0; i < adap_info->ch_num; i++)
                pch_i2c_init(&adap_info->pch_data[i]);
@@ -908,18 +886,15 @@ static int pch_i2c_resume(struct pci_dev *pdev)
 
        return 0;
 }
-#else
-#define pch_i2c_suspend NULL
-#define pch_i2c_resume NULL
-#endif
+
+static SIMPLE_DEV_PM_OPS(pch_i2c_pm_ops, pch_i2c_suspend, pch_i2c_resume);
 
 static struct pci_driver pch_pcidriver = {
        .name = KBUILD_MODNAME,
        .id_table = pch_pcidev_id,
        .probe = pch_i2c_probe,
        .remove = pch_i2c_remove,
-       .suspend = pch_i2c_suspend,
-       .resume = pch_i2c_resume
+       .driver.pm = &pch_i2c_pm_ops,
 };
 
 module_pci_driver(pch_pcidriver);
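
The eg20t conversion above replaces the legacy pci_driver .suspend/.resume hooks with device-level dev_pm_ops: the PCI core then saves and restores config space and handles power-state and wakeup transitions itself, so the driver callbacks shrink to device-specific quiesce and re-init. A stripped-down sketch of the same shape for a hypothetical foo_* driver:

static int __maybe_unused foo_suspend(struct device *dev)
{
        struct foo_priv *priv = dev_get_drvdata(dev);

        foo_quiesce(priv);              /* hypothetical device-specific quiesce */
        return 0;
}

static int __maybe_unused foo_resume(struct device *dev)
{
        struct foo_priv *priv = dev_get_drvdata(dev);

        foo_reinit(priv);               /* hypothetical device-specific re-init */
        return 0;
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct pci_driver foo_driver = {
        .name           = "foo",
        .id_table       = foo_pci_ids,  /* hypothetical */
        .probe          = foo_probe,    /* hypothetical */
        .remove         = foo_remove,   /* hypothetical */
        .driver.pm      = &foo_pm_ops,
};
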
index 1a31935..a08554c 100644 (file)
@@ -442,6 +442,7 @@ static struct platform_driver em_i2c_driver = {
 module_platform_driver(em_i2c_driver);
 
 MODULE_DESCRIPTION("EMEV2 I2C bus driver");
-MODULE_AUTHOR("Ian Molton and Wolfram Sang <wsa@sang-engineering.com>");
+MODULE_AUTHOR("Ian Molton");
+MODULE_AUTHOR("Wolfram Sang <wsa@sang-engineering.com>");
 MODULE_LICENSE("GPL v2");
 MODULE_DEVICE_TABLE(of, em_i2c_ids);
index 977d6f5..1033269 100644 (file)
@@ -703,7 +703,7 @@ static int fsi_i2c_probe(struct device *dev)
 
        for (port_no = 0; port_no < ports; port_no++) {
                np = fsi_i2c_find_port_of_node(dev->of_node, port_no);
-               if (np && !of_device_is_available(np))
+               if (!of_device_is_available(np))
                        continue;
 
                port = kzalloc(sizeof(*port), GFP_KERNEL);
index fea6449..e32ef3f 100644 (file)
@@ -54,6 +54,7 @@
  * Sunrise Point-H (PCH)       0xa123  32      hard    yes     yes     yes
  * Sunrise Point-LP (PCH)      0x9d23  32      hard    yes     yes     yes
  * DNV (SOC)                   0x19df  32      hard    yes     yes     yes
+ * Emmitsburg (PCH)            0x1bc9  32      hard    yes     yes     yes
  * Broxton (SOC)               0x5ad4  32      hard    yes     yes     yes
  * Lewisburg (PCH)             0xa1a3  32      hard    yes     yes     yes
  * Lewisburg Supersku (PCH)    0xa223  32      hard    yes     yes     yes
@@ -67,6 +68,7 @@
  * Comet Lake-H (PCH)          0x06a3  32      hard    yes     yes     yes
  * Elkhart Lake (PCH)          0x4b23  32      hard    yes     yes     yes
  * Tiger Lake-LP (PCH)         0xa0a3  32      hard    yes     yes     yes
+ * Tiger Lake-H (PCH)          0x43a3  32      hard    yes     yes     yes
  * Jasper Lake (SOC)           0x4da3  32      hard    yes     yes     yes
  * Comet Lake-V (PCH)          0xa3a3  32      hard    yes     yes     yes
  *
 #define PCI_DEVICE_ID_INTEL_BAYTRAIL_SMBUS             0x0f12
 #define PCI_DEVICE_ID_INTEL_CDF_SMBUS                  0x18df
 #define PCI_DEVICE_ID_INTEL_DNV_SMBUS                  0x19df
+#define PCI_DEVICE_ID_INTEL_EBG_SMBUS                  0x1bc9
 #define PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS          0x1c22
 #define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS             0x1d22
 /* Patsburg also has three 'Integrated Device Function' SMBus controllers */
 #define PCI_DEVICE_ID_INTEL_GEMINILAKE_SMBUS           0x31d4
 #define PCI_DEVICE_ID_INTEL_ICELAKE_LP_SMBUS           0x34a3
 #define PCI_DEVICE_ID_INTEL_5_3400_SERIES_SMBUS                0x3b30
+#define PCI_DEVICE_ID_INTEL_TIGERLAKE_H_SMBUS          0x43a3
 #define PCI_DEVICE_ID_INTEL_ELKHART_LAKE_SMBUS         0x4b23
 #define PCI_DEVICE_ID_INTEL_JASPER_LAKE_SMBUS          0x4da3
 #define PCI_DEVICE_ID_INTEL_BROXTON_SMBUS              0x5ad4
@@ -1062,6 +1066,7 @@ static const struct pci_device_id i801_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS) },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CDF_SMBUS) },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_DNV_SMBUS) },
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EBG_SMBUS) },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BROXTON_SMBUS) },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS) },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS) },
@@ -1074,6 +1079,7 @@ static const struct pci_device_id i801_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COMETLAKE_V_SMBUS) },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ELKHART_LAKE_SMBUS) },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TIGERLAKE_LP_SMBUS) },
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TIGERLAKE_H_SMBUS) },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_JASPER_LAKE_SMBUS) },
        { 0, }
 };
@@ -1748,7 +1754,9 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
        case PCI_DEVICE_ID_INTEL_COMETLAKE_H_SMBUS:
        case PCI_DEVICE_ID_INTEL_ELKHART_LAKE_SMBUS:
        case PCI_DEVICE_ID_INTEL_TIGERLAKE_LP_SMBUS:
+       case PCI_DEVICE_ID_INTEL_TIGERLAKE_H_SMBUS:
        case PCI_DEVICE_ID_INTEL_JASPER_LAKE_SMBUS:
+       case PCI_DEVICE_ID_INTEL_EBG_SMBUS:
                priv->features |= FEATURE_BLOCK_PROC;
                priv->features |= FEATURE_I2C_BLOCK_READ;
                priv->features |= FEATURE_IRQ;
@@ -1765,19 +1773,19 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
        case PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS1:
        case PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS2:
                priv->features |= FEATURE_IDF;
-               /* fall through */
+               fallthrough;
        default:
                priv->features |= FEATURE_BLOCK_PROC;
                priv->features |= FEATURE_I2C_BLOCK_READ;
                priv->features |= FEATURE_IRQ;
-               /* fall through */
+               fallthrough;
        case PCI_DEVICE_ID_INTEL_82801DB_3:
                priv->features |= FEATURE_SMBUS_PEC;
                priv->features |= FEATURE_BLOCK_BUFFER;
-               /* fall through */
+               fallthrough;
        case PCI_DEVICE_ID_INTEL_82801CA_3:
                priv->features |= FEATURE_HOST_NOTIFY;
-               /* fall through */
+               fallthrough;
        case PCI_DEVICE_ID_INTEL_82801BA_2:
        case PCI_DEVICE_ID_INTEL_82801AB_3:
        case PCI_DEVICE_ID_INTEL_82801AA_3:
@@ -1986,7 +1994,8 @@ static void __exit i2c_i801_exit(void)
        pci_unregister_driver(&i801_driver);
 }
 
-MODULE_AUTHOR("Mark D. Studebaker <mdsxyz123@yahoo.com>, Jean Delvare <jdelvare@suse.de>");
+MODULE_AUTHOR("Mark D. Studebaker <mdsxyz123@yahoo.com>");
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
 MODULE_DESCRIPTION("I801 SMBus driver");
 MODULE_LICENSE("GPL");
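
Enabling a new PCH in i2c-i801 is mostly bookkeeping, and the Emmitsburg and Tiger Lake-H hunks above show the usual touch points: a PCI_DEVICE_ID_* define, an entry in the pci_device_id table, a case in the probe switch that sets the feature flags, plus the Kconfig help text and the feature table in the file-header comment. The generic form, with a hypothetical 0x1234 device:

#define PCI_DEVICE_ID_INTEL_FOO_SMBUS   0x1234  /* hypothetical ID */

static const struct pci_device_id foo_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_FOO_SMBUS) },
        { 0, }
};
MODULE_DEVICE_TABLE(pci, foo_ids);
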
 
index deef69e..efc1404 100644 (file)
 
 #define I2C_DMA_CON_TX                 0x0000
 #define I2C_DMA_CON_RX                 0x0001
+#define I2C_DMA_ASYNC_MODE             0x0004
+#define I2C_DMA_SKIP_CONFIG            0x0010
+#define I2C_DMA_DIR_CHANGE             0x0200
 #define I2C_DMA_START_EN               0x0001
 #define I2C_DMA_INT_FLAG_NONE          0x0000
 #define I2C_DMA_CLR_FLAG               0x0000
 #define I2C_DMA_HARD_RST               0x0002
-#define I2C_DMA_4G_MODE                        0x0001
 
 #define MAX_SAMPLE_CNT_DIV             8
 #define MAX_STEP_CNT_DIV               64
@@ -201,10 +203,11 @@ struct mtk_i2c_compatible {
        unsigned char dcm: 1;
        unsigned char auto_restart: 1;
        unsigned char aux_len_reg: 1;
-       unsigned char support_33bits: 1;
        unsigned char timing_adjust: 1;
        unsigned char dma_sync: 1;
        unsigned char ltiming_adjust: 1;
+       unsigned char apdma_sync: 1;
+       unsigned char max_dma_support;
 };
 
 struct mtk_i2c_ac_timing {
@@ -250,14 +253,13 @@ struct mtk_i2c {
 
 /**
  * struct i2c_spec_values:
- * min_low_ns: min LOW period of the SCL clock
- * min_su_sta_ns: min set-up time for a repeated START condition
- * max_hd_dat_ns: max data hold time
- * min_su_dat_ns: min data set-up time
+ * @min_low_ns: min LOW period of the SCL clock
+ * @min_su_sta_ns: min set-up time for a repeated START condition
+ * @max_hd_dat_ns: max data hold time
+ * @min_su_dat_ns: min data set-up time
  */
 struct i2c_spec_values {
        unsigned int min_low_ns;
-       unsigned int min_high_ns;
        unsigned int min_su_sta_ns;
        unsigned int max_hd_dat_ns;
        unsigned int min_su_dat_ns;
@@ -307,10 +309,11 @@ static const struct mtk_i2c_compatible mt2712_compat = {
        .dcm = 1,
        .auto_restart = 1,
        .aux_len_reg = 1,
-       .support_33bits = 1,
        .timing_adjust = 1,
        .dma_sync = 0,
        .ltiming_adjust = 0,
+       .apdma_sync = 0,
+       .max_dma_support = 33,
 };
 
 static const struct mtk_i2c_compatible mt6577_compat = {
@@ -320,10 +323,11 @@ static const struct mtk_i2c_compatible mt6577_compat = {
        .dcm = 1,
        .auto_restart = 0,
        .aux_len_reg = 0,
-       .support_33bits = 0,
        .timing_adjust = 0,
        .dma_sync = 0,
        .ltiming_adjust = 0,
+       .apdma_sync = 0,
+       .max_dma_support = 32,
 };
 
 static const struct mtk_i2c_compatible mt6589_compat = {
@@ -333,10 +337,11 @@ static const struct mtk_i2c_compatible mt6589_compat = {
        .dcm = 0,
        .auto_restart = 0,
        .aux_len_reg = 0,
-       .support_33bits = 0,
        .timing_adjust = 0,
        .dma_sync = 0,
        .ltiming_adjust = 0,
+       .apdma_sync = 0,
+       .max_dma_support = 32,
 };
 
 static const struct mtk_i2c_compatible mt7622_compat = {
@@ -346,10 +351,11 @@ static const struct mtk_i2c_compatible mt7622_compat = {
        .dcm = 1,
        .auto_restart = 1,
        .aux_len_reg = 1,
-       .support_33bits = 0,
        .timing_adjust = 0,
        .dma_sync = 0,
        .ltiming_adjust = 0,
+       .apdma_sync = 0,
+       .max_dma_support = 32,
 };
 
 static const struct mtk_i2c_compatible mt8173_compat = {
@@ -358,10 +364,11 @@ static const struct mtk_i2c_compatible mt8173_compat = {
        .dcm = 1,
        .auto_restart = 1,
        .aux_len_reg = 1,
-       .support_33bits = 1,
        .timing_adjust = 0,
        .dma_sync = 0,
        .ltiming_adjust = 0,
+       .apdma_sync = 0,
+       .max_dma_support = 33,
 };
 
 static const struct mtk_i2c_compatible mt8183_compat = {
@@ -371,10 +378,25 @@ static const struct mtk_i2c_compatible mt8183_compat = {
        .dcm = 0,
        .auto_restart = 1,
        .aux_len_reg = 1,
-       .support_33bits = 1,
        .timing_adjust = 1,
        .dma_sync = 1,
        .ltiming_adjust = 1,
+       .apdma_sync = 0,
+       .max_dma_support = 33,
+};
+
+static const struct mtk_i2c_compatible mt8192_compat = {
+       .quirks = &mt8183_i2c_quirks,
+       .regs = mt_i2c_regs_v2,
+       .pmic_i2c = 0,
+       .dcm = 0,
+       .auto_restart = 1,
+       .aux_len_reg = 1,
+       .timing_adjust = 1,
+       .dma_sync = 1,
+       .ltiming_adjust = 1,
+       .apdma_sync = 1,
+       .max_dma_support = 36,
 };
 
 static const struct of_device_id mtk_i2c_of_match[] = {
@@ -384,6 +406,7 @@ static const struct of_device_id mtk_i2c_of_match[] = {
        { .compatible = "mediatek,mt7622-i2c", .data = &mt7622_compat },
        { .compatible = "mediatek,mt8173-i2c", .data = &mt8173_compat },
        { .compatible = "mediatek,mt8183-i2c", .data = &mt8183_compat },
+       { .compatible = "mediatek,mt8192-i2c", .data = &mt8192_compat },
        {}
 };
 MODULE_DEVICE_TABLE(of, mtk_i2c_of_match);
@@ -786,11 +809,6 @@ static int mtk_i2c_set_speed(struct mtk_i2c *i2c, unsigned int parent_clk)
        return 0;
 }
 
-static inline u32 mtk_i2c_set_4g_mode(dma_addr_t addr)
-{
-       return (addr & BIT_ULL(32)) ? I2C_DMA_4G_MODE : I2C_DMA_CLR_FLAG;
-}
-
 static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs,
                               int num, int left_num)
 {
@@ -798,6 +816,7 @@ static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs,
        u16 start_reg;
        u16 control_reg;
        u16 restart_flag = 0;
+       u16 dma_sync = 0;
        u32 reg_4g_mode;
        u8 *dma_rd_buf = NULL;
        u8 *dma_wr_buf = NULL;
@@ -851,10 +870,16 @@ static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs,
                mtk_i2c_writew(i2c, num, OFFSET_TRANSAC_LEN);
        }
 
+       if (i2c->dev_comp->apdma_sync) {
+               dma_sync = I2C_DMA_SKIP_CONFIG | I2C_DMA_ASYNC_MODE;
+               if (i2c->op == I2C_MASTER_WRRD)
+                       dma_sync |= I2C_DMA_DIR_CHANGE;
+       }
+
        /* Prepare buffer data to start transfer */
        if (i2c->op == I2C_MASTER_RD) {
                writel(I2C_DMA_INT_FLAG_NONE, i2c->pdmabase + OFFSET_INT_FLAG);
-               writel(I2C_DMA_CON_RX, i2c->pdmabase + OFFSET_CON);
+               writel(I2C_DMA_CON_RX | dma_sync, i2c->pdmabase + OFFSET_CON);
 
                dma_rd_buf = i2c_get_dma_safe_msg_buf(msgs, 1);
                if (!dma_rd_buf)
@@ -868,8 +893,8 @@ static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs,
                        return -ENOMEM;
                }
 
-               if (i2c->dev_comp->support_33bits) {
-                       reg_4g_mode = mtk_i2c_set_4g_mode(rpaddr);
+               if (i2c->dev_comp->max_dma_support > 32) {
+                       reg_4g_mode = upper_32_bits(rpaddr);
                        writel(reg_4g_mode, i2c->pdmabase + OFFSET_RX_4G_MODE);
                }
 
@@ -877,7 +902,7 @@ static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs,
                writel(msgs->len, i2c->pdmabase + OFFSET_RX_LEN);
        } else if (i2c->op == I2C_MASTER_WR) {
                writel(I2C_DMA_INT_FLAG_NONE, i2c->pdmabase + OFFSET_INT_FLAG);
-               writel(I2C_DMA_CON_TX, i2c->pdmabase + OFFSET_CON);
+               writel(I2C_DMA_CON_TX | dma_sync, i2c->pdmabase + OFFSET_CON);
 
                dma_wr_buf = i2c_get_dma_safe_msg_buf(msgs, 1);
                if (!dma_wr_buf)
@@ -891,8 +916,8 @@ static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs,
                        return -ENOMEM;
                }
 
-               if (i2c->dev_comp->support_33bits) {
-                       reg_4g_mode = mtk_i2c_set_4g_mode(wpaddr);
+               if (i2c->dev_comp->max_dma_support > 32) {
+                       reg_4g_mode = upper_32_bits(wpaddr);
                        writel(reg_4g_mode, i2c->pdmabase + OFFSET_TX_4G_MODE);
                }
 
@@ -900,7 +925,7 @@ static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs,
                writel(msgs->len, i2c->pdmabase + OFFSET_TX_LEN);
        } else {
                writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_INT_FLAG);
-               writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_CON);
+               writel(I2C_DMA_CLR_FLAG | dma_sync, i2c->pdmabase + OFFSET_CON);
 
                dma_wr_buf = i2c_get_dma_safe_msg_buf(msgs, 1);
                if (!dma_wr_buf)
@@ -937,11 +962,11 @@ static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs,
                        return -ENOMEM;
                }
 
-               if (i2c->dev_comp->support_33bits) {
-                       reg_4g_mode = mtk_i2c_set_4g_mode(wpaddr);
+               if (i2c->dev_comp->max_dma_support > 32) {
+                       reg_4g_mode = upper_32_bits(wpaddr);
                        writel(reg_4g_mode, i2c->pdmabase + OFFSET_TX_4G_MODE);
 
-                       reg_4g_mode = mtk_i2c_set_4g_mode(rpaddr);
+                       reg_4g_mode = upper_32_bits(rpaddr);
                        writel(reg_4g_mode, i2c->pdmabase + OFFSET_RX_4G_MODE);
                }
 
@@ -1215,8 +1240,9 @@ static int mtk_i2c_probe(struct platform_device *pdev)
                return -EINVAL;
        }
 
-       if (i2c->dev_comp->support_33bits) {
-               ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(33));
+       if (i2c->dev_comp->max_dma_support > 32) {
+               ret = dma_set_mask(&pdev->dev,
+                               DMA_BIT_MASK(i2c->dev_comp->max_dma_support));
                if (ret) {
                        dev_err(&pdev->dev, "dma_set_mask return error.\n");
                        return ret;
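
The mtk rework replaces the boolean support_33bits with a per-SoC max_dma_support bit count, so the MT8192's 36-bit APDMA fits the same code path: the DMA mask is sized with DMA_BIT_MASK(n) and the high word of the handle is written with upper_32_bits() instead of a hand-rolled 4G-mode flag. A generic sketch of that pattern, with placeholder register offsets:

static int foo_program_dma_addr(struct device *dev, void __iomem *base,
                                dma_addr_t handle, unsigned int max_bits)
{
        int ret;

        if (max_bits > 32) {
                ret = dma_set_mask(dev, DMA_BIT_MASK(max_bits));
                if (ret)
                        return ret;
                /* extended-address register; offset is a placeholder */
                writel(upper_32_bits(handle), base + 0x13c);
        }

        writel(lower_32_bits(handle), base + 0x138);    /* placeholder offset */

        return 0;
}
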
index 829b8c9..8d9d4ff 100644 (file)
@@ -251,7 +251,7 @@ mv64xxx_i2c_fsm(struct mv64xxx_i2c_data *drv_data, u32 status)
                                MV64XXX_I2C_STATE_WAITING_FOR_ADDR_2_ACK;
                        break;
                }
-               /* FALLTHRU */
+               fallthrough;
        case MV64XXX_I2C_STATUS_MAST_WR_ADDR_2_ACK: /* 0xd0 */
        case MV64XXX_I2C_STATUS_MAST_WR_ACK: /* 0x28 */
                if ((drv_data->bytes_left == 0)
@@ -282,14 +282,14 @@ mv64xxx_i2c_fsm(struct mv64xxx_i2c_data *drv_data, u32 status)
                                MV64XXX_I2C_STATE_WAITING_FOR_ADDR_2_ACK;
                        break;
                }
-               /* FALLTHRU */
+               fallthrough;
        case MV64XXX_I2C_STATUS_MAST_RD_ADDR_2_ACK: /* 0xe0 */
                if (drv_data->bytes_left == 0) {
                        drv_data->action = MV64XXX_I2C_ACTION_SEND_STOP;
                        drv_data->state = MV64XXX_I2C_STATE_IDLE;
                        break;
                }
-               /* FALLTHRU */
+               fallthrough;
        case MV64XXX_I2C_STATUS_MAST_RD_DATA_ACK: /* 0x50 */
                if (status != MV64XXX_I2C_STATUS_MAST_RD_DATA_ACK)
                        drv_data->action = MV64XXX_I2C_ACTION_CONTINUE;
@@ -417,8 +417,7 @@ mv64xxx_i2c_do_action(struct mv64xxx_i2c_data *drv_data)
                        "mv64xxx_i2c_do_action: Invalid action: %d\n",
                        drv_data->action);
                drv_data->rc = -EIO;
-
-               /* FALLTHRU */
+               fallthrough;
        case MV64XXX_I2C_ACTION_SEND_STOP:
                drv_data->cntl_bits &= ~MV64XXX_I2C_REG_CONTROL_INTEN;
                writel(drv_data->cntl_bits | MV64XXX_I2C_REG_CONTROL_STOP,
index e1e8d4e..d4b1b08 100644 (file)
@@ -1122,6 +1122,7 @@ static void __exit nmk_i2c_exit(void)
 subsys_initcall(nmk_i2c_init);
 module_exit(nmk_i2c_exit);
 
-MODULE_AUTHOR("Sachin Verma, Srinidhi KASAGAR");
+MODULE_AUTHOR("Sachin Verma");
+MODULE_AUTHOR("Srinidhi KASAGAR");
 MODULE_DESCRIPTION("Nomadik/Ux500 I2C driver");
 MODULE_LICENSE("GPL");
index 69740a4..8c1b31e 100644 (file)
@@ -1032,7 +1032,7 @@ static struct pci_driver piix4_driver = {
 
 module_pci_driver(piix4_driver);
 
-MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl> and "
-               "Philip Edelbrock <phil@netroedge.com>");
+MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl>");
+MODULE_AUTHOR("Philip Edelbrock <phil@netroedge.com>");
 MODULE_DESCRIPTION("PIIX4 SMBus driver");
 MODULE_LICENSE("GPL");
index 5d7207c..8c4ec7f 100644 (file)
@@ -781,7 +781,8 @@ static void __exit i2c_adap_pnx_exit(void)
        platform_driver_unregister(&i2c_pnx_driver);
 }
 
-MODULE_AUTHOR("Vitaly Wool, Dennis Kovalev <source@mvista.com>");
+MODULE_AUTHOR("Vitaly Wool");
+MODULE_AUTHOR("Dennis Kovalev <source@mvista.com>");
 MODULE_DESCRIPTION("I2C driver for Philips IP3204-based I2C busses");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("platform:pnx-i2c");
index 2e3e1bb..9e88347 100644 (file)
@@ -583,13 +583,14 @@ static bool rcar_i2c_slave_irq(struct rcar_i2c_priv *priv)
                        rcar_i2c_write(priv, ICSIER, SDR | SSR | SAR);
                }
 
-               rcar_i2c_write(priv, ICSSR, ~SAR & 0xff);
+               /* Clear SSR, too, because of old STOPs to other clients than us */
+               rcar_i2c_write(priv, ICSSR, ~(SAR | SSR) & 0xff);
        }
 
        /* master sent stop */
        if (ssr_filtered & SSR) {
                i2c_slave_event(priv->slave, I2C_SLAVE_STOP, &value);
-               rcar_i2c_write(priv, ICSIER, SAR | SSR);
+               rcar_i2c_write(priv, ICSIER, SAR);
                rcar_i2c_write(priv, ICSSR, ~SSR & 0xff);
        }
 
@@ -853,7 +854,7 @@ static int rcar_reg_slave(struct i2c_client *slave)
        priv->slave = slave;
        rcar_i2c_write(priv, ICSAR, slave->addr);
        rcar_i2c_write(priv, ICSSR, 0);
-       rcar_i2c_write(priv, ICSIER, SAR | SSR);
+       rcar_i2c_write(priv, ICSIER, SAR);
        rcar_i2c_write(priv, ICSCR, SIE | SDBS);
 
        return 0;
@@ -865,12 +866,14 @@ static int rcar_unreg_slave(struct i2c_client *slave)
 
        WARN_ON(!priv->slave);
 
-       /* disable irqs and ensure none is running before clearing ptr */
+       /* ensure no irq is running before clearing ptr */
+       disable_irq(priv->irq);
        rcar_i2c_write(priv, ICSIER, 0);
-       rcar_i2c_write(priv, ICSCR, 0);
+       rcar_i2c_write(priv, ICSSR, 0);
+       enable_irq(priv->irq);
+       rcar_i2c_write(priv, ICSCR, SDBS);
        rcar_i2c_write(priv, ICSAR, 0); /* Gen2: must be 0 if not using slave */
 
-       synchronize_irq(priv->irq);
        priv->slave = NULL;
 
        pm_runtime_put(rcar_i2c_priv_to_dev(priv));
index 15324bf..8e3cc85 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/module.h>
 #include <linux/i2c.h>
 #include <linux/interrupt.h>
+#include <linux/iopoll.h>
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <linux/platform_device.h>
@@ -1040,8 +1041,21 @@ static int rk3x_i2c_setup(struct rk3x_i2c *i2c, struct i2c_msg *msgs, int num)
        return ret;
 }
 
-static int rk3x_i2c_xfer(struct i2c_adapter *adap,
-                        struct i2c_msg *msgs, int num)
+static int rk3x_i2c_wait_xfer_poll(struct rk3x_i2c *i2c)
+{
+       ktime_t timeout = ktime_add_ms(ktime_get(), WAIT_TIMEOUT);
+
+       while (READ_ONCE(i2c->busy) &&
+              ktime_compare(ktime_get(), timeout) < 0) {
+               udelay(5);
+               rk3x_i2c_irq(0, i2c);
+       }
+
+       return !i2c->busy;
+}
+
+static int rk3x_i2c_xfer_common(struct i2c_adapter *adap,
+                               struct i2c_msg *msgs, int num, bool polling)
 {
        struct rk3x_i2c *i2c = (struct rk3x_i2c *)adap->algo_data;
        unsigned long timeout, flags;
@@ -1075,8 +1089,12 @@ static int rk3x_i2c_xfer(struct i2c_adapter *adap,
 
                rk3x_i2c_start(i2c);
 
-               timeout = wait_event_timeout(i2c->wait, !i2c->busy,
-                                            msecs_to_jiffies(WAIT_TIMEOUT));
+               if (!polling) {
+                       timeout = wait_event_timeout(i2c->wait, !i2c->busy,
+                                                    msecs_to_jiffies(WAIT_TIMEOUT));
+               } else {
+                       timeout = rk3x_i2c_wait_xfer_poll(i2c);
+               }
 
                spin_lock_irqsave(&i2c->lock, flags);
 
@@ -1110,6 +1128,18 @@ static int rk3x_i2c_xfer(struct i2c_adapter *adap,
        return ret < 0 ? ret : num;
 }
 
+static int rk3x_i2c_xfer(struct i2c_adapter *adap,
+                        struct i2c_msg *msgs, int num)
+{
+       return rk3x_i2c_xfer_common(adap, msgs, num, false);
+}
+
+static int rk3x_i2c_xfer_polling(struct i2c_adapter *adap,
+                                struct i2c_msg *msgs, int num)
+{
+       return rk3x_i2c_xfer_common(adap, msgs, num, true);
+}
+
 static __maybe_unused int rk3x_i2c_resume(struct device *dev)
 {
        struct rk3x_i2c *i2c = dev_get_drvdata(dev);
@@ -1126,6 +1156,7 @@ static u32 rk3x_i2c_func(struct i2c_adapter *adap)
 
 static const struct i2c_algorithm rk3x_i2c_algorithm = {
        .master_xfer            = rk3x_i2c_xfer,
+       .master_xfer_atomic     = rk3x_i2c_xfer_polling,
        .functionality          = rk3x_i2c_func,
 };
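
Wiring rk3x_i2c_xfer_polling into .master_xfer_atomic gives the adapter an interrupt-free path for contexts where the core may not sleep (for example, a final PMIC write during shutdown); it drives the existing state machine by calling the ISR by hand until the transfer completes or a timeout expires. The busy-wait loop in isolation, with hypothetical foo_* names:

static int foo_i2c_wait_xfer_poll(struct foo_i2c *i2c, unsigned int timeout_ms)
{
        ktime_t timeout = ktime_add_ms(ktime_get(), timeout_ms);

        while (READ_ONCE(i2c->busy) && ktime_compare(ktime_get(), timeout) < 0) {
                udelay(5);
                foo_i2c_irq(0, i2c);    /* the normal ISR, called synchronously */
        }

        return READ_ONCE(i2c->busy) ? -ETIMEDOUT : 0;
}
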
 
index 2cca1b2..cab7255 100644 (file)
@@ -932,6 +932,7 @@ static void __exit sh_mobile_i2c_adap_exit(void)
 module_exit(sh_mobile_i2c_adap_exit);
 
 MODULE_DESCRIPTION("SuperH Mobile I2C Bus Controller driver");
-MODULE_AUTHOR("Magnus Damm and Wolfram Sang");
+MODULE_AUTHOR("Magnus Damm");
+MODULE_AUTHOR("Wolfram Sang");
 MODULE_LICENSE("GPL v2");
 MODULE_ALIAS("platform:i2c-sh_mobile");
index 9dcea2b..8f71f01 100644 (file)
@@ -180,6 +180,7 @@ static void __exit i2c_sibyte_exit(void)
 module_init(i2c_sibyte_init);
 module_exit(i2c_sibyte_exit);
 
-MODULE_AUTHOR("Kip Walker (Broadcom Corp.), Steven J. Hill <sjhill@realitydiluted.com>");
+MODULE_AUTHOR("Kip Walker (Broadcom Corp.)");
+MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com>");
 MODULE_DESCRIPTION("SMBus adapter routines for SiByte boards");
 MODULE_LICENSE("GPL");
index d7f72ec..30db8fa 100644 (file)
@@ -470,6 +470,6 @@ static struct platform_driver i2c_sirfsoc_driver = {
 module_platform_driver(i2c_sirfsoc_driver);
 
 MODULE_DESCRIPTION("SiRF SoC I2C master controller driver");
-MODULE_AUTHOR("Zhiwu Song <Zhiwu.Song@csr.com>, "
-       "Xiangzhen Ye <Xiangzhen.Ye@csr.com>");
+MODULE_AUTHOR("Zhiwu Song <Zhiwu.Song@csr.com>");
+MODULE_AUTHOR("Xiangzhen Ye <Xiangzhen.Ye@csr.com>");
 MODULE_LICENSE("GPL v2");
index c9a3dba..31be181 100644 (file)
@@ -398,8 +398,7 @@ static irqreturn_t synquacer_i2c_isr(int irq, void *dev_id)
 
                if (i2c->state == STATE_READ)
                        goto prepare_read;
-
-               /* fall through */
+               fallthrough;
 
        case STATE_WRITE:
                if (bsr & SYNQUACER_I2C_BSR_LRB) {
index 1577296..00d3e4d 100644 (file)
@@ -293,6 +293,8 @@ struct tegra_i2c_dev {
        bool is_curr_atomic_xfer;
 };
 
+static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev, bool clk_reinit);
+
 static void dvc_writel(struct tegra_i2c_dev *i2c_dev, u32 val,
                       unsigned long reg)
 {
@@ -419,7 +421,7 @@ static int tegra_i2c_init_dma(struct tegra_i2c_dev *i2c_dev)
        dma_addr_t dma_phys;
        int err;
 
-       if (!i2c_dev->hw->has_apb_dma)
+       if (!i2c_dev->hw->has_apb_dma || i2c_dev->is_vi)
                return 0;
 
        if (!IS_ENABLED(CONFIG_TEGRA20_APB_DMA)) {
@@ -655,32 +657,47 @@ static int __maybe_unused tegra_i2c_runtime_resume(struct device *dev)
        if (ret)
                return ret;
 
-       if (!i2c_dev->hw->has_single_clk_source) {
-               ret = clk_enable(i2c_dev->fast_clk);
-               if (ret < 0) {
-                       dev_err(i2c_dev->dev,
-                               "Enabling fast clk failed, err %d\n", ret);
-                       return ret;
-               }
+       ret = clk_enable(i2c_dev->fast_clk);
+       if (ret < 0) {
+               dev_err(i2c_dev->dev,
+                       "Enabling fast clk failed, err %d\n", ret);
+               return ret;
        }
 
-       if (i2c_dev->slow_clk) {
-               ret = clk_enable(i2c_dev->slow_clk);
-               if (ret < 0) {
-                       dev_err(dev, "failed to enable slow clock: %d\n", ret);
-                       return ret;
-               }
+       ret = clk_enable(i2c_dev->slow_clk);
+       if (ret < 0) {
+               dev_err(dev, "failed to enable slow clock: %d\n", ret);
+               goto disable_fast_clk;
        }
 
        ret = clk_enable(i2c_dev->div_clk);
        if (ret < 0) {
                dev_err(i2c_dev->dev,
                        "Enabling div clk failed, err %d\n", ret);
-               clk_disable(i2c_dev->fast_clk);
-               return ret;
+               goto disable_slow_clk;
+       }
+
+       /*
+        * The VI I2C device is attached to the VE power domain, which is
+        * powered on and off across runtime PM resume/suspend. The
+        * controller therefore goes through reset and must be
+        * re-initialized after the power domain comes back on.
+        */
+       if (i2c_dev->is_vi) {
+               ret = tegra_i2c_init(i2c_dev, true);
+               if (ret)
+                       goto disable_div_clk;
        }
 
        return 0;
+
+disable_div_clk:
+       clk_disable(i2c_dev->div_clk);
+disable_slow_clk:
+       clk_disable(i2c_dev->slow_clk);
+disable_fast_clk:
+       clk_disable(i2c_dev->fast_clk);
+       return ret;
 }
 
 static int __maybe_unused tegra_i2c_runtime_suspend(struct device *dev)
@@ -688,12 +705,8 @@ static int __maybe_unused tegra_i2c_runtime_suspend(struct device *dev)
        struct tegra_i2c_dev *i2c_dev = dev_get_drvdata(dev);
 
        clk_disable(i2c_dev->div_clk);
-
-       if (i2c_dev->slow_clk)
-               clk_disable(i2c_dev->slow_clk);
-
-       if (!i2c_dev->hw->has_single_clk_source)
-               clk_disable(i2c_dev->fast_clk);
+       clk_disable(i2c_dev->slow_clk);
+       clk_disable(i2c_dev->fast_clk);
 
        return pinctrl_pm_select_idle_state(i2c_dev->dev);
 }
@@ -1716,20 +1729,16 @@ static int tegra_i2c_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, i2c_dev);
 
-       if (!i2c_dev->hw->has_single_clk_source) {
-               ret = clk_prepare(i2c_dev->fast_clk);
-               if (ret < 0) {
-                       dev_err(i2c_dev->dev, "Clock prepare failed %d\n", ret);
-                       return ret;
-               }
+       ret = clk_prepare(i2c_dev->fast_clk);
+       if (ret < 0) {
+               dev_err(i2c_dev->dev, "Clock prepare failed %d\n", ret);
+               return ret;
        }
 
-       if (i2c_dev->slow_clk) {
-               ret = clk_prepare(i2c_dev->slow_clk);
-               if (ret < 0) {
-                       dev_err(dev, "failed to prepare slow clock: %d\n", ret);
-                       goto unprepare_fast_clk;
-               }
+       ret = clk_prepare(i2c_dev->slow_clk);
+       if (ret < 0) {
+               dev_err(dev, "failed to prepare slow clock: %d\n", ret);
+               goto unprepare_fast_clk;
        }
 
        if (i2c_dev->bus_clk_rate > I2C_MAX_FAST_MODE_FREQ &&
@@ -1750,7 +1759,15 @@ static int tegra_i2c_probe(struct platform_device *pdev)
                goto unprepare_slow_clk;
        }
 
-       pm_runtime_irq_safe(&pdev->dev);
+       /*
+        * VI I2C is in the VE power domain, which is not always on and is
+        * not IRQ safe. An IRQ-safe device can't be attached to a
+        * non-IRQ-safe domain, as that prevents powering off the PM domain.
+        * The VI I2C device also doesn't need runtime IRQ safety, since it
+        * won't be used for atomic transfers.
+        */
+       if (!i2c_dev->is_vi)
+               pm_runtime_irq_safe(&pdev->dev);
        pm_runtime_enable(&pdev->dev);
        if (!pm_runtime_enabled(&pdev->dev)) {
                ret = tegra_i2c_runtime_resume(&pdev->dev);
@@ -1835,12 +1852,10 @@ unprepare_div_clk:
        clk_unprepare(i2c_dev->div_clk);
 
 unprepare_slow_clk:
-       if (i2c_dev->is_vi)
-               clk_unprepare(i2c_dev->slow_clk);
+       clk_unprepare(i2c_dev->slow_clk);
 
 unprepare_fast_clk:
-       if (!i2c_dev->hw->has_single_clk_source)
-               clk_unprepare(i2c_dev->fast_clk);
+       clk_unprepare(i2c_dev->fast_clk);
 
        return ret;
 }
@@ -1859,12 +1874,8 @@ static int tegra_i2c_remove(struct platform_device *pdev)
                tegra_i2c_runtime_suspend(&pdev->dev);
 
        clk_unprepare(i2c_dev->div_clk);
-
-       if (i2c_dev->slow_clk)
-               clk_unprepare(i2c_dev->slow_clk);
-
-       if (!i2c_dev->hw->has_single_clk_source)
-               clk_unprepare(i2c_dev->fast_clk);
+       clk_unprepare(i2c_dev->slow_clk);
+       clk_unprepare(i2c_dev->fast_clk);
 
        tegra_i2c_release_dma(i2c_dev);
        return 0;
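
Two things make the Tegra clock paths above shorter: clk_prepare()/clk_enable() treat a NULL clock as a no-op that returns 0, so the has_single_clk_source and slow_clk guards can go, and the error paths now unwind with the usual goto ladder so every successfully enabled clock is disabled again on failure. The ladder on its own, for a hypothetical foo_* device:

static int foo_enable_clocks(struct foo_dev *d)
{
        int ret;

        ret = clk_enable(d->fast_clk);  /* NULL clk: returns 0, does nothing */
        if (ret)
                return ret;

        ret = clk_enable(d->slow_clk);
        if (ret)
                goto disable_fast;

        ret = clk_enable(d->div_clk);
        if (ret)
                goto disable_slow;

        return 0;

disable_slow:
        clk_disable(d->slow_clk);
disable_fast:
        clk_disable(d->fast_clk);
        return ret;
}
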
index 4abc777..970ccdc 100644 (file)
@@ -228,7 +228,7 @@ static s32 vt596_access(struct i2c_adapter *adap, u16 addr,
                        goto exit_unsupported;
                if (read_write == I2C_SMBUS_READ)
                        outb_p(data->block[0], SMBHSTDAT0);
-               /* Fall through */
+               fallthrough;
        case I2C_SMBUS_BLOCK_DATA:
                outb_p(command, SMBHSTCMD);
                if (read_write == I2C_SMBUS_WRITE) {
@@ -489,9 +489,9 @@ static void __exit i2c_vt596_exit(void)
        }
 }
 
-MODULE_AUTHOR("Kyosti Malkki <kmalkki@cc.hut.fi>, "
-             "Mark D. Studebaker <mdsxyz123@yahoo.com> and "
-             "Jean Delvare <jdelvare@suse.de>");
+MODULE_AUTHOR("Kyosti Malkki <kmalkki@cc.hut.fi>");
+MODULE_AUTHOR("Mark D. Studebaker <mdsxyz123@yahoo.com>");
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
 MODULE_DESCRIPTION("vt82c596 SMBus driver");
 MODULE_LICENSE("GPL");
 
index bd9afa3..7b42a18 100644 (file)
@@ -151,7 +151,7 @@ static void scx200_acb_machine(struct scx200_acb_iface *iface, u8 status)
 
        case state_repeat_start:
                outb(inb(ACBCTL1) | ACBCTL1_START, ACBCTL1);
-               /* fallthrough */
+               fallthrough;
 
        case state_quick:
                if (iface->address_byte & 1) {
index 26f03a1..34a9609 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/of_device.h>
 #include <linux/of.h>
 #include <linux/of_irq.h>
+#include <linux/pinctrl/consumer.h>
 #include <linux/pm_domain.h>
 #include <linux/pm_runtime.h>
 #include <linux/pm_wakeirq.h>
@@ -181,6 +182,8 @@ int i2c_generic_scl_recovery(struct i2c_adapter *adap)
 
        if (bri->prepare_recovery)
                bri->prepare_recovery(adap);
+       if (bri->pinctrl)
+               pinctrl_select_state(bri->pinctrl, bri->pins_gpio);
 
        /*
         * If we can set SDA, we will always create a STOP to ensure additional
@@ -236,6 +239,8 @@ int i2c_generic_scl_recovery(struct i2c_adapter *adap)
 
        if (bri->unprepare_recovery)
                bri->unprepare_recovery(adap);
+       if (bri->pinctrl)
+               pinctrl_select_state(bri->pinctrl, bri->pins_default);
 
        return ret;
 }
@@ -251,13 +256,135 @@ int i2c_recover_bus(struct i2c_adapter *adap)
 }
 EXPORT_SYMBOL_GPL(i2c_recover_bus);
 
-static void i2c_init_recovery(struct i2c_adapter *adap)
+static void i2c_gpio_init_pinctrl_recovery(struct i2c_adapter *adap)
+{
+       struct i2c_bus_recovery_info *bri = adap->bus_recovery_info;
+       struct device *dev = &adap->dev;
+       struct pinctrl *p = bri->pinctrl;
+
+       /*
+        * we can't change states without pinctrl, so remove the states if
+        * populated
+        */
+       if (!p) {
+               bri->pins_default = NULL;
+               bri->pins_gpio = NULL;
+               return;
+       }
+
+       if (!bri->pins_default) {
+               bri->pins_default = pinctrl_lookup_state(p,
+                                                        PINCTRL_STATE_DEFAULT);
+               if (IS_ERR(bri->pins_default)) {
+                       dev_dbg(dev, PINCTRL_STATE_DEFAULT " state not found for GPIO recovery\n");
+                       bri->pins_default = NULL;
+               }
+       }
+       if (!bri->pins_gpio) {
+               bri->pins_gpio = pinctrl_lookup_state(p, "gpio");
+               if (IS_ERR(bri->pins_gpio))
+                       bri->pins_gpio = pinctrl_lookup_state(p, "recovery");
+
+               if (IS_ERR(bri->pins_gpio)) {
+                       dev_dbg(dev, "no gpio or recovery state found for GPIO recovery\n");
+                       bri->pins_gpio = NULL;
+               }
+       }
+
+       /* for pinctrl state changes, we need all the information */
+       if (bri->pins_default && bri->pins_gpio) {
+               dev_info(dev, "using pinctrl states for GPIO recovery");
+       } else {
+               bri->pinctrl = NULL;
+               bri->pins_default = NULL;
+               bri->pins_gpio = NULL;
+       }
+}
+
+static int i2c_gpio_init_generic_recovery(struct i2c_adapter *adap)
+{
+       struct i2c_bus_recovery_info *bri = adap->bus_recovery_info;
+       struct device *dev = &adap->dev;
+       struct gpio_desc *gpiod;
+       int ret = 0;
+
+       /*
+        * don't touch the recovery information if the driver is not using
+        * generic SCL recovery
+        */
+       if (bri->recover_bus && bri->recover_bus != i2c_generic_scl_recovery)
+               return 0;
+
+       /*
+        * pins might be taken as GPIO, so we should inform pinctrl about
+        * this and move the state to GPIO
+        */
+       if (bri->pinctrl)
+               pinctrl_select_state(bri->pinctrl, bri->pins_gpio);
+
+       /*
+        * if there is incomplete or no recovery information, see if generic
+        * GPIO recovery is available
+        */
+       if (!bri->scl_gpiod) {
+               gpiod = devm_gpiod_get(dev, "scl", GPIOD_OUT_HIGH_OPEN_DRAIN);
+               if (PTR_ERR(gpiod) == -EPROBE_DEFER) {
+                       ret  = -EPROBE_DEFER;
+                       goto cleanup_pinctrl_state;
+               }
+               if (!IS_ERR(gpiod)) {
+                       bri->scl_gpiod = gpiod;
+                       bri->recover_bus = i2c_generic_scl_recovery;
+                       dev_info(dev, "using generic GPIOs for recovery\n");
+               }
+       }
+
+       /* SDA GPIOD line is optional, so we care about DEFER only */
+       if (!bri->sda_gpiod) {
+               /*
+                * We have SCL. Pull SCL low and wait a bit so that SDA glitches
+                * have no effect.
+                */
+               gpiod_direction_output(bri->scl_gpiod, 0);
+               udelay(10);
+               gpiod = devm_gpiod_get(dev, "sda", GPIOD_IN);
+
+               /* Wait a bit in case of a SDA glitch, and then release SCL. */
+               udelay(10);
+               gpiod_direction_output(bri->scl_gpiod, 1);
+
+               if (PTR_ERR(gpiod) == -EPROBE_DEFER) {
+                       ret = -EPROBE_DEFER;
+                       goto cleanup_pinctrl_state;
+               }
+               if (!IS_ERR(gpiod))
+                       bri->sda_gpiod = gpiod;
+       }
+
+cleanup_pinctrl_state:
+       /* change the state of the pins back to their default state */
+       if (bri->pinctrl)
+               pinctrl_select_state(bri->pinctrl, bri->pins_default);
+
+       return ret;
+}
+
+static int i2c_gpio_init_recovery(struct i2c_adapter *adap)
+{
+       i2c_gpio_init_pinctrl_recovery(adap);
+       return i2c_gpio_init_generic_recovery(adap);
+}
+
+static int i2c_init_recovery(struct i2c_adapter *adap)
 {
        struct i2c_bus_recovery_info *bri = adap->bus_recovery_info;
        char *err_str;
 
        if (!bri)
-               return;
+               return 0;
+
+       if (i2c_gpio_init_recovery(adap) == -EPROBE_DEFER)
+               return -EPROBE_DEFER;
 
        if (!bri->recover_bus) {
                err_str = "no recover_bus() found";
@@ -273,10 +400,7 @@ static void i2c_init_recovery(struct i2c_adapter *adap)
                        if (gpiod_get_direction(bri->sda_gpiod) == 0)
                                bri->set_sda = set_sda_gpio_value;
                }
-               return;
-       }
-
-       if (bri->recover_bus == i2c_generic_scl_recovery) {
+       } else if (bri->recover_bus == i2c_generic_scl_recovery) {
                /* Generic SCL recovery */
                if (!bri->set_scl || !bri->get_scl) {
                        err_str = "no {get|set}_scl() found";
@@ -288,10 +412,12 @@ static void i2c_init_recovery(struct i2c_adapter *adap)
                }
        }
 
-       return;
+       return 0;
  err:
        dev_err(&adap->dev, "Not using recovery: %s\n", err_str);
        adap->bus_recovery_info = NULL;
+
+       return -EINVAL;
 }
 
 static int i2c_smbus_host_notify_to_irq(const struct i2c_client *client)
@@ -319,11 +445,9 @@ static int i2c_device_probe(struct device *dev)
        if (!client)
                return 0;
 
-       driver = to_i2c_driver(dev->driver);
-
        client->irq = client->init_irq;
 
-       if (!client->irq && !driver->disable_i2c_core_irq_mapping) {
+       if (!client->irq) {
                int irq = -ENOENT;
 
                if (client->flags & I2C_CLIENT_HOST_NOTIFY) {
@@ -349,6 +473,8 @@ static int i2c_device_probe(struct device *dev)
                client->irq = irq;
        }
 
+       driver = to_i2c_driver(dev->driver);
+
        /*
         * An I2C ID table is not mandatory, if and only if, a suitable OF
         * or ACPI ID table is supplied for the probing device.
@@ -1227,7 +1353,7 @@ static int i2c_setup_host_notify_irq_domain(struct i2c_adapter *adap)
        if (!i2c_check_functionality(adap, I2C_FUNC_SMBUS_HOST_NOTIFY))
                return 0;
 
-       domain = irq_domain_create_linear(adap->dev.fwnode,
+       domain = irq_domain_create_linear(adap->dev.parent->fwnode,
                                          I2C_ADDR_7BITS_COUNT,
                                          &i2c_host_notify_irq_ops, adap);
        if (!domain)
@@ -1318,12 +1444,16 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
        if (res)
                goto out_reg;
 
-       dev_dbg(&adap->dev, "adapter [%s] registered\n", adap->name);
-
        pm_runtime_no_callbacks(&adap->dev);
        pm_suspend_ignore_children(&adap->dev, true);
        pm_runtime_enable(&adap->dev);
 
+       res = i2c_init_recovery(adap);
+       if (res == -EPROBE_DEFER)
+               goto out_reg;
+
+       dev_dbg(&adap->dev, "adapter [%s] registered\n", adap->name);
+
 #ifdef CONFIG_I2C_COMPAT
        res = class_compat_create_link(i2c_adapter_compat_class, &adap->dev,
                                       adap->dev.parent);
@@ -1332,8 +1462,6 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
                         "Failed to create compatibility class link\n");
 #endif
 
-       i2c_init_recovery(adap);
-
        /* create pre-declared device nodes */
        of_i2c_register_devices(adap);
        i2c_acpi_register_devices(adap);
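
Because i2c_init_recovery() now runs inside i2c_register_adapter() and can hit -EPROBE_DEFER while the recovery GPIOs are still missing, adapter registration itself can defer; a bus driver only has to propagate the return value of i2c_add_adapter() out of its probe. A sketch of the caller side, hypothetical foo_* driver:

static int foo_register_adapter(struct foo_dev *d)
{
        int ret;

        ret = i2c_add_adapter(&d->adapter);
        if (ret)        /* may be -EPROBE_DEFER until recovery GPIOs show up */
                return ret;

        return 0;
}
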
index da020ac..6ceb11c 100644 (file)
@@ -761,8 +761,8 @@ static void __exit i2c_dev_exit(void)
        unregister_chrdev_region(MKDEV(I2C_MAJOR, 0), I2C_MINORS);
 }
 
-MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl> and "
-               "Simon G. Vogl <simon@tk.uni-linz.ac.at>");
+MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl>");
+MODULE_AUTHOR("Simon G. Vogl <simon@tk.uni-linz.ac.at>");
 MODULE_DESCRIPTION("I2C /dev entries driver");
 MODULE_LICENSE("GPL");
 
index 593f2fd..5c7ae42 100644 (file)
@@ -66,7 +66,7 @@ static int i2c_slave_eeprom_slave_cb(struct i2c_client *client,
        case I2C_SLAVE_READ_PROCESSED:
                /* The previous byte made it to the bus, get next one */
                eeprom->buffer_idx++;
-               /* fallthrough */
+               fallthrough;
        case I2C_SLAVE_READ_REQUESTED:
                spin_lock(&eeprom->buffer_lock);
                *val = eeprom->buffer[eeprom->buffer_idx & eeprom->address_mask];
index ef0cd29..c36b4d2 100644 (file)
@@ -2751,7 +2751,7 @@ static int __init ib_core_init(void)
 
        ret = addr_init();
        if (ret) {
-               pr_warn("Could't init IB address resolution\n");
+               pr_warn("Couldn't init IB address resolution\n");
                goto err_ibnl;
        }
 
index 5e32f61..cc6b4be 100644 (file)
@@ -439,7 +439,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 user_virt,
                 * complex (and doesn't gain us much performance in most use
                 * cases).
                 */
-               npages = get_user_pages_remote(owning_process, owning_mm,
+               npages = get_user_pages_remote(owning_mm,
                                user_virt, gup_num_pages,
                                flags, local_page_list, NULL, NULL);
                mmap_read_unlock(owning_mm);
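
The one-argument-shorter get_user_pages_remote() call reflects the GUP cleanup that landed in the same merge window: the task_struct parameter is gone and the function pins pages against an mm_struct alone. The prototype the caller above is built against, as used in this tree:

long get_user_pages_remote(struct mm_struct *mm,
                           unsigned long start, unsigned long nr_pages,
                           unsigned int gup_flags, struct page **pages,
                           struct vm_area_struct **vmas, int *locked);
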
index dad0df8..17ac8b7 100644 (file)
@@ -821,7 +821,8 @@ static int bnxt_re_handle_qp_async_event(struct creq_qp_event *qp_event,
        struct ib_event event;
        unsigned int flags;
 
-       if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
+       if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR &&
+           rdma_is_kernel_res(&qp->ib_qp.res)) {
                flags = bnxt_re_lock_cqs(qp);
                bnxt_qplib_add_flush_qp(&qp->qplib_qp);
                bnxt_re_unlock_cqs(qp, flags);
index 9af82ff..73d197e 100644 (file)
@@ -3215,6 +3215,7 @@ bool hfi1_tid_rdma_wqe_interlock(struct rvt_qp *qp, struct rvt_swqe *wqe)
        case IB_WR_ATOMIC_CMP_AND_SWP:
        case IB_WR_ATOMIC_FETCH_AND_ADD:
        case IB_WR_RDMA_WRITE:
+       case IB_WR_RDMA_WRITE_WITH_IMM:
                switch (prev->wr.opcode) {
                case IB_WR_TID_RDMA_WRITE:
                        req = wqe_to_tid_req(prev);
index da9888d..6edcbdc 100644 (file)
@@ -65,8 +65,6 @@
 #define HNS_ROCE_CQE_WCMD_EMPTY_BIT            0x2
 #define HNS_ROCE_MIN_CQE_CNT                   16
 
-#define HNS_ROCE_RESERVED_SGE                  1
-
 #define HNS_ROCE_MAX_IRQ_NUM                   128
 
 #define HNS_ROCE_SGE_IN_WQE                    2
index 07b4c85..aeb3a6f 100644 (file)
@@ -535,7 +535,7 @@ static void hns_roce_set_sdb_ext(struct hns_roce_dev *hr_dev, u32 ext_sdb_alept,
        roce_write(hr_dev, ROCEE_EXT_DB_SQ_H_REG, val);
 
        dev_dbg(dev, "ext SDB depth: 0x%x\n", db->ext_db->esdb_dep);
-       dev_dbg(dev, "ext SDB threshold: epmty: 0x%x, ful: 0x%x\n",
+       dev_dbg(dev, "ext SDB threshold: empty: 0x%x, full: 0x%x\n",
                ext_sdb_alept, ext_sdb_alful);
 }
 
index d296859..4cda95e 100644 (file)
@@ -633,7 +633,7 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
 
                wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1);
 
-               if (unlikely(wr->num_sge >= hr_qp->rq.max_gs)) {
+               if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
                        ibdev_err(ibdev, "rq:num_sge=%d >= qp->sq.max_gs=%d\n",
                                  wr->num_sge, hr_qp->rq.max_gs);
                        ret = -EINVAL;
@@ -653,7 +653,6 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
                if (wr->num_sge < hr_qp->rq.max_gs) {
                        dseg->lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
                        dseg->addr = 0;
-                       dseg->len = cpu_to_le32(HNS_ROCE_INVALID_SGE_LENGTH);
                }
 
                /* rq support inline data */
@@ -787,8 +786,8 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
                }
 
                if (wr->num_sge < srq->max_gs) {
-                       dseg[i].len = cpu_to_le32(HNS_ROCE_INVALID_SGE_LENGTH);
-                       dseg[i].lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
+                       dseg[i].len = 0;
+                       dseg[i].lkey = cpu_to_le32(0x100);
                        dseg[i].addr = 0;
                }
 
@@ -5070,7 +5069,7 @@ static int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
 
        attr->srq_limit = limit_wl;
        attr->max_wr = srq->wqe_cnt - 1;
-       attr->max_sge = srq->max_gs - HNS_ROCE_RESERVED_SGE;
+       attr->max_sge = srq->max_gs;
 
 out:
        hns_roce_free_cmd_mailbox(hr_dev, mailbox);
index 1fb1c58..ac29be4 100644 (file)
@@ -92,9 +92,7 @@
 #define HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ         PAGE_SIZE
 #define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED                0xFFFFF000
 #define HNS_ROCE_V2_MAX_INNER_MTPT_NUM         2
-#define HNS_ROCE_INVALID_LKEY                  0x0
-#define HNS_ROCE_INVALID_SGE_LENGTH            0x80000000
-
+#define HNS_ROCE_INVALID_LKEY                  0x100
 #define HNS_ROCE_CMQ_TX_TIMEOUT                        30000
 #define HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE       2
 #define HNS_ROCE_V2_RSV_QPS                    8
index e94ca13..c063c45 100644 (file)
@@ -386,8 +386,7 @@ static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
                return -EINVAL;
        }
 
-       hr_qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge) +
-                                             HNS_ROCE_RESERVED_SGE);
+       hr_qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge));
 
        if (hr_dev->caps.max_rq_sg <= HNS_ROCE_SGE_IN_WQE)
                hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz);
@@ -402,7 +401,7 @@ static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
                hr_qp->rq_inl_buf.wqe_cnt = 0;
 
        cap->max_recv_wr = cnt;
-       cap->max_recv_sge = hr_qp->rq.max_gs - HNS_ROCE_RESERVED_SGE;
+       cap->max_recv_sge = hr_qp->rq.max_gs;
 
        return 0;
 }
index f40a000..b9e2dbd 100644 (file)
@@ -297,7 +297,7 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
        spin_lock_init(&srq->lock);
 
        srq->wqe_cnt = roundup_pow_of_two(init_attr->attr.max_wr + 1);
-       srq->max_gs = init_attr->attr.max_sge + HNS_ROCE_RESERVED_SGE;
+       srq->max_gs = init_attr->attr.max_sge;
 
        if (udata) {
                ret = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
index c9abe1c..662e7fc 100644 (file)
@@ -120,7 +120,7 @@ static void usnic_ib_qp_grp_modify_active_to_err(struct usnic_ib_dev *us_ibdev)
                                                                IB_QPS_ERR,
                                                                NULL);
                                if (status) {
-                                       usnic_err("Failed to transistion qp grp %u from %s to %s\n",
+                                       usnic_err("Failed to transition qp grp %u from %s to %s\n",
                                                qp_grp->grp_id,
                                                usnic_ib_qp_grp_state_to_string
                                                (cur_state),
index a81e141..f699538 100644 (file)
@@ -16,7 +16,7 @@ static void copy_abs(struct input_dev *dev, unsigned int dst, unsigned int src)
        if (dev->absinfo && test_bit(src, dev->absbit)) {
                dev->absinfo[dst] = dev->absinfo[src];
                dev->absinfo[dst].fuzz = 0;
-               dev->absbit[BIT_WORD(dst)] |= BIT_MASK(dst);
+               __set_bit(dst, dev->absbit);
        }
 }
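The one-line change above swaps an open-coded absbit update for __set_bit(), the kernel's non-atomic bit helper; both forms compute the same word index and mask. A small stand-alone check of that equivalence, with BIT_WORD()/BIT_MASK() reproduced from their kernel definitions:

#include <stdio.h>

#define BITS_PER_LONG   (8 * sizeof(unsigned long))
#define BIT_WORD(nr)    ((nr) / BITS_PER_LONG)
#define BIT_MASK(nr)    (1UL << ((nr) % BITS_PER_LONG))

/* What __set_bit(nr, addr) boils down to: a plain (non-atomic) OR of the
 * right mask into the right word of the bitmap. */
static void set_bit_nonatomic(unsigned int nr, unsigned long *addr)
{
        addr[BIT_WORD(nr)] |= BIT_MASK(nr);
}

int main(void)
{
        unsigned long absbit[4] = { 0 };        /* plenty of room for the bits below */

        set_bit_nonatomic(3, absbit);
        set_bit_nonatomic(65, absbit);
        printf("%#lx %#lx\n", absbit[0], absbit[1]);    /* 0x8 0x2 on 64-bit */
        return 0;
}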
 
index a7bc576..434d265 100644 (file)
@@ -247,7 +247,7 @@ static unsigned char db9_saturn_read_packet(struct parport *port, unsigned char
                        db9_saturn_write_sub(port, type, 3, powered, 0);
                        return data[0] = 0xe3;
                }
-               /* fall through */
+               fallthrough;
        default:
                return data[0];
        }
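Most of the input-driver hunks in this pull are mechanical: every '/* fall through */' comment becomes the fallthrough pseudo-keyword, which the kernel defines in include/linux/compiler_attributes.h as __attribute__((__fallthrough__)) when the compiler supports it, so -Wimplicit-fallthrough is satisfied by a real statement rather than by comment parsing. A compilable sketch with a local stand-in definition:

#include <stdio.h>

/* Local stand-in for the kernel macro, just to keep the sketch buildable
 * outside the tree. */
#if defined(__has_attribute)
# if __has_attribute(__fallthrough__)
#  define fallthrough   __attribute__((__fallthrough__))
# endif
#endif
#ifndef fallthrough
# define fallthrough    do {} while (0)
#endif

static int axes_for(int pad_type)
{
        int axes = 0;

        switch (pad_type) {
        case 3:                 /* analog pad: one extra axis, then the rest */
                axes++;
                fallthrough;    /* a statement, so the compiler knows it is deliberate */
        case 2:
                axes++;
                fallthrough;
        case 1:
                axes++;
                break;
        }
        return axes;
}

int main(void)
{
        printf("%d\n", axes_for(3));    /* prints 3 */
        return 0;
}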
@@ -267,14 +267,14 @@ static int db9_saturn_report(unsigned char id, unsigned char data[60], struct in
                switch (data[j]) {
                case 0x16: /* multi controller (analog 4 axis) */
                        input_report_abs(dev, db9_abs[5], data[j + 6]);
-                       /* fall through */
+                       fallthrough;
                case 0x15: /* mission stick (analog 3 axis) */
                        input_report_abs(dev, db9_abs[3], data[j + 4]);
                        input_report_abs(dev, db9_abs[4], data[j + 5]);
-                       /* fall through */
+                       fallthrough;
                case 0x13: /* racing controller (analog 1 axis) */
                        input_report_abs(dev, db9_abs[2], data[j + 3]);
-                       /* fall through */
+                       fallthrough;
                case 0x34: /* saturn keyboard (udlr ZXC ASD QE Esc) */
                case 0x02: /* digital pad (digital 2 axis + buttons) */
                        input_report_abs(dev, db9_abs[0], !(data[j + 1] & 128) - !(data[j + 1] & 64));
@@ -368,7 +368,7 @@ static void db9_timer(struct timer_list *t)
                        input_report_abs(dev2, ABS_X, (data & DB9_RIGHT ? 0 : 1) - (data & DB9_LEFT ? 0 : 1));
                        input_report_abs(dev2, ABS_Y, (data & DB9_DOWN  ? 0 : 1) - (data & DB9_UP   ? 0 : 1));
                        input_report_key(dev2, BTN_TRIGGER, ~data & DB9_FIRE1);
-                       /* fall through */
+                       fallthrough;
 
                case DB9_MULTI_0802:
 
index e0a362b..88df68c 100644 (file)
@@ -485,7 +485,7 @@ static void gc_multi_process_packet(struct gc *gc)
                switch (pad->type) {
                case GC_MULTI2:
                        input_report_key(dev, BTN_THUMB, s & data[5]);
-                       /* fall through */
+                       fallthrough;
 
                case GC_MULTI:
                        input_report_abs(dev, ABS_X,
@@ -638,7 +638,7 @@ static void gc_psx_report_one(struct gc_pad *pad, unsigned char psx_type,
 
                input_report_key(dev, BTN_THUMBL, ~data[0] & 0x04);
                input_report_key(dev, BTN_THUMBR, ~data[0] & 0x02);
-               /* fall through */
+               fallthrough;
 
        case GC_PSX_NEGCON:
        case GC_PSX_ANALOG:
@@ -872,7 +872,8 @@ static int gc_setup_pad(struct gc *gc, int idx, int pad_type)
        case GC_SNES:
                for (i = 4; i < 8; i++)
                        input_set_capability(input_dev, EV_KEY, gc_snes_btn[i]);
-               /* fall through */
+               fallthrough;
+
        case GC_NES:
                for (i = 0; i < 4; i++)
                        input_set_capability(input_dev, EV_KEY, gc_snes_btn[i]);
@@ -880,7 +881,8 @@ static int gc_setup_pad(struct gc *gc, int idx, int pad_type)
 
        case GC_MULTI2:
                input_set_capability(input_dev, EV_KEY, BTN_THUMB);
-               /* fall through */
+               fallthrough;
+
        case GC_MULTI:
                input_set_capability(input_dev, EV_KEY, BTN_TRIGGER);
                /* fall through */
index 1777e68..fac91ea 100644 (file)
@@ -656,16 +656,19 @@ static int sw_connect(struct gameport *gameport, struct gameport_driver *drv)
 
                        switch (i * m) {
                                case 60:
-                                       sw->number++;                   /* fall through */
+                                       sw->number++;
+                                       fallthrough;
                                case 45:                                /* Ambiguous packet length */
                                        if (j <= 40) {                  /* ID length less or eq 40 -> FSP */
                                case 43:
                                                sw->type = SW_ID_FSP;
                                                break;
                                        }
-                                       sw->number++;                   /* fall through */
+                                       sw->number++;
+                                       fallthrough;
                                case 30:
-                                       sw->number++;                   /* fall through */
+                                       sw->number++;
+                                       fallthrough;
                                case 15:
                                        sw->type = SW_ID_GP;
                                        break;
@@ -681,9 +684,11 @@ static int sw_connect(struct gameport *gameport, struct gameport_driver *drv)
                                                sw->type = SW_ID_PP;
                                        break;
                                case 66:
-                                       sw->bits = 3;                   /* fall through */
+                                       sw->bits = 3;
+                                       fallthrough;
                                case 198:
-                                       sw->length = 22;                /* fall through */
+                                       sw->length = 22;
+                                       fallthrough;
                                case 64:
                                        sw->type = SW_ID_3DP;
                                        if (j == 160)
index cf7cbcd..429411c 100644 (file)
@@ -146,7 +146,7 @@ static irqreturn_t spaceball_interrupt(struct serio *serio,
                                break;
                        }
                        spaceball->escape = 0;
-                       /* fall through */
+                       fallthrough;
                case 'M':
                case 'Q':
                case 'S':
@@ -154,7 +154,7 @@ static irqreturn_t spaceball_interrupt(struct serio *serio,
                                spaceball->escape = 0;
                                data &= 0x1f;
                        }
-                       /* fall through */
+                       fallthrough;
                default:
                        if (spaceball->escape)
                                spaceball->escape = 0;
@@ -220,13 +220,13 @@ static int spaceball_connect(struct serio *serio, struct serio_driver *drv)
                        input_dev->keybit[BIT_WORD(BTN_A)] |= BIT_MASK(BTN_A) |
                                BIT_MASK(BTN_B) | BIT_MASK(BTN_C) |
                                BIT_MASK(BTN_MODE);
-                       /* fall through */
+                       fallthrough;
                default:
                        input_dev->keybit[BIT_WORD(BTN_0)] |= BIT_MASK(BTN_2) |
                                BIT_MASK(BTN_3) | BIT_MASK(BTN_4) |
                                BIT_MASK(BTN_5) | BIT_MASK(BTN_6) |
                                BIT_MASK(BTN_7) | BIT_MASK(BTN_8);
-                       /* fall through */
+                       fallthrough;
                case SPACEBALL_3003C:
                        input_dev->keybit[BIT_WORD(BTN_0)] |= BIT_MASK(BTN_1) |
                                BIT_MASK(BTN_8);
index e7d58e7..eb0e9cd 100644 (file)
@@ -1016,7 +1016,7 @@ static int adp5589_probe(struct i2c_client *client,
        switch (id->driver_data) {
        case ADP5585_02:
                kpad->support_row5 = true;
-               /* fall through */
+               fallthrough;
        case ADP5585_01:
                kpad->is_adp5585 = true;
                kpad->var = &const_adp5585;
index 6ec2826..edc613e 100644 (file)
@@ -1241,7 +1241,7 @@ static int atkbd_connect(struct serio *serio, struct serio_driver *drv)
 
        case SERIO_8042_XL:
                atkbd->translated = true;
-               /* Fall through */
+               fallthrough;
 
        case SERIO_8042:
                if (serio->write)
index 53c9ff3..f2d4e4d 100644 (file)
@@ -574,7 +574,6 @@ static int gpio_keys_setup_key(struct platform_device *pdev,
                                IRQ_TYPE_EDGE_RISING : IRQ_TYPE_EDGE_FALLING;
                        break;
                case EV_ACT_ANY:
-                       /* fall through */
                default:
                        /*
                         * For other cases, we are OK letting suspend/resume
index 305f016..8a36d78 100644 (file)
@@ -68,7 +68,7 @@ static int ati_remote2_get_channel_mask(char *buffer,
 {
        pr_debug("%s()\n", __func__);
 
-       return sprintf(buffer, "0x%04x", *(unsigned int *)kp->arg);
+       return sprintf(buffer, "0x%04x\n", *(unsigned int *)kp->arg);
 }
 
 static int ati_remote2_set_mode_mask(const char *val,
@@ -84,7 +84,7 @@ static int ati_remote2_get_mode_mask(char *buffer,
 {
        pr_debug("%s()\n", __func__);
 
-       return sprintf(buffer, "0x%02x", *(unsigned int *)kp->arg);
+       return sprintf(buffer, "0x%02x\n", *(unsigned int *)kp->arg);
 }
 
 static unsigned int channel_mask = ATI_REMOTE2_MAX_CHANNEL_MASK;
index c09b962..e413801 100644 (file)
@@ -663,12 +663,8 @@ static const struct usb_device_id cm109_usb_table[] = {
 static void cm109_usb_cleanup(struct cm109_dev *dev)
 {
        kfree(dev->ctl_req);
-       if (dev->ctl_data)
-               usb_free_coherent(dev->udev, USB_PKT_LEN,
-                                 dev->ctl_data, dev->ctl_dma);
-       if (dev->irq_data)
-               usb_free_coherent(dev->udev, USB_PKT_LEN,
-                                 dev->irq_data, dev->irq_dma);
+       usb_free_coherent(dev->udev, USB_PKT_LEN, dev->ctl_data, dev->ctl_dma);
+       usb_free_coherent(dev->udev, USB_PKT_LEN, dev->irq_data, dev->irq_dma);
 
        usb_free_urb(dev->urb_irq);     /* parameter validation in core/urb */
        usb_free_urb(dev->urb_ctl);     /* parameter validation in core/urb */
index d8dbfc0..08b9b5c 100644 (file)
@@ -335,7 +335,7 @@ static int ims_pcu_setup_gamepad(struct ims_pcu *pcu)
 err_free_mem:
        input_free_device(input);
        kfree(gamepad);
-       return -ENOMEM;
+       return error;
 }
 
 static void ims_pcu_destroy_gamepad(struct ims_pcu *pcu)
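The ims_pcu fix above is about error propagation: the cleanup path used to return a blanket -ENOMEM even when the failure came from a later call, and now it returns the error code that was actually captured. A generic sketch of the pattern (the helper names here are illustrative, not from the driver):

#include <errno.h>
#include <stdio.h>

static int do_alloc(int fail)    { return fail == 1 ? -ENOMEM : 0; }
static int do_register(int fail) { return fail == 2 ? -EIO : 0; }

static int setup(int fail)
{
        int error;

        error = do_alloc(fail);
        if (error)
                goto err_out;

        error = do_register(fail);
        if (error)
                goto err_free;

        return 0;

err_free:
        /* undo do_alloc() here */
err_out:
        return error;   /* propagate the real cause, e.g. -EIO, not a fixed -ENOMEM */
}

int main(void)
{
        printf("%d %d\n", setup(1), setup(2));  /* -12 -5 with Linux errno values */
        return 0;
}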
index 6699eb1..a348247 100644 (file)
@@ -575,8 +575,7 @@ static int iqs269_parse_chan(struct iqs269_private *iqs269,
 
                case IQS269_LOCAL_CAP_SIZE_GLOBAL_0pF5:
                        engine_a |= IQS269_CHx_ENG_A_LOCAL_CAP_SIZE;
-
-                       /* fall through */
+                       fallthrough;
 
                case IQS269_LOCAL_CAP_SIZE_GLOBAL_ONLY:
                        engine_b |= IQS269_CHx_ENG_B_LOCAL_CAP_ENABLE;
@@ -731,14 +730,12 @@ static int iqs269_parse_chan(struct iqs269_private *iqs269,
                                iqs269->switches[i].code = val;
                                iqs269->switches[i].enabled = true;
                        }
-
-                       /* fall through */
+                       fallthrough;
 
                case IQS269_CHx_HALL_INACTIVE:
                        if (iqs269->hall_enable)
                                break;
-
-                       /* fall through */
+                       fallthrough;
 
                default:
                        iqs269->keycode[i * IQS269_NUM_CH + reg] = val;
@@ -1143,14 +1140,12 @@ static int iqs269_input_init(struct iqs269_private *iqs269)
                                                            sw_code,
                                                            state & BIT(j));
                                }
-
-                               /* fall through */
+                               fallthrough;
 
                        case IQS269_CHx_HALL_INACTIVE:
                                if (iqs269->hall_enable)
                                        continue;
-
-                               /* fall through */
+                               fallthrough;
 
                        default:
                                if (keycode != KEY_RESERVED)
@@ -1273,14 +1268,12 @@ static int iqs269_report(struct iqs269_private *iqs269)
                                        input_report_switch(iqs269->keypad,
                                                            sw_code,
                                                            state & BIT(j));
-
-                               /* fall through */
+                               fallthrough;
 
                        case IQS269_CHx_HALL_INACTIVE:
                                if (iqs269->hall_enable)
                                        continue;
-
-                               /* fall through */
+                               fallthrough;
 
                        default:
                                input_report_key(iqs269->keypad, keycode,
index 8ceaf7d..81e777a 100644 (file)
@@ -190,7 +190,7 @@ static int pwm_vibrator_probe(struct platform_device *pdev)
 
        default:
                dev_err(&pdev->dev, "Failed to request direction pwm: %d", err);
-               /* Fall through */
+               fallthrough;
 
        case -EPROBE_DEFER:
                return err;
index a1bba72..4ff5cd2 100644 (file)
@@ -124,7 +124,7 @@ static void xenkbd_handle_mt_event(struct xenkbd_info *info,
        switch (mtouch->event_type) {
        case XENKBD_MT_EV_DOWN:
                input_mt_report_slot_state(info->mtouch, MT_TOOL_FINGER, true);
-               /* fall through */
+               fallthrough;
 
        case XENKBD_MT_EV_MOTION:
                input_report_abs(info->mtouch, ABS_MT_POSITION_X,
@@ -524,7 +524,7 @@ static void xenkbd_backend_changed(struct xenbus_device *dev,
        case XenbusStateClosed:
                if (dev->state == XenbusStateClosed)
                        break;
-               /* fall through - Missed the backend's CLOSING state */
+               fallthrough;    /* Missed the backend's CLOSING state */
        case XenbusStateClosing:
                xenbus_frontend_closed(dev);
                break;
index 34700ed..b067bfd 100644 (file)
@@ -1929,7 +1929,7 @@ static int alps_monitor_mode(struct psmouse *psmouse, bool enable)
 static int alps_absolute_mode_v6(struct psmouse *psmouse)
 {
        u16 reg_val = 0x181;
-       int ret = -1;
+       int ret;
 
        /* enter monitor mode, to write the register */
        if (alps_monitor_mode(psmouse, true))
index 3f06e8a..bfa2665 100644 (file)
@@ -458,7 +458,7 @@ static int atp_status_check(struct urb *urb)
                                dev->info->datalen, dev->urb->actual_length);
                        dev->overflow_warned = true;
                }
-               /* fall through */
+               fallthrough;
        case -ECONNRESET:
        case -ENOENT:
        case -ESHUTDOWN:
index 00e395d..a0361f9 100644 (file)
@@ -1067,7 +1067,7 @@ static int cyapa_gen3_do_operational_check(struct cyapa *cyapa)
                        return error;
                }
 
-       /* Fall through */
+               fallthrough;
        case CYAPA_STATE_BL_IDLE:
                /* Try to get firmware version in bootloader mode. */
                cyapa_gen3_bl_query_data(cyapa);
@@ -1078,7 +1078,7 @@ static int cyapa_gen3_do_operational_check(struct cyapa *cyapa)
                        return error;
                }
 
-       /* Fall through */
+               fallthrough;
        case CYAPA_STATE_OP:
                /*
                 * Reading query data before going back to the full mode
index 7f012bf..bb3a63d 100644 (file)
@@ -2554,7 +2554,7 @@ static int cyapa_gen5_do_operational_check(struct cyapa *cyapa)
                }
 
                cyapa->state = CYAPA_STATE_GEN5_APP;
-               /* fall through */
+               fallthrough;
 
        case CYAPA_STATE_GEN5_APP:
                /*
index c1b524a..7eba66f 100644 (file)
@@ -680,7 +680,7 @@ static int cyapa_gen6_operational_check(struct cyapa *cyapa)
                }
 
                cyapa->state = CYAPA_STATE_GEN6_APP;
-               /* fall through */
+               fallthrough;
 
        case CYAPA_STATE_GEN6_APP:
                /*
index a9074ac..c75b00c 100644 (file)
@@ -26,6 +26,8 @@
 
 #define ETP_CALIBRATE_MAX_LEN  3
 
+#define ETP_FEATURE_REPORT_MK  BIT(0)
+
 /* IAP Firmware handling */
 #define ETP_PRODUCT_ID_FORMAT_STRING   "%d.0"
 #define ETP_FW_NAME            "elan_i2c_" ETP_PRODUCT_ID_FORMAT_STRING ".bin"
@@ -33,6 +35,8 @@
 #define ETP_FW_IAP_PAGE_ERR    (1 << 5)
 #define ETP_FW_IAP_INTF_ERR    (1 << 4)
 #define ETP_FW_PAGE_SIZE       64
+#define ETP_FW_PAGE_SIZE_128   128
+#define ETP_FW_PAGE_SIZE_512   512
 #define ETP_FW_SIGNATURE_SIZE  6
 
 struct i2c_client;
@@ -55,8 +59,9 @@ struct elan_transport_ops {
        int (*get_baseline_data)(struct i2c_client *client,
                                 bool max_baseliune, u8 *value);
 
-       int (*get_version)(struct i2c_client *client, bool iap, u8 *version);
-       int (*get_sm_version)(struct i2c_client *client,
+       int (*get_version)(struct i2c_client *client, u8 pattern, bool iap,
+                          u8 *version);
+       int (*get_sm_version)(struct i2c_client *client, u8 pattern,
                              u16 *ic_type, u8 *version, u8 *clickpad);
        int (*get_checksum)(struct i2c_client *client, bool iap, u16 *csum);
        int (*get_product_id)(struct i2c_client *client, u16 *id);
@@ -72,13 +77,18 @@ struct elan_transport_ops {
        int (*iap_get_mode)(struct i2c_client *client, enum tp_mode *mode);
        int (*iap_reset)(struct i2c_client *client);
 
-       int (*prepare_fw_update)(struct i2c_client *client);
-       int (*write_fw_block)(struct i2c_client *client,
+       int (*prepare_fw_update)(struct i2c_client *client, u16 ic_type,
+                                u8 iap_version);
+       int (*write_fw_block)(struct i2c_client *client, u16 fw_page_size,
                              const u8 *page, u16 checksum, int idx);
        int (*finish_fw_update)(struct i2c_client *client,
                                struct completion *reset_done);
 
-       int (*get_report)(struct i2c_client *client, u8 *report);
+       int (*get_report_features)(struct i2c_client *client, u8 pattern,
+                                  unsigned int *features,
+                                  unsigned int *report_len);
+       int (*get_report)(struct i2c_client *client, u8 *report,
+                         unsigned int report_len);
        int (*get_pressure_adjustment)(struct i2c_client *client,
                                       int *adjustment);
        int (*get_pattern)(struct i2c_client *client, u8 *pattern);
index 6291fb5..c599e21 100644 (file)
 #define ETP_MAX_FINGERS                5
 #define ETP_FINGER_DATA_LEN    5
 #define ETP_REPORT_ID          0x5D
+#define ETP_REPORT_ID2         0x60    /* High precision report */
 #define ETP_TP_REPORT_ID       0x5E
 #define ETP_REPORT_ID_OFFSET   2
 #define ETP_TOUCH_INFO_OFFSET  3
 #define ETP_FINGER_DATA_OFFSET 4
 #define ETP_HOVER_INFO_OFFSET  30
-#define ETP_MAX_REPORT_LEN     34
+#define ETP_MK_DATA_OFFSET     33      /* For high precision reports */
+#define ETP_MAX_REPORT_LEN     39
 
 /* The main device structure */
 struct elan_tp_data {
@@ -85,11 +87,14 @@ struct elan_tp_data {
        u8                      sm_version;
        u8                      iap_version;
        u16                     fw_checksum;
+       unsigned int            report_features;
+       unsigned int            report_len;
        int                     pressure_adjustment;
        u8                      mode;
        u16                     ic_type;
        u16                     fw_validpage_count;
-       u16                     fw_signature_address;
+       u16                     fw_page_size;
+       u32                     fw_signature_address;
 
        bool                    irq_wake;
 
@@ -100,8 +105,8 @@ struct elan_tp_data {
        bool                    middle_button;
 };
 
-static int elan_get_fwinfo(u16 ic_type, u16 *validpage_count,
-                          u16 *signature_address)
+static int elan_get_fwinfo(u16 ic_type, u8 iap_version, u16 *validpage_count,
+                          u32 *signature_address, u16 *page_size)
 {
        switch (ic_type) {
        case 0x00:
@@ -126,16 +131,37 @@ static int elan_get_fwinfo(u16 ic_type, u16 *validpage_count,
        case 0x10:
                *validpage_count = 1024;
                break;
+       case 0x11:
+               *validpage_count = 1280;
+               break;
+       case 0x13:
+               *validpage_count = 2048;
+               break;
+       case 0x14:
+       case 0x15:
+               *validpage_count = 1024;
+               break;
        default:
                /* unknown ic type clear value */
                *validpage_count = 0;
                *signature_address = 0;
+               *page_size = 0;
                return -ENXIO;
        }
 
        *signature_address =
                (*validpage_count * ETP_FW_PAGE_SIZE) - ETP_FW_SIGNATURE_SIZE;
 
+       if ((ic_type == 0x14 || ic_type == 0x15) && iap_version >= 2) {
+               *validpage_count /= 8;
+               *page_size = ETP_FW_PAGE_SIZE_512;
+       } else if (ic_type >= 0x0D && iap_version >= 1) {
+               *validpage_count /= 2;
+               *page_size = ETP_FW_PAGE_SIZE_128;
+       } else {
+               *page_size = ETP_FW_PAGE_SIZE;
+       }
+
        return 0;
 }
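elan_get_fwinfo() now also derives a per-device flash page size: the signature address is still computed from the base 64-byte page layout, and for the newer ICs the page count is then divided while the page size grows, so the total image size stays the same (1024 * 64 == 128 * 512). A worked example for ic_type 0x14 with iap_version >= 2, using the constants from the header hunk:

#include <stdio.h>

#define ETP_FW_PAGE_SIZE        64
#define ETP_FW_PAGE_SIZE_512    512
#define ETP_FW_SIGNATURE_SIZE   6

int main(void)
{
        unsigned int validpage_count = 1024;    /* ic_type 0x14/0x15 */
        unsigned int signature_address;
        unsigned int page_size;

        signature_address = validpage_count * ETP_FW_PAGE_SIZE - ETP_FW_SIGNATURE_SIZE;

        /* iap_version >= 2: eight times fewer pages, eight times larger */
        validpage_count /= 8;
        page_size = ETP_FW_PAGE_SIZE_512;

        printf("pages=%u page_size=%u total=%u signature@%u\n",
               validpage_count, page_size,
               validpage_count * page_size, signature_address);
        /* pages=128 page_size=512 total=65536 signature@65530 */
        return 0;
}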
 
@@ -215,8 +241,13 @@ static int elan_query_product(struct elan_tp_data *data)
        if (error)
                return error;
 
-       error = data->ops->get_sm_version(data->client, &data->ic_type,
-                                         &data->sm_version, &data->clickpad);
+       error = data->ops->get_pattern(data->client, &data->pattern);
+       if (error)
+               return error;
+
+       error = data->ops->get_sm_version(data->client, data->pattern,
+                                         &data->ic_type, &data->sm_version,
+                                         &data->clickpad);
        if (error)
                return error;
 
@@ -312,9 +343,9 @@ static int elan_initialize(struct elan_tp_data *data)
 static int elan_query_device_info(struct elan_tp_data *data)
 {
        int error;
-       u16 ic_type;
 
-       error = data->ops->get_version(data->client, false, &data->fw_version);
+       error = data->ops->get_version(data->client, data->pattern, false,
+                                      &data->fw_version);
        if (error)
                return error;
 
@@ -323,7 +354,8 @@ static int elan_query_device_info(struct elan_tp_data *data)
        if (error)
                return error;
 
-       error = data->ops->get_version(data->client, true, &data->iap_version);
+       error = data->ops->get_version(data->client, data->pattern,
+                                      true, &data->iap_version);
        if (error)
                return error;
 
@@ -332,17 +364,16 @@ static int elan_query_device_info(struct elan_tp_data *data)
        if (error)
                return error;
 
-       error = data->ops->get_pattern(data->client, &data->pattern);
+       error = data->ops->get_report_features(data->client, data->pattern,
+                                              &data->report_features,
+                                              &data->report_len);
        if (error)
                return error;
 
-       if (data->pattern == 0x01)
-               ic_type = data->ic_type;
-       else
-               ic_type = data->iap_version;
-
-       error = elan_get_fwinfo(ic_type, &data->fw_validpage_count,
-                               &data->fw_signature_address);
+       error = elan_get_fwinfo(data->ic_type, data->iap_version,
+                               &data->fw_validpage_count,
+                               &data->fw_signature_address,
+                               &data->fw_page_size);
        if (error)
                dev_warn(&data->client->dev,
                         "unexpected iap version %#04x (ic type: %#04x), firmware update will not work\n",
@@ -351,16 +382,21 @@ static int elan_query_device_info(struct elan_tp_data *data)
        return 0;
 }
 
-static unsigned int elan_convert_resolution(u8 val)
+static unsigned int elan_convert_resolution(u8 val, u8 pattern)
 {
        /*
-        * (value from firmware) * 10 + 790 = dpi
-        *
+        * pattern <= 0x01:
+        *      (value from firmware) * 10 + 790 = dpi
+        * else
+        *      ((value from firmware) + 3) * 100 = dpi
+        */
+       int res = pattern <= 0x01 ?
+               (int)(char)val * 10 + 790 : ((int)(char)val + 3) * 100;
+       /*
         * We also have to convert dpi to dots/mm (*10/254 to avoid floating
         * point).
         */
-
-       return ((int)(char)val * 10 + 790) * 10 / 254;
+       return res * 10 / 254;
 }
 
 static int elan_query_device_parameters(struct elan_tp_data *data)
@@ -409,8 +445,8 @@ static int elan_query_device_parameters(struct elan_tp_data *data)
                if (error)
                        return error;
 
-               data->x_res = elan_convert_resolution(hw_x_res);
-               data->y_res = elan_convert_resolution(hw_y_res);
+               data->x_res = elan_convert_resolution(hw_x_res, data->pattern);
+               data->y_res = elan_convert_resolution(hw_y_res, data->pattern);
        } else {
                data->x_res = (data->max_x + 1) / x_mm;
                data->y_res = (data->max_y + 1) / y_mm;
@@ -430,14 +466,14 @@ static int elan_query_device_parameters(struct elan_tp_data *data)
  * IAP firmware updater related routines
  **********************************************************
  */
-static int elan_write_fw_block(struct elan_tp_data *data,
+static int elan_write_fw_block(struct elan_tp_data *data, u16 page_size,
                               const u8 *page, u16 checksum, int idx)
 {
        int retry = ETP_RETRY_COUNT;
        int error;
 
        do {
-               error = data->ops->write_fw_block(data->client,
+               error = data->ops->write_fw_block(data->client, page_size,
                                                  page, checksum, idx);
                if (!error)
                        return 0;
@@ -460,21 +496,23 @@ static int __elan_update_firmware(struct elan_tp_data *data,
        u16 boot_page_count;
        u16 sw_checksum = 0, fw_checksum = 0;
 
-       error = data->ops->prepare_fw_update(client);
+       error = data->ops->prepare_fw_update(client, data->ic_type,
+                                            data->iap_version);
        if (error)
                return error;
 
        iap_start_addr = get_unaligned_le16(&fw->data[ETP_IAP_START_ADDR * 2]);
 
-       boot_page_count = (iap_start_addr * 2) / ETP_FW_PAGE_SIZE;
+       boot_page_count = (iap_start_addr * 2) / data->fw_page_size;
        for (i = boot_page_count; i < data->fw_validpage_count; i++) {
                u16 checksum = 0;
-               const u8 *page = &fw->data[i * ETP_FW_PAGE_SIZE];
+               const u8 *page = &fw->data[i * data->fw_page_size];
 
-               for (j = 0; j < ETP_FW_PAGE_SIZE; j += 2)
+               for (j = 0; j < data->fw_page_size; j += 2)
                        checksum += ((page[j + 1] << 8) | page[j]);
 
-               error = elan_write_fw_block(data, page, checksum, i);
+               error = elan_write_fw_block(data, data->fw_page_size,
+                                           page, checksum, i);
                if (error) {
                        dev_err(dev, "write page %d fail: %d\n", i, error);
                        return error;
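The flashing loop above now walks the image in data->fw_page_size chunks, but the per-page checksum is unchanged: the page is summed as little-endian 16-bit words and the 16-bit result is handed to write_fw_block() along with the payload. A minimal reimplementation of just the checksum:

#include <stdint.h>
#include <stdio.h>

static uint16_t fw_page_checksum(const uint8_t *page, unsigned int page_size)
{
        uint16_t checksum = 0;
        unsigned int j;

        /* sum the page as little-endian 16-bit words, wrapping at 16 bits */
        for (j = 0; j < page_size; j += 2)
                checksum += (page[j + 1] << 8) | page[j];

        return checksum;
}

int main(void)
{
        uint8_t page[64] = { 0x34, 0x12, 0x78, 0x56 }; /* rest of the page is zero */

        /* 0x1234 + 0x5678 = 0x68ac */
        printf("checksum: 0x%04x\n", (unsigned int)fw_page_checksum(page, sizeof(page)));
        return 0;
}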
@@ -886,24 +924,22 @@ static const struct attribute_group *elan_sysfs_groups[] = {
  * Elan isr functions
  ******************************************************************
  */
-static void elan_report_contact(struct elan_tp_data *data,
-                               int contact_num, bool contact_valid,
-                               u8 *finger_data)
+static void elan_report_contact(struct elan_tp_data *data, int contact_num,
+                               bool contact_valid, bool high_precision,
+                               u8 *packet, u8 *finger_data)
 {
        struct input_dev *input = data->input;
        unsigned int pos_x, pos_y;
-       unsigned int pressure, mk_x, mk_y;
-       unsigned int area_x, area_y, major, minor;
-       unsigned int scaled_pressure;
+       unsigned int pressure, scaled_pressure;
 
        if (contact_valid) {
-               pos_x = ((finger_data[0] & 0xf0) << 4) |
-                                               finger_data[1];
-               pos_y = ((finger_data[0] & 0x0f) << 8) |
-                                               finger_data[2];
-               mk_x = (finger_data[3] & 0x0f);
-               mk_y = (finger_data[3] >> 4);
-               pressure = finger_data[4];
+               if (high_precision) {
+                       pos_x = get_unaligned_be16(&finger_data[0]);
+                       pos_y = get_unaligned_be16(&finger_data[2]);
+               } else {
+                       pos_x = ((finger_data[0] & 0xf0) << 4) | finger_data[1];
+                       pos_y = ((finger_data[0] & 0x0f) << 8) | finger_data[2];
+               }
 
                if (pos_x > data->max_x || pos_y > data->max_y) {
                        dev_dbg(input->dev.parent,
@@ -913,18 +949,8 @@ static void elan_report_contact(struct elan_tp_data *data,
                        return;
                }
 
-               /*
-                * To avoid treating large finger as palm, let's reduce the
-                * width x and y per trace.
-                */
-               area_x = mk_x * (data->width_x - ETP_FWIDTH_REDUCE);
-               area_y = mk_y * (data->width_y - ETP_FWIDTH_REDUCE);
-
-               major = max(area_x, area_y);
-               minor = min(area_x, area_y);
-
+               pressure = finger_data[4];
                scaled_pressure = pressure + data->pressure_adjustment;
-
                if (scaled_pressure > ETP_MAX_PRESSURE)
                        scaled_pressure = ETP_MAX_PRESSURE;
 
@@ -933,16 +959,37 @@ static void elan_report_contact(struct elan_tp_data *data,
                input_report_abs(input, ABS_MT_POSITION_X, pos_x);
                input_report_abs(input, ABS_MT_POSITION_Y, data->max_y - pos_y);
                input_report_abs(input, ABS_MT_PRESSURE, scaled_pressure);
-               input_report_abs(input, ABS_TOOL_WIDTH, mk_x);
-               input_report_abs(input, ABS_MT_TOUCH_MAJOR, major);
-               input_report_abs(input, ABS_MT_TOUCH_MINOR, minor);
+
+               if (data->report_features & ETP_FEATURE_REPORT_MK) {
+                       unsigned int mk_x, mk_y, area_x, area_y;
+                       u8 mk_data = high_precision ?
+                               packet[ETP_MK_DATA_OFFSET + contact_num] :
+                               finger_data[3];
+
+                       mk_x = mk_data & 0x0f;
+                       mk_y = mk_data >> 4;
+
+                       /*
+                        * To avoid treating large finger as palm, let's reduce
+                        * the width x and y per trace.
+                        */
+                       area_x = mk_x * (data->width_x - ETP_FWIDTH_REDUCE);
+                       area_y = mk_y * (data->width_y - ETP_FWIDTH_REDUCE);
+
+                       input_report_abs(input, ABS_TOOL_WIDTH, mk_x);
+                       input_report_abs(input, ABS_MT_TOUCH_MAJOR,
+                                        max(area_x, area_y));
+                       input_report_abs(input, ABS_MT_TOUCH_MINOR,
+                                        min(area_x, area_y));
+               }
        } else {
                input_mt_slot(input, contact_num);
                input_mt_report_slot_inactive(input);
        }
 }
 
-static void elan_report_absolute(struct elan_tp_data *data, u8 *packet)
+static void elan_report_absolute(struct elan_tp_data *data, u8 *packet,
+                                bool high_precision)
 {
        struct input_dev *input = data->input;
        u8 *finger_data = &packet[ETP_FINGER_DATA_OFFSET];
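Touch major/minor reporting is now gated on ETP_FEATURE_REPORT_MK, and for the high-precision 0x60 report the per-contact MK byte sits in its own block of the packet (ETP_MK_DATA_OFFSET + contact_num) instead of finger_data[3]; the math on the two width nibbles is unchanged. A sketch of just that math, with illustrative width and reduction values rather than the driver's constants:

#include <stdio.h>

/* mk_data packs per-trace widths as two nibbles; each is scaled by the
 * pad's per-trace width minus a reduction so a large finger is not read
 * as a palm. width_x/width_y/reduce below are made-up numbers. */
static void report_mk(unsigned char mk_data,
                      unsigned int width_x, unsigned int width_y,
                      unsigned int reduce)
{
        unsigned int mk_x = mk_data & 0x0f;
        unsigned int mk_y = mk_data >> 4;
        unsigned int area_x = mk_x * (width_x - reduce);
        unsigned int area_y = mk_y * (width_y - reduce);
        unsigned int major = area_x > area_y ? area_x : area_y;
        unsigned int minor = area_x < area_y ? area_x : area_y;

        printf("mk=(%u,%u) major=%u minor=%u\n", mk_x, mk_y, major, minor);
}

int main(void)
{
        /* low-precision report: nibbles come from finger_data[3];
         * high-precision report: one MK byte per contact near the end of
         * the 39-byte packet. Either way the computation is the same. */
        report_mk(0x32, 1200, 1100, 90);        /* mk_x=2, mk_y=3 */
        return 0;
}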
@@ -953,11 +1000,12 @@ static void elan_report_absolute(struct elan_tp_data *data, u8 *packet)
 
        pm_wakeup_event(&data->client->dev, 0);
 
-       hover_event = hover_info & 0x40;
-       for (i = 0; i < ETP_MAX_FINGERS; i++) {
-               contact_valid = tp_info & (1U << (3 + i));
-               elan_report_contact(data, i, contact_valid, finger_data);
+       hover_event = hover_info & BIT(6);
 
+       for (i = 0; i < ETP_MAX_FINGERS; i++) {
+               contact_valid = tp_info & BIT(3 + i);
+               elan_report_contact(data, i, contact_valid, high_precision,
+                                   packet, finger_data);
                if (contact_valid)
                        finger_data += ETP_FINGER_DATA_LEN;
        }
@@ -1015,13 +1063,16 @@ static irqreturn_t elan_isr(int irq, void *dev_id)
                goto out;
        }
 
-       error = data->ops->get_report(data->client, report);
+       error = data->ops->get_report(data->client, report, data->report_len);
        if (error)
                goto out;
 
        switch (report[ETP_REPORT_ID_OFFSET]) {
        case ETP_REPORT_ID:
-               elan_report_absolute(data, report);
+               elan_report_absolute(data, report, false);
+               break;
+       case ETP_REPORT_ID2:
+               elan_report_absolute(data, report, true);
                break;
        case ETP_TP_REPORT_ID:
                elan_report_trackpoint(data, report);
@@ -1112,7 +1163,9 @@ static int elan_setup_input_device(struct elan_tp_data *data)
        input_abs_set_res(input, ABS_X, data->x_res);
        input_abs_set_res(input, ABS_Y, data->y_res);
        input_set_abs_params(input, ABS_PRESSURE, 0, ETP_MAX_PRESSURE, 0, 0);
-       input_set_abs_params(input, ABS_TOOL_WIDTH, 0, ETP_FINGER_WIDTH, 0, 0);
+       if (data->report_features & ETP_FEATURE_REPORT_MK)
+               input_set_abs_params(input, ABS_TOOL_WIDTH,
+                                    0, ETP_FINGER_WIDTH, 0, 0);
        input_set_abs_params(input, ABS_DISTANCE, 0, 1, 0, 0);
 
        /* And MT parameters */
@@ -1122,10 +1175,12 @@ static int elan_setup_input_device(struct elan_tp_data *data)
        input_abs_set_res(input, ABS_MT_POSITION_Y, data->y_res);
        input_set_abs_params(input, ABS_MT_PRESSURE, 0,
                             ETP_MAX_PRESSURE, 0, 0);
-       input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0,
-                            ETP_FINGER_WIDTH * max_width, 0, 0);
-       input_set_abs_params(input, ABS_MT_TOUCH_MINOR, 0,
-                            ETP_FINGER_WIDTH * min_width, 0, 0);
+       if (data->report_features & ETP_FEATURE_REPORT_MK) {
+               input_set_abs_params(input, ABS_MT_TOUCH_MAJOR,
+                                    0, ETP_FINGER_WIDTH * max_width, 0, 0);
+               input_set_abs_params(input, ABS_MT_TOUCH_MINOR,
+                                    0, ETP_FINGER_WIDTH * min_width, 0, 0);
+       }
 
        data->input = input;
 
index 058b35b..5a496d4 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/interrupt.h>
 #include <linux/jiffies.h>
 #include <linux/kernel.h>
+#include <linux/slab.h>
 #include <linux/sched.h>
 #include <asm/unaligned.h>
 
@@ -43,6 +44,8 @@
 #define ETP_I2C_RESOLUTION_CMD         0x0108
 #define ETP_I2C_PRESSURE_CMD           0x010A
 #define ETP_I2C_IAP_VERSION_CMD                0x0110
+#define ETP_I2C_IC_TYPE_P0_CMD         0x0110
+#define ETP_I2C_IAP_VERSION_P0_CMD     0x0111
 #define ETP_I2C_SET_CMD                        0x0300
 #define ETP_I2C_POWER_CMD              0x0307
 #define ETP_I2C_FW_CHECKSUM_CMD                0x030F
 #define ETP_I2C_CALIBRATE_CMD          0x0316
 #define ETP_I2C_MAX_BASELINE_CMD       0x0317
 #define ETP_I2C_MIN_BASELINE_CMD       0x0318
+#define ETP_I2C_IAP_TYPE_REG           0x0040
+#define ETP_I2C_IAP_TYPE_CMD           0x0304
 
 #define ETP_I2C_REPORT_LEN             34
+#define ETP_I2C_REPORT_LEN_ID2         39
+#define ETP_I2C_REPORT_MAX_LEN         39
 #define ETP_I2C_DESC_LENGTH            30
 #define ETP_I2C_REPORT_DESC_LENGTH     158
 #define ETP_I2C_INF_LENGTH             2
@@ -249,56 +256,52 @@ static int elan_i2c_get_pattern(struct i2c_client *client, u8 *pattern)
                dev_err(&client->dev, "failed to get pattern: %d\n", error);
                return error;
        }
-       *pattern = val[1];
+
+       /*
+        * Not all versions of firmware implement "get pattern" command.
+        * When this command is not implemented the device will respond
+        * with 0xFF 0xFF, which we will treat as "old" pattern 0.
+        */
+       *pattern = val[0] == 0xFF && val[1] == 0xFF ? 0 : val[1];
 
        return 0;
 }
 
 static int elan_i2c_get_version(struct i2c_client *client,
-                               bool iap, u8 *version)
+                               u8 pattern, bool iap, u8 *version)
 {
        int error;
-       u8 pattern_ver;
+       u16 cmd;
        u8 val[3];
 
-       error = elan_i2c_get_pattern(client, &pattern_ver);
-       if (error) {
-               dev_err(&client->dev, "failed to get pattern version\n");
-               return error;
-       }
+       if (!iap)
+               cmd = ETP_I2C_FW_VERSION_CMD;
+       else if (pattern == 0)
+               cmd = ETP_I2C_IAP_VERSION_P0_CMD;
+       else
+               cmd = ETP_I2C_IAP_VERSION_CMD;
 
-       error = elan_i2c_read_cmd(client,
-                                 iap ? ETP_I2C_IAP_VERSION_CMD :
-                                       ETP_I2C_FW_VERSION_CMD,
-                                 val);
+       error = elan_i2c_read_cmd(client, cmd, val);
        if (error) {
                dev_err(&client->dev, "failed to get %s version: %d\n",
                        iap ? "IAP" : "FW", error);
                return error;
        }
 
-       if (pattern_ver == 0x01)
+       if (pattern >= 0x01)
                *version = iap ? val[1] : val[0];
        else
                *version = val[0];
        return 0;
 }
 
-static int elan_i2c_get_sm_version(struct i2c_client *client,
-                                  u16 *ic_type, u8 *version,
-                                  u8 *clickpad)
+static int elan_i2c_get_sm_version(struct i2c_client *client, u8 pattern,
+                                  u16 *ic_type, u8 *version, u8 *clickpad)
 {
        int error;
-       u8 pattern_ver;
        u8 val[3];
 
-       error = elan_i2c_get_pattern(client, &pattern_ver);
-       if (error) {
-               dev_err(&client->dev, "failed to get pattern version\n");
-               return error;
-       }
-
-       if (pattern_ver == 0x01) {
+       if (pattern >= 0x01) {
                error = elan_i2c_read_cmd(client, ETP_I2C_IC_TYPE_CMD, val);
                if (error) {
                        dev_err(&client->dev, "failed to get ic type: %d\n",
@@ -324,7 +327,14 @@ static int elan_i2c_get_sm_version(struct i2c_client *client,
                        return error;
                }
                *version = val[0];
-               *ic_type = val[1];
+
+               error = elan_i2c_read_cmd(client, ETP_I2C_IC_TYPE_P0_CMD, val);
+               if (error) {
+                       dev_err(&client->dev, "failed to get ic type: %d\n",
+                               error);
+                       return error;
+               }
+               *ic_type = val[0];
 
                error = elan_i2c_read_cmd(client, ETP_I2C_NSM_VERSION_CMD,
                                          val);
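Pattern handling on the I2C side changes in two ways: firmware that does not implement the "get pattern" query answers 0xFF 0xFF and is treated as pattern 0, and the pattern then decides both which command reads the IAP version and IC type (the _P0_ commands for pattern 0) and which response byte carries the value. A small sketch of the fallback and the byte selection, with made-up sample responses:

#include <stdio.h>

static unsigned char decode_pattern(const unsigned char val[2])
{
        /* old firmware without the query answers 0xFF 0xFF -> pattern 0 */
        return (val[0] == 0xFF && val[1] == 0xFF) ? 0 : val[1];
}

static unsigned char pick_version(const unsigned char val[3],
                                  unsigned char pattern, int iap)
{
        /* pattern >= 1 packs FW and IAP versions into different bytes */
        return pattern >= 0x01 ? (iap ? val[1] : val[0]) : val[0];
}

int main(void)
{
        unsigned char no_pattern_query[2] = { 0xFF, 0xFF };
        unsigned char pattern_reply[2]    = { 0x00, 0x02 };
        unsigned char version_reply[3]    = { 0x11, 0x22, 0x00 };

        printf("patterns: %u and %u\n",
               decode_pattern(no_pattern_query), decode_pattern(pattern_reply));
        printf("IAP version on pattern 2: 0x%02x\n",
               pick_version(version_reply, 2, 1));
        return 0;
}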
@@ -386,7 +396,7 @@ static int elan_i2c_get_max(struct i2c_client *client,
                return error;
        }
 
-       *max_x = le16_to_cpup((__le16 *)val) & 0x0fff;
+       *max_x = le16_to_cpup((__le16 *)val);
 
        error = elan_i2c_read_cmd(client, ETP_I2C_MAX_Y_AXIS_CMD, val);
        if (error) {
@@ -394,7 +404,7 @@ static int elan_i2c_get_max(struct i2c_client *client,
                return error;
        }
 
-       *max_y = le16_to_cpup((__le16 *)val) & 0x0fff;
+       *max_y = le16_to_cpup((__le16 *)val);
 
        return 0;
 }
@@ -507,7 +517,43 @@ static int elan_i2c_set_flash_key(struct i2c_client *client)
        return 0;
 }
 
-static int elan_i2c_prepare_fw_update(struct i2c_client *client)
+static int elan_read_write_iap_type(struct i2c_client *client)
+{
+       int error;
+       u16 constant;
+       u8 val[3];
+       int retry = 3;
+
+       do {
+               error = elan_i2c_write_cmd(client, ETP_I2C_IAP_TYPE_CMD,
+                                          ETP_I2C_IAP_TYPE_REG);
+               if (error) {
+                       dev_err(&client->dev,
+                               "cannot write iap type: %d\n", error);
+                       return error;
+               }
+
+               error = elan_i2c_read_cmd(client, ETP_I2C_IAP_TYPE_CMD, val);
+               if (error) {
+                       dev_err(&client->dev,
+                               "failed to read iap type register: %d\n",
+                               error);
+                       return error;
+               }
+               constant = le16_to_cpup((__le16 *)val);
+               dev_dbg(&client->dev, "iap type reg: 0x%04x\n", constant);
+
+               if (constant == ETP_I2C_IAP_TYPE_REG)
+                       return 0;
+
+       } while (--retry > 0);
+
+       dev_err(&client->dev, "cannot set iap type\n");
+       return -EIO;
+}
+
+static int elan_i2c_prepare_fw_update(struct i2c_client *client, u16 ic_type,
+                                     u8 iap_version)
 {
        struct device *dev = &client->dev;
        int error;
@@ -547,6 +593,12 @@ static int elan_i2c_prepare_fw_update(struct i2c_client *client)
                return -EIO;
        }
 
+       if (ic_type >= 0x0D && iap_version >= 1) {
+               error = elan_read_write_iap_type(client);
+               if (error)
+                       return error;
+       }
+
        /* Set flash key again */
        error = elan_i2c_set_flash_key(client);
        if (error)
@@ -572,57 +624,64 @@ static int elan_i2c_prepare_fw_update(struct i2c_client *client)
        return 0;
 }
 
-static int elan_i2c_write_fw_block(struct i2c_client *client,
+static int elan_i2c_write_fw_block(struct i2c_client *client, u16 fw_page_size,
                                   const u8 *page, u16 checksum, int idx)
 {
        struct device *dev = &client->dev;
-       u8 page_store[ETP_FW_PAGE_SIZE + 4];
+       u8 *page_store;
        u8 val[3];
        u16 result;
        int ret, error;
 
+       page_store = kmalloc(fw_page_size + 4, GFP_KERNEL);
+       if (!page_store)
+               return -ENOMEM;
+
        page_store[0] = ETP_I2C_IAP_REG_L;
        page_store[1] = ETP_I2C_IAP_REG_H;
-       memcpy(&page_store[2], page, ETP_FW_PAGE_SIZE);
+       memcpy(&page_store[2], page, fw_page_size);
        /* recode checksum at last two bytes */
-       put_unaligned_le16(checksum, &page_store[ETP_FW_PAGE_SIZE + 2]);
+       put_unaligned_le16(checksum, &page_store[fw_page_size + 2]);
 
-       ret = i2c_master_send(client, page_store, sizeof(page_store));
-       if (ret != sizeof(page_store)) {
+       ret = i2c_master_send(client, page_store, fw_page_size + 4);
+       if (ret != fw_page_size + 4) {
                error = ret < 0 ? ret : -EIO;
                dev_err(dev, "Failed to write page %d: %d\n", idx, error);
-               return error;
+               goto exit;
        }
 
        /* Wait for F/W to update one page ROM data. */
-       msleep(35);
+       msleep(fw_page_size == ETP_FW_PAGE_SIZE_512 ? 50 : 35);
 
        error = elan_i2c_read_cmd(client, ETP_I2C_IAP_CTRL_CMD, val);
        if (error) {
                dev_err(dev, "Failed to read IAP write result: %d\n", error);
-               return error;
+               goto exit;
        }
 
        result = le16_to_cpup((__le16 *)val);
        if (result & (ETP_FW_IAP_PAGE_ERR | ETP_FW_IAP_INTF_ERR)) {
                dev_err(dev, "IAP reports failed write: %04hx\n",
                        result);
-               return -EIO;
+               error = -EIO;
+               goto exit;
        }
 
-       return 0;
+exit:
+       kfree(page_store);
+       return error;
 }
 
 static int elan_i2c_finish_fw_update(struct i2c_client *client,
                                     struct completion *completion)
 {
        struct device *dev = &client->dev;
-       int error;
+       int error = 0;
        int len;
-       u8 buffer[ETP_I2C_REPORT_LEN];
+       u8 buffer[ETP_I2C_REPORT_MAX_LEN];
 
-       len = i2c_master_recv(client, buffer, ETP_I2C_REPORT_LEN);
-       if (len != ETP_I2C_REPORT_LEN) {
+       len = i2c_master_recv(client, buffer, ETP_I2C_REPORT_MAX_LEN);
+       if (len <= 0) {
                error = len < 0 ? len : -EIO;
                dev_warn(dev, "failed to read I2C data after FW WDT reset: %d (%d)\n",
                        error, len);
@@ -656,20 +715,31 @@ static int elan_i2c_finish_fw_update(struct i2c_client *client,
        return 0;
 }
 
-static int elan_i2c_get_report(struct i2c_client *client, u8 *report)
+static int elan_i2c_get_report_features(struct i2c_client *client, u8 pattern,
+                                       unsigned int *features,
+                                       unsigned int *report_len)
+{
+       *features = ETP_FEATURE_REPORT_MK;
+       *report_len = pattern <= 0x01 ?
+                       ETP_I2C_REPORT_LEN : ETP_I2C_REPORT_LEN_ID2;
+       return 0;
+}
+
+static int elan_i2c_get_report(struct i2c_client *client,
+                              u8 *report, unsigned int report_len)
 {
        int len;
 
-       len = i2c_master_recv(client, report, ETP_I2C_REPORT_LEN);
+       len = i2c_master_recv(client, report, report_len);
        if (len < 0) {
                dev_err(&client->dev, "failed to read report data: %d\n", len);
                return len;
        }
 
-       if (len != ETP_I2C_REPORT_LEN) {
+       if (len != report_len) {
                dev_err(&client->dev,
                        "wrong report length (%d vs %d expected)\n",
-                       len, ETP_I2C_REPORT_LEN);
+                       len, report_len);
                return -EIO;
        }
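On the I2C transport the report length now follows the pattern as well: 34-byte 0x5D reports for pattern <= 1, 39-byte high-precision 0x60 reports otherwise, with the chosen length passed down to i2c_master_recv() instead of a fixed constant. A trivial sketch of the selection, using the lengths from the header hunk:

#include <stdio.h>

#define ETP_I2C_REPORT_LEN      34
#define ETP_I2C_REPORT_LEN_ID2  39

/* Mirrors elan_i2c_get_report_features(): MK data is always reported on
 * I2C, only the expected packet length changes with the pattern. */
static unsigned int i2c_report_len(unsigned char pattern)
{
        return pattern <= 0x01 ? ETP_I2C_REPORT_LEN : ETP_I2C_REPORT_LEN_ID2;
}

int main(void)
{
        printf("pattern 0 -> %u bytes, pattern 2 -> %u bytes\n",
               i2c_report_len(0), i2c_report_len(2));
        return 0;
}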
 
@@ -706,5 +776,6 @@ const struct elan_transport_ops elan_i2c_ops = {
 
        .get_pattern            = elan_i2c_get_pattern,
 
+       .get_report_features    = elan_i2c_get_report_features,
        .get_report             = elan_i2c_get_report,
 };
index 8c3185d..8ff8237 100644 (file)
@@ -147,7 +147,7 @@ static int elan_smbus_get_baseline_data(struct i2c_client *client,
 }
 
 static int elan_smbus_get_version(struct i2c_client *client,
-                                 bool iap, u8 *version)
+                                 u8 pattern, bool iap, u8 *version)
 {
        int error;
        u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
@@ -166,9 +166,8 @@ static int elan_smbus_get_version(struct i2c_client *client,
        return 0;
 }
 
-static int elan_smbus_get_sm_version(struct i2c_client *client,
-                                    u16 *ic_type, u8 *version,
-                                    u8 *clickpad)
+static int elan_smbus_get_sm_version(struct i2c_client *client, u8 pattern,
+                                    u16 *ic_type, u8 *version, u8 *clickpad)
 {
        int error;
        u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
@@ -340,7 +339,8 @@ static int elan_smbus_set_flash_key(struct i2c_client *client)
        return 0;
 }
 
-static int elan_smbus_prepare_fw_update(struct i2c_client *client)
+static int elan_smbus_prepare_fw_update(struct i2c_client *client, u16 ic_type,
+                                       u8 iap_version)
 {
        struct device *dev = &client->dev;
        int len;
@@ -414,7 +414,7 @@ static int elan_smbus_prepare_fw_update(struct i2c_client *client)
 }
 
 
-static int elan_smbus_write_fw_block(struct i2c_client *client,
+static int elan_smbus_write_fw_block(struct i2c_client *client, u16 fw_page_size,
                                     const u8 *page, u16 checksum, int idx)
 {
        struct device *dev = &client->dev;
@@ -429,7 +429,7 @@ static int elan_smbus_write_fw_block(struct i2c_client *client,
         */
        error = i2c_smbus_write_block_data(client,
                                           ETP_SMBUS_WRITE_FW_BLOCK,
-                                          ETP_FW_PAGE_SIZE / 2,
+                                          fw_page_size / 2,
                                           page);
        if (error) {
                dev_err(dev, "Failed to write page %d (part %d): %d\n",
@@ -439,8 +439,8 @@ static int elan_smbus_write_fw_block(struct i2c_client *client,
 
        error = i2c_smbus_write_block_data(client,
                                           ETP_SMBUS_WRITE_FW_BLOCK,
-                                          ETP_FW_PAGE_SIZE / 2,
-                                          page + ETP_FW_PAGE_SIZE / 2);
+                                          fw_page_size / 2,
+                                          page + fw_page_size / 2);
        if (error) {
                dev_err(dev, "Failed to write page %d (part %d): %d\n",
                        idx, 2, error);
@@ -469,7 +469,21 @@ static int elan_smbus_write_fw_block(struct i2c_client *client,
        return 0;
 }
 
-static int elan_smbus_get_report(struct i2c_client *client, u8 *report)
+static int elan_smbus_get_report_features(struct i2c_client *client, u8 pattern,
+                                         unsigned int *features,
+                                         unsigned int *report_len)
+{
+       /*
+        * SMBus controllers with pattern 2 lack area info, as newer
+        * high-precision packets use that space for coordinates.
+        */
+       *features = pattern <= 0x01 ? ETP_FEATURE_REPORT_MK : 0;
+       *report_len = ETP_SMBUS_REPORT_LEN;
+       return 0;
+}
+
+static int elan_smbus_get_report(struct i2c_client *client,
+                                u8 *report, unsigned int report_len)
 {
        int len;
 
@@ -534,6 +548,7 @@ const struct elan_transport_ops elan_smbus_ops = {
        .write_fw_block         = elan_smbus_write_fw_block,
        .finish_fw_update       = elan_smbus_finish_fw_update,
 
+       .get_report_features    = elan_smbus_get_report_features,
        .get_report             = elan_smbus_get_report,
        .get_pattern            = elan_smbus_get_pattern,
 };
index 2d8434b..90f8765 100644 (file)
@@ -383,7 +383,7 @@ static void elantech_report_absolute_v2(struct psmouse *psmouse)
                 */
                if (packet[3] & 0x80)
                        fingers = 4;
-               /* fall through */
+               fallthrough;
        case 1:
                /*
                 * byte 1:  .   .   .   .  x11 x10 x9  x8
@@ -1146,7 +1146,7 @@ static int elantech_set_input_params(struct psmouse *psmouse)
        case 2:
                __set_bit(BTN_TOOL_QUADTAP, dev->keybit);
                __set_bit(INPUT_PROP_SEMI_MT, dev->propbit);
-               /* fall through */
+               fallthrough;
        case 3:
                if (info->hw_version == 3)
                        elantech_set_buttonpad_prop(psmouse);
@@ -1877,12 +1877,10 @@ static bool elantech_use_host_notify(struct psmouse *psmouse,
                /* expected case */
                break;
        case ETP_BUS_SMB_ALERT_ONLY:
-               /* fall-through  */
        case ETP_BUS_PS2_SMB_ALERT:
                psmouse_dbg(psmouse, "Ignoring SMBus provider through alert protocol.\n");
                break;
        case ETP_BUS_SMB_HST_NTFY_ONLY:
-               /* fall-through  */
        case ETP_BUS_PS2_SMB_HST_NTFY:
                return true;
        default:
@@ -1897,7 +1895,7 @@ static bool elantech_use_host_notify(struct psmouse *psmouse,
 int elantech_init_smbus(struct psmouse *psmouse)
 {
        struct elantech_device_info info;
-       int error = -EINVAL;
+       int error;
 
        psmouse_reset(psmouse);
 
@@ -2015,7 +2013,7 @@ static int elantech_setup_ps2(struct psmouse *psmouse,
 int elantech_init_ps2(struct psmouse *psmouse)
 {
        struct elantech_device_info info;
-       int error = -EINVAL;
+       int error;
 
        psmouse_reset(psmouse);
 
@@ -2036,7 +2034,7 @@ int elantech_init_ps2(struct psmouse *psmouse)
 int elantech_init(struct psmouse *psmouse)
 {
        struct elantech_device_info info;
-       int error = -EINVAL;
+       int error;
 
        psmouse_reset(psmouse);
 
index 72a083f..4dc4413 100644 (file)
@@ -238,7 +238,7 @@ static void hgpk_spewing_hack(struct psmouse *psmouse,
                /* we're not spewing, but this packet might be the start */
                priv->spew_flag = MAYBE_SPEWING;
 
-               /* fall-through */
+               fallthrough;
 
        case MAYBE_SPEWING:
                priv->spew_count++;
@@ -249,7 +249,7 @@ static void hgpk_spewing_hack(struct psmouse *psmouse,
                /* excessive spew detected, request recalibration */
                priv->spew_flag = SPEW_DETECTED;
 
-               /* fall-through */
+               fallthrough;
 
        case SPEW_DETECTED:
                /* only recalibrate when the overall delta to the cursor
index 0b75248..c112980 100644 (file)
@@ -105,7 +105,7 @@ static void navpoint_packet(struct navpoint *navpoint)
        case 0x19:      /* Module 0, Hello packet */
                if ((navpoint->data[1] & 0xf0) == 0x10)
                        break;
-               /* FALLTHROUGH */
+               fallthrough;
        default:
                dev_warn(navpoint->dev,
                         "spurious packet: data=0x%02x,0x%02x,...\n",
index 527ae0b..0b4a303 100644 (file)
@@ -2042,7 +2042,7 @@ static int psmouse_get_maxproto(char *buffer, const struct kernel_param *kp)
 {
        int type = *((unsigned int *)kp->arg);
 
-       return sprintf(buffer, "%s", psmouse_protocol_by_type(type)->name);
+       return sprintf(buffer, "%s\n", psmouse_protocol_by_type(type)->name);
 }
 
 static int __init psmouse_init(void)
index e99d9bf..2716d2b 100644 (file)
@@ -441,7 +441,7 @@ static ssize_t fsp_attr_set_setreg(struct psmouse *psmouse, void *data,
 
        fsp_reg_write_enable(psmouse, false);
 
-       return count;
+       return retval;
 }
 
 PSMOUSE_DEFINE_WO_ATTR(setreg, S_IWUSR, NULL, fsp_attr_set_setreg);
@@ -794,7 +794,7 @@ static psmouse_ret_t fsp_process_byte(struct psmouse *psmouse)
                /* on-pad click, filter it if necessary */
                if ((ad->flags & FSPDRV_FLAG_EN_OPC) != FSPDRV_FLAG_EN_OPC)
                        packet[0] &= ~FSP_PB0_LBTN;
-               /* fall through */
+               fallthrough;
 
        case FSP_PKT_TYPE_NORMAL:
                /* normal packet */
index ea9242d..caa79c1 100644 (file)
@@ -128,7 +128,7 @@ static void sermouse_process_ms(struct sermouse *sermouse, signed char data)
 
                                case SERIO_MS:
                                        sermouse->type = SERIO_MP;
-                                       /* fall through */
+                                       fallthrough;
 
                                case SERIO_MP:
                                        if ((data >> 2) & 3) break;     /* M++ Wireless Extension packet. */
@@ -139,7 +139,7 @@ static void sermouse_process_ms(struct sermouse *sermouse, signed char data)
                                case SERIO_MZP:
                                case SERIO_MZPP:
                                        input_report_key(dev, BTN_SIDE,   (data >> 5) & 1);
-                                       /* fall through */
+                                       fallthrough;
 
                                case SERIO_MZ:
                                        input_report_key(dev, BTN_MIDDLE, (data >> 4) & 1);
index da0bf85..64590b8 100644 (file)
@@ -21,8 +21,6 @@
 #elif defined(__arm__)
 /* defined in include/asm-arm/arch-xxx/irqs.h */
 #include <asm/irq.h>
-#elif defined(CONFIG_SH_CAYMAN)
-#include <asm/irq.h>
 #elif defined(CONFIG_PPC)
 extern int of_i8042_kbd_irq;
 extern int of_i8042_aux_irq;
index 0dddf27..d3eda48 100644 (file)
@@ -562,7 +562,7 @@ static irqreturn_t i8042_interrupt(int irq, void *dev_id)
                                                str = last_str;
                                                break;
                                        }
-                                       /* fall through - report timeout */
+                                       fallthrough;    /* report timeout */
                                case 0xfc:
                                case 0xfd:
                                case 0xfe: dfl = SERIO_TIMEOUT; data = 0xfe; break;
index a8c94a9..8a16e41 100644 (file)
@@ -418,7 +418,7 @@ bool ps2_handle_ack(struct ps2dev *ps2dev, u8 data)
                        ps2dev->nak = 0;
                        break;
                }
-               /* Fall through */
+               fallthrough;
        default:
                /*
                 * Do not signal errors if we get unexpected reply while
index 530fd15..25bf8be 100644 (file)
@@ -247,7 +247,7 @@ void sparse_keymap_report_entry(struct input_dev *dev, const struct key_entry *k
 
        case KE_SW:
                value = ke->sw.value;
-               /* fall through */
+               fallthrough;
 
        case KE_VSW:
                input_report_switch(dev, ke->sw.code, value);
index 96d6557..44bb1f6 100644 (file)
@@ -676,8 +676,8 @@ static void gtco_urb_callback(struct urb *urbinfo)
 
                        /* Mask out the Y tilt value used for pressure */
                        device->buffer[7] = (u8)((device->buffer[7]) & 0x7F);
+                       fallthrough;
 
-                       /* Fall thru */
                case 4:
                        /* Tilt */
                        input_report_abs(inputdev, ABS_TILT_X,
@@ -685,8 +685,8 @@ static void gtco_urb_callback(struct urb *urbinfo)
 
                        input_report_abs(inputdev, ABS_TILT_Y,
                                         sign_extend32(device->buffer[7], 6));
+                       fallthrough;
 
-                       /* Fall thru */
                case 2:
                case 3:
                        /* Convert buttons, only 5 bits possible */
@@ -695,8 +695,8 @@ static void gtco_urb_callback(struct urb *urbinfo)
                        /* We don't apply any meaning to the bitmask,
                           just report */
                        input_event(inputdev, EV_MSC, MSC_SERIAL, val);
+                       fallthrough;
 
-                       /*  Fall thru */
                case 1:
                        /* All reports have X and Y coords in the same place */
                        val = get_unaligned_le16(&device->buffer[1]);
index 38f0874..749edbd 100644 (file)
@@ -146,7 +146,7 @@ static void pegasus_parse_packet(struct pegasus *pegasus)
        /* xy data */
        case BATTERY_LOW:
                dev_warn_once(&dev->dev, "Pen battery low\n");
-               /* fall through */
+               fallthrough;
 
        case BATTERY_NO_REPORT:
        case BATTERY_GOOD:
index a218973..6b71b0a 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/i2c.h>
 #include <linux/input/mt.h>
 #include <linux/interrupt.h>
+#include <linux/irq.h>
 #include <linux/of.h>
 #include <linux/property.h>
 #include <linux/slab.h>
@@ -129,6 +130,7 @@ struct t9_range {
 /* MXT_SPT_COMMSCONFIG_T18 */
 #define MXT_COMMS_CTRL         0
 #define MXT_COMMS_CMD          1
+#define MXT_COMMS_RETRIGEN     BIT(6)
 
 /* MXT_DEBUG_DIAGNOSTIC_T37 */
 #define MXT_DIAGNOSTIC_PAGEUP  0x01
@@ -308,6 +310,7 @@ struct mxt_data {
        struct t7_config t7_cfg;
        struct mxt_dbg dbg;
        struct gpio_desc *reset_gpio;
+       bool use_retrigen_workaround;
 
        /* Cached parameters from object table */
        u16 T5_address;
@@ -318,6 +321,7 @@ struct mxt_data {
        u16 T71_address;
        u8 T9_reportid_min;
        u8 T9_reportid_max;
+       u16 T18_address;
        u8 T19_reportid;
        u16 T44_address;
        u8 T100_reportid_min;
@@ -1190,9 +1194,11 @@ static int mxt_acquire_irq(struct mxt_data *data)
 
        enable_irq(data->irq);
 
-       error = mxt_process_messages_until_invalid(data);
-       if (error)
-               return error;
+       if (data->use_retrigen_workaround) {
+               error = mxt_process_messages_until_invalid(data);
+               if (error)
+                       return error;
+       }
 
        return 0;
 }
@@ -1282,6 +1288,38 @@ static u32 mxt_calculate_crc(u8 *base, off_t start_off, off_t end_off)
        return crc;
 }
 
+static int mxt_check_retrigen(struct mxt_data *data)
+{
+       struct i2c_client *client = data->client;
+       int error;
+       int val;
+       struct irq_data *irqd;
+
+       data->use_retrigen_workaround = false;
+
+       irqd = irq_get_irq_data(data->irq);
+       if (!irqd)
+               return -EINVAL;
+
+       if (irqd_is_level_type(irqd))
+               return 0;
+
+       if (data->T18_address) {
+               error = __mxt_read_reg(client,
+                                      data->T18_address + MXT_COMMS_CTRL,
+                                      1, &val);
+               if (error)
+                       return error;
+
+               if (val & MXT_COMMS_RETRIGEN)
+                       return 0;
+       }
+
+       dev_warn(&client->dev, "Enabling RETRIGEN workaround\n");
+       data->use_retrigen_workaround = true;
+       return 0;
+}
+
 static int mxt_prepare_cfg_mem(struct mxt_data *data, struct mxt_cfg *cfg)
 {
        struct device *dev = &data->client->dev;
@@ -1561,6 +1599,10 @@ static int mxt_update_cfg(struct mxt_data *data, const struct firmware *fw)
 
        mxt_update_crc(data, MXT_COMMAND_BACKUPNV, MXT_BACKUP_VALUE);
 
+       ret = mxt_check_retrigen(data);
+       if (ret)
+               goto release_mem;
+
        ret = mxt_soft_reset(data);
        if (ret)
                goto release_mem;
@@ -1604,6 +1646,7 @@ static void mxt_free_object_table(struct mxt_data *data)
        data->T71_address = 0;
        data->T9_reportid_min = 0;
        data->T9_reportid_max = 0;
+       data->T18_address = 0;
        data->T19_reportid = 0;
        data->T44_address = 0;
        data->T100_reportid_min = 0;
@@ -1678,6 +1721,9 @@ static int mxt_parse_object_table(struct mxt_data *data,
                                                object->num_report_ids - 1;
                        data->num_touchids = object->num_report_ids;
                        break;
+               case MXT_SPT_COMMSCONFIG_T18:
+                       data->T18_address = object->start_address;
+                       break;
                case MXT_SPT_MESSAGECOUNT_T44:
                        data->T44_address = object->start_address;
                        break;
@@ -2141,6 +2187,10 @@ static int mxt_initialize(struct mxt_data *data)
        if (error)
                return error;
 
+       error = mxt_check_retrigen(data);
+       if (error)
+               return error;
+
        error = request_firmware_nowait(THIS_MODULE, true, MXT_CFG_NAME,
                                        &client->dev, GFP_KERNEL, data,
                                        mxt_config_cb);
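
The atmel_mxt_ts changes above make the post-enable message drain conditional: it is only needed when the CHG interrupt is edge-triggered and the controller's T18 RETRIGEN bit is not set, since a level-triggered line stays asserted while messages are pending and cannot miss events. A minimal sketch of the trigger-type half of that test follows, using the same <linux/irq.h> helpers the patch calls; the function name is illustrative and not part of the driver.

#include <linux/irq.h>

/* Return true when an edge-triggered interrupt line could miss events,
 * mirroring the trigger-type part of the check added in
 * mxt_check_retrigen() (the driver additionally consults the T18
 * RETRIGEN bit before enabling the workaround). */
static bool irq_needs_retrigen_workaround(unsigned int irq)
{
	struct irq_data *irqd = irq_get_irq_data(irq);

	/* Level-triggered: nothing can be lost, no workaround needed. */
	if (irqd && irqd_is_level_type(irqd))
		return false;

	return true;
}
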
index 3a4f18d..6ff81d4 100644 (file)
@@ -288,7 +288,7 @@ static int edt_ft5x06_register_write(struct edt_ft5x06_ts_data *tsdata,
                wrbuf[3] = wrbuf[0] ^ wrbuf[1] ^ wrbuf[2];
                return edt_ft5x06_ts_readwrite(tsdata->client, 4,
                                        wrbuf, 0, NULL);
-       /* fallthrough */
+
        case EDT_M09:
        case EDT_M12:
        case EV_FT:
@@ -330,7 +330,6 @@ static int edt_ft5x06_register_read(struct edt_ft5x06_ts_data *tsdata,
                }
                break;
 
-       /* fallthrough */
        case EDT_M09:
        case EDT_M12:
        case EV_FT:
index 5477a57..b0bd5bb 100644 (file)
@@ -955,7 +955,7 @@ static irqreturn_t elants_i2c_irq(int irq, void *_dev)
                        break;
 
                ts->state = ELAN_STATE_NORMAL;
-               /* fall through */
+               fallthrough;
 
        case ELAN_STATE_NORMAL:
 
index d6772a2..e0bacd3 100644 (file)
@@ -348,7 +348,7 @@ static int elo_connect(struct serio *serio, struct serio_driver *drv)
 
        case 1: /* 6-byte protocol */
                input_set_abs_params(input_dev, ABS_PRESSURE, 0, 15, 0, 0);
-               /* fall through */
+               fallthrough;
 
        case 2: /* 4-byte protocol */
                input_set_abs_params(input_dev, ABS_X, 96, 4000, 0, 0);
index e007e2e..a6597f0 100644 (file)
@@ -8,7 +8,9 @@
  */
 
 #include <linux/bitops.h>
+#include <linux/delay.h>
 #include <linux/device.h>
+#include <linux/gpio/consumer.h>
 #include <linux/i2c.h>
 #include <linux/input.h>
 #include <linux/input/mt.h>
@@ -16,6 +18,7 @@
 #include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/of.h>
+#include <linux/sizes.h>
 #include <linux/timer.h>
 #include <asm/unaligned.h>
 
 #define EXC3000_SLOTS_PER_FRAME                5
 #define EXC3000_LEN_FRAME              66
 #define EXC3000_LEN_POINT              10
-#define EXC3000_MT_EVENT               6
+
+#define EXC3000_LEN_MODEL_NAME         16
+#define EXC3000_LEN_FW_VERSION         16
+
+#define EXC3000_MT1_EVENT              0x06
+#define EXC3000_MT2_EVENT              0x18
+
 #define EXC3000_TIMEOUT_MS             100
 
+#define EXC3000_RESET_MS               10
+#define EXC3000_READY_MS               100
+
+static const struct i2c_device_id exc3000_id[];
+
+struct eeti_dev_info {
+       const char *name;
+       int max_xy;
+};
+
+enum eeti_dev_id {
+       EETI_EXC3000,
+       EETI_EXC80H60,
+       EETI_EXC80H84,
+};
+
+static struct eeti_dev_info exc3000_info[] = {
+       [EETI_EXC3000] = {
+               .name = "EETI EXC3000 Touch Screen",
+               .max_xy = SZ_4K - 1,
+       },
+       [EETI_EXC80H60] = {
+               .name = "EETI EXC80H60 Touch Screen",
+               .max_xy = SZ_16K - 1,
+       },
+       [EETI_EXC80H84] = {
+               .name = "EETI EXC80H84 Touch Screen",
+               .max_xy = SZ_16K - 1,
+       },
+};
+
 struct exc3000_data {
        struct i2c_client *client;
+       const struct eeti_dev_info *info;
        struct input_dev *input;
        struct touchscreen_properties prop;
+       struct gpio_desc *reset;
        struct timer_list timer;
        u8 buf[2 * EXC3000_LEN_FRAME];
+       struct completion wait_event;
+       struct mutex query_lock;
+       int query_result;
+       char model[EXC3000_LEN_MODEL_NAME];
+       char fw_version[EXC3000_LEN_FW_VERSION];
 };
 
 static void exc3000_report_slots(struct input_dev *input,
@@ -58,10 +105,15 @@ static void exc3000_timer(struct timer_list *t)
        input_sync(data->input);
 }
 
-static int exc3000_read_frame(struct i2c_client *client, u8 *buf)
+static int exc3000_read_frame(struct exc3000_data *data, u8 *buf)
 {
+       struct i2c_client *client = data->client;
+       u8 expected_event = EXC3000_MT1_EVENT;
        int ret;
 
+       if (data->info->max_xy == SZ_16K - 1)
+               expected_event = EXC3000_MT2_EVENT;
+
        ret = i2c_master_send(client, "'", 2);
        if (ret < 0)
                return ret;
@@ -76,19 +128,21 @@ static int exc3000_read_frame(struct i2c_client *client, u8 *buf)
        if (ret != EXC3000_LEN_FRAME)
                return -EIO;
 
-       if (get_unaligned_le16(buf) != EXC3000_LEN_FRAME ||
-                       buf[2] != EXC3000_MT_EVENT)
+       if (get_unaligned_le16(buf) != EXC3000_LEN_FRAME)
+               return -EINVAL;
+
+       if (buf[2] != expected_event)
                return -EINVAL;
 
        return 0;
 }
 
-static int exc3000_read_data(struct i2c_client *client,
+static int exc3000_read_data(struct exc3000_data *data,
                             u8 *buf, int *n_slots)
 {
        int error;
 
-       error = exc3000_read_frame(client, buf);
+       error = exc3000_read_frame(data, buf);
        if (error)
                return error;
 
@@ -98,7 +152,7 @@ static int exc3000_read_data(struct i2c_client *client,
 
        if (*n_slots > EXC3000_SLOTS_PER_FRAME) {
                /* Read 2nd frame to get the rest of the contacts. */
-               error = exc3000_read_frame(client, buf + EXC3000_LEN_FRAME);
+               error = exc3000_read_frame(data, buf + EXC3000_LEN_FRAME);
                if (error)
                        return error;
 
@@ -110,6 +164,28 @@ static int exc3000_read_data(struct i2c_client *client,
        return 0;
 }
 
+static int exc3000_query_interrupt(struct exc3000_data *data)
+{
+       u8 *buf = data->buf;
+       int error;
+
+       error = i2c_master_recv(data->client, buf, EXC3000_LEN_FRAME);
+       if (error < 0)
+               return error;
+
+       if (buf[0] != 'B')
+               return -EPROTO;
+
+       if (buf[4] == 'E')
+               strlcpy(data->model, buf + 5, sizeof(data->model));
+       else if (buf[4] == 'D')
+               strlcpy(data->fw_version, buf + 5, sizeof(data->fw_version));
+       else
+               return -EPROTO;
+
+       return 0;
+}
+
 static irqreturn_t exc3000_interrupt(int irq, void *dev_id)
 {
        struct exc3000_data *data = dev_id;
@@ -118,7 +194,13 @@ static irqreturn_t exc3000_interrupt(int irq, void *dev_id)
        int slots, total_slots;
        int error;
 
-       error = exc3000_read_data(data->client, buf, &total_slots);
+       if (mutex_is_locked(&data->query_lock)) {
+               data->query_result = exc3000_query_interrupt(data);
+               complete(&data->wait_event);
+               goto out;
+       }
+
+       error = exc3000_read_data(data, buf, &total_slots);
        if (error) {
                /* Schedule a timer to release "stuck" contacts */
                mod_timer(&data->timer,
@@ -145,31 +227,132 @@ out:
        return IRQ_HANDLED;
 }
 
-static int exc3000_probe(struct i2c_client *client,
-                        const struct i2c_device_id *id)
+static ssize_t fw_version_show(struct device *dev,
+                              struct device_attribute *attr, char *buf)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       struct exc3000_data *data = i2c_get_clientdata(client);
+       static const u8 request[68] = {
+               0x67, 0x00, 0x42, 0x00, 0x03, 0x01, 'D', 0x00
+       };
+       int error;
+
+       mutex_lock(&data->query_lock);
+
+       data->query_result = -ETIMEDOUT;
+       reinit_completion(&data->wait_event);
+
+       error = i2c_master_send(client, request, sizeof(request));
+       if (error < 0) {
+               mutex_unlock(&data->query_lock);
+               return error;
+       }
+
+       wait_for_completion_interruptible_timeout(&data->wait_event, 1 * HZ);
+       mutex_unlock(&data->query_lock);
+
+       if (data->query_result < 0)
+               return data->query_result;
+
+       return sprintf(buf, "%s\n", data->fw_version);
+}
+static DEVICE_ATTR_RO(fw_version);
+
+static ssize_t exc3000_get_model(struct exc3000_data *data)
+{
+       static const u8 request[68] = {
+               0x67, 0x00, 0x42, 0x00, 0x03, 0x01, 'E', 0x00
+       };
+       struct i2c_client *client = data->client;
+       int error;
+
+       mutex_lock(&data->query_lock);
+       data->query_result = -ETIMEDOUT;
+       reinit_completion(&data->wait_event);
+
+       error = i2c_master_send(client, request, sizeof(request));
+       if (error < 0) {
+               mutex_unlock(&data->query_lock);
+               return error;
+       }
+
+       wait_for_completion_interruptible_timeout(&data->wait_event, 1 * HZ);
+       mutex_unlock(&data->query_lock);
+
+       return data->query_result;
+}
+
+static ssize_t model_show(struct device *dev,
+                         struct device_attribute *attr, char *buf)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       struct exc3000_data *data = i2c_get_clientdata(client);
+       int error;
+
+       error = exc3000_get_model(data);
+       if (error < 0)
+               return error;
+
+       return sprintf(buf, "%s\n", data->model);
+}
+static DEVICE_ATTR_RO(model);
+
+static struct attribute *sysfs_attrs[] = {
+       &dev_attr_fw_version.attr,
+       &dev_attr_model.attr,
+       NULL
+};
+
+static struct attribute_group exc3000_attribute_group = {
+       .attrs = sysfs_attrs
+};
+
+static int exc3000_probe(struct i2c_client *client)
 {
        struct exc3000_data *data;
        struct input_dev *input;
-       int error;
+       int error, max_xy, retry;
 
        data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;
 
        data->client = client;
+       data->info = device_get_match_data(&client->dev);
+       if (!data->info) {
+               enum eeti_dev_id eeti_dev_id =
+                       i2c_match_id(exc3000_id, client)->driver_data;
+               data->info = &exc3000_info[eeti_dev_id];
+       }
        timer_setup(&data->timer, exc3000_timer, 0);
+       init_completion(&data->wait_event);
+       mutex_init(&data->query_lock);
+
+       data->reset = devm_gpiod_get_optional(&client->dev, "reset",
+                                             GPIOD_OUT_HIGH);
+       if (IS_ERR(data->reset))
+               return PTR_ERR(data->reset);
+
+       if (data->reset) {
+               msleep(EXC3000_RESET_MS);
+               gpiod_set_value_cansleep(data->reset, 0);
+               msleep(EXC3000_READY_MS);
+       }
 
        input = devm_input_allocate_device(&client->dev);
        if (!input)
                return -ENOMEM;
 
        data->input = input;
+       input_set_drvdata(input, data);
 
-       input->name = "EETI EXC3000 Touch Screen";
+       input->name = data->info->name;
        input->id.bustype = BUS_I2C;
 
-       input_set_abs_params(input, ABS_MT_POSITION_X, 0, 4095, 0, 0);
-       input_set_abs_params(input, ABS_MT_POSITION_Y, 0, 4095, 0, 0);
+       max_xy = data->info->max_xy;
+       input_set_abs_params(input, ABS_MT_POSITION_X, 0, max_xy, 0, 0);
+       input_set_abs_params(input, ABS_MT_POSITION_Y, 0, max_xy, 0, 0);
+
        touchscreen_parse_properties(input, true, &data->prop);
 
        error = input_mt_init_slots(input, EXC3000_NUM_SLOTS,
@@ -187,18 +370,49 @@ static int exc3000_probe(struct i2c_client *client,
        if (error)
                return error;
 
+       /*
+        * I²C does not have built-in recovery, so retry on failure. This
+        * ensures that the device probe will not fail due to temporary
+        * issues on the bus.  This is not needed for the sysfs calls
+        * (userspace will receive the error code and can start another
+        * query) and cannot be done for touch events (but that only means
+        * losing one or two touch events anyway).
+        */
+       for (retry = 0; retry < 3; retry++) {
+               error = exc3000_get_model(data);
+               if (!error)
+                       break;
+               dev_warn(&client->dev, "Retry %d get EETI EXC3000 model: %d\n",
+                        retry + 1, error);
+       }
+
+       if (error)
+               return error;
+
+       dev_dbg(&client->dev, "TS Model: %s", data->model);
+
+       i2c_set_clientdata(client, data);
+
+       error = devm_device_add_group(&client->dev, &exc3000_attribute_group);
+       if (error)
+               return error;
+
        return 0;
 }
 
 static const struct i2c_device_id exc3000_id[] = {
-       { "exc3000", 0 },
+       { "exc3000", EETI_EXC3000 },
+       { "exc80h60", EETI_EXC80H60 },
+       { "exc80h84", EETI_EXC80H84 },
        { }
 };
 MODULE_DEVICE_TABLE(i2c, exc3000_id);
 
 #ifdef CONFIG_OF
 static const struct of_device_id exc3000_of_match[] = {
-       { .compatible = "eeti,exc3000" },
+       { .compatible = "eeti,exc3000", .data = &exc3000_info[EETI_EXC3000] },
+       { .compatible = "eeti,exc80h60", .data = &exc3000_info[EETI_EXC80H60] },
+       { .compatible = "eeti,exc80h84", .data = &exc3000_info[EETI_EXC80H84] },
        { }
 };
 MODULE_DEVICE_TABLE(of, exc3000_of_match);
@@ -210,7 +424,7 @@ static struct i2c_driver exc3000_driver = {
                .of_match_table = of_match_ptr(exc3000_of_match),
        },
        .id_table       = exc3000_id,
-       .probe          = exc3000_probe,
+       .probe_new      = exc3000_probe,
 };
 
 module_i2c_driver(exc3000_driver);
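
The fw_version and model attributes added above use a small vendor query protocol: the driver sends a 68-byte request whose command byte is 'E' (model) or 'D' (firmware version), and exc3000_query_interrupt() accepts a reply that starts with 'B', echoes the command byte at offset 4 and carries a NUL-terminated string from offset 5. The standalone decoder below is a sketch of that reply layout only; the buffer contents are illustrative and not captured from hardware.

#include <stdio.h>

#define EXC3000_LEN_FRAME	66
#define EXC3000_LEN_MODEL_NAME	16

/* Decode a query reply the way exc3000_query_interrupt() does:
 * buf[0] must be 'B', buf[4] selects the field ('E' = model,
 * 'D' = firmware version) and the string payload starts at buf + 5. */
static int exc3000_decode_reply(const unsigned char *buf, char *out, size_t len)
{
	if (buf[0] != 'B')
		return -1;
	if (buf[4] != 'E' && buf[4] != 'D')
		return -1;

	snprintf(out, len, "%s", (const char *)(buf + 5));
	return 0;
}

int main(void)
{
	/* Illustrative reply: 'B' header, echoed 'E' command, model string. */
	unsigned char frame[EXC3000_LEN_FRAME] = {
		'B', 0, 0, 0, 'E', 'E', 'X', 'C', '8', '0', 'H', '6', '0', 0
	};
	char model[EXC3000_LEN_MODEL_NAME];

	if (exc3000_decode_reply(frame, model, sizeof(model)) == 0)
		printf("model: %s\n", model);

	return 0;
}
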
index 5875bb1..3162b68 100644 (file)
@@ -289,7 +289,7 @@ static int iqs5xx_bl_cmd(struct i2c_client *client, u8 bl_cmd, u16 bl_addr)
                break;
        case IQS5XX_BL_CMD_EXEC:
                usleep_range(10000, 10100);
-               /* fall through */
+               fallthrough;
        default:
                return 0;
        }
index 1af08d3..f15713a 100644 (file)
@@ -130,7 +130,6 @@ static irqreturn_t max11801_ts_interrupt(int irq, void *dev_id)
 
                switch (buf[1] & EVENT_TAG_MASK) {
                case EVENT_INIT:
-                       /* fall through */
                case EVENT_MIDDLE:
                        input_report_abs(data->input_dev, ABS_X, x);
                        input_report_abs(data->input_dev, ABS_Y, y);
index b54cc64..df94686 100644 (file)
@@ -255,7 +255,7 @@ static void stmfts_parse_events(struct stmfts_data *sdata)
                case STMFTS_EV_SLEEP_OUT_CONTROLLER_READY:
                case STMFTS_EV_STATUS:
                        complete(&sdata->cmd_done);
-                       /* fall through */
+                       fallthrough;
 
                case STMFTS_EV_NO_EVENT:
                case STMFTS_EV_DEBUG:
index b622af7..bef5d75 100644 (file)
@@ -129,140 +129,8 @@ config MSM_IOMMU
 
          If unsure, say N here.
 
-config IOMMU_PGTABLES_L2
-       def_bool y
-       depends on MSM_IOMMU && MMU && SMP && CPU_DCACHE_DISABLE=n
-
-# AMD IOMMU support
-config AMD_IOMMU
-       bool "AMD IOMMU support"
-       select SWIOTLB
-       select PCI_MSI
-       select PCI_ATS
-       select PCI_PRI
-       select PCI_PASID
-       select IOMMU_API
-       select IOMMU_IOVA
-       select IOMMU_DMA
-       depends on X86_64 && PCI && ACPI
-       help
-         With this option you can enable support for AMD IOMMU hardware in
-         your system. An IOMMU is a hardware component which provides
-         remapping of DMA memory accesses from devices. With an AMD IOMMU you
-         can isolate the DMA memory of different devices and protect the
-         system from misbehaving device drivers or hardware.
-
-         You can find out if your system has an AMD IOMMU if you look into
-         your BIOS for an option to enable it or if you have an IVRS ACPI
-         table.
-
-config AMD_IOMMU_V2
-       tristate "AMD IOMMU Version 2 driver"
-       depends on AMD_IOMMU
-       select MMU_NOTIFIER
-       help
-         This option enables support for the AMD IOMMUv2 features of the IOMMU
-         hardware. Select this option if you want to use devices that support
-         the PCI PRI and PASID interface.
-
-config AMD_IOMMU_DEBUGFS
-       bool "Enable AMD IOMMU internals in DebugFS"
-       depends on AMD_IOMMU && IOMMU_DEBUGFS
-       help
-         !!!WARNING!!!  !!!WARNING!!!  !!!WARNING!!!  !!!WARNING!!!
-
-         DO NOT ENABLE THIS OPTION UNLESS YOU REALLY, -REALLY- KNOW WHAT YOU ARE DOING!!!
-         Exposes AMD IOMMU device internals in DebugFS.
-
-         This option is -NOT- intended for production environments, and should
-         not generally be enabled.
-
-# Intel IOMMU support
-config DMAR_TABLE
-       bool
-
-config INTEL_IOMMU
-       bool "Support for Intel IOMMU using DMA Remapping Devices"
-       depends on PCI_MSI && ACPI && (X86 || IA64)
-       select DMA_OPS
-       select IOMMU_API
-       select IOMMU_IOVA
-       select NEED_DMA_MAP_STATE
-       select DMAR_TABLE
-       select SWIOTLB
-       select IOASID
-       help
-         DMA remapping (DMAR) devices support enables independent address
-         translations for Direct Memory Access (DMA) from devices.
-         These DMA remapping devices are reported via ACPI tables
-         and include PCI device scope covered by these DMA
-         remapping devices.
-
-config INTEL_IOMMU_DEBUGFS
-       bool "Export Intel IOMMU internals in Debugfs"
-       depends on INTEL_IOMMU && IOMMU_DEBUGFS
-       help
-         !!!WARNING!!!
-
-         DO NOT ENABLE THIS OPTION UNLESS YOU REALLY KNOW WHAT YOU ARE DOING!!!
-
-         Expose Intel IOMMU internals in Debugfs.
-
-         This option is -NOT- intended for production environments, and should
-         only be enabled for debugging Intel IOMMU.
-
-config INTEL_IOMMU_SVM
-       bool "Support for Shared Virtual Memory with Intel IOMMU"
-       depends on INTEL_IOMMU && X86_64
-       select PCI_PASID
-       select PCI_PRI
-       select MMU_NOTIFIER
-       select IOASID
-       help
-         Shared Virtual Memory (SVM) provides a facility for devices
-         to access DMA resources through process address space by
-         means of a Process Address Space ID (PASID).
-
-config INTEL_IOMMU_DEFAULT_ON
-       def_bool y
-       prompt "Enable Intel DMA Remapping Devices by default"
-       depends on INTEL_IOMMU
-       help
-         Selecting this option will enable a DMAR device at boot time if
-         one is found. If this option is not selected, DMAR support can
-         be enabled by passing intel_iommu=on to the kernel.
-
-config INTEL_IOMMU_BROKEN_GFX_WA
-       bool "Workaround broken graphics drivers (going away soon)"
-       depends on INTEL_IOMMU && BROKEN && X86
-       help
-         Current Graphics drivers tend to use physical address
-         for DMA and avoid using DMA APIs. Setting this config
-         option permits the IOMMU driver to set a unity map for
-         all the OS-visible memory. Hence the driver can continue
-         to use physical addresses for DMA, at least until this
-         option is removed in the 2.6.32 kernel.
-
-config INTEL_IOMMU_FLOPPY_WA
-       def_bool y
-       depends on INTEL_IOMMU && X86
-       help
-         Floppy disk drivers are known to bypass DMA API calls
-         thereby failing to work when IOMMU is enabled. This
-         workaround will setup a 1:1 mapping for the first
-         16MiB to make floppy (an ISA device) work.
-
-config INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON
-       bool "Enable Intel IOMMU scalable mode by default"
-       depends on INTEL_IOMMU
-       help
-         Selecting this option will enable by default the scalable mode if
-         hardware presents the capability. The scalable mode is defined in
-         VT-d 3.0. The scalable mode capability could be checked by reading
-         /sys/devices/virtual/iommu/dmar*/intel-iommu/ecap. If this option
-         is not selected, scalable mode support could also be enabled by
-         passing intel_iommu=sm_on to the kernel. If not sure, please use
-         the default value.
+source "drivers/iommu/amd/Kconfig"
+source "drivers/iommu/intel/Kconfig"
 
 config IRQ_REMAP
        bool "Support for Interrupt Remapping"
@@ -276,7 +144,6 @@ config IRQ_REMAP
 # OMAP IOMMU support
 config OMAP_IOMMU
        bool "OMAP IOMMU Support"
-       depends on ARM && MMU || (COMPILE_TEST && (ARM || ARM64 || IA64 || SPARC))
        depends on ARCH_OMAP2PLUS || COMPILE_TEST
        select IOMMU_API
        help
@@ -294,7 +161,6 @@ config OMAP_IOMMU_DEBUG
 
 config ROCKCHIP_IOMMU
        bool "Rockchip IOMMU Support"
-       depends on ARM || ARM64 || (COMPILE_TEST && (ARM64 || IA64 || SPARC))
        depends on ARCH_ROCKCHIP || COMPILE_TEST
        select IOMMU_API
        select ARM_DMA_USE_IOMMU
@@ -311,7 +177,6 @@ config SUN50I_IOMMU
        depends on ARCH_SUNXI || COMPILE_TEST
        select ARM_DMA_USE_IOMMU
        select IOMMU_API
-       select IOMMU_DMA
        help
          Support for the IOMMU introduced in the Allwinner H6 SoCs.
 
@@ -338,7 +203,7 @@ config TEGRA_IOMMU_SMMU
 
 config EXYNOS_IOMMU
        bool "Exynos IOMMU Support"
-       depends on ARCH_EXYNOS && MMU || (COMPILE_TEST && (ARM || ARM64 || IA64 || SPARC))
+       depends on ARCH_EXYNOS || COMPILE_TEST
        depends on !CPU_BIG_ENDIAN # revisit driver if we can enable big-endian ptes
        select IOMMU_API
        select ARM_DMA_USE_IOMMU
@@ -361,7 +226,6 @@ config EXYNOS_IOMMU_DEBUG
 
 config IPMMU_VMSA
        bool "Renesas VMSA-compatible IPMMU"
-       depends on ARM || IOMMU_DMA
        depends on ARCH_RENESAS || (COMPILE_TEST && !GENERIC_ATOMIC64)
        select IOMMU_API
        select IOMMU_IO_PGTABLE_LPAE
@@ -383,7 +247,7 @@ config SPAPR_TCE_IOMMU
 # ARM IOMMU support
 config ARM_SMMU
        tristate "ARM Ltd. System MMU (SMMU) Support"
-       depends on (ARM64 || ARM || (COMPILE_TEST && !GENERIC_ATOMIC64)) && MMU
+       depends on ARM64 || ARM || (COMPILE_TEST && !GENERIC_ATOMIC64)
        select IOMMU_API
        select IOMMU_IO_PGTABLE_LPAE
        select ARM_DMA_USE_IOMMU if ARM
@@ -469,11 +333,9 @@ config S390_AP_IOMMU
 
 config MTK_IOMMU
        bool "MTK IOMMU Support"
-       depends on HAS_DMA
        depends on ARCH_MEDIATEK || COMPILE_TEST
        select ARM_DMA_USE_IOMMU
        select IOMMU_API
-       select IOMMU_DMA
        select IOMMU_IO_PGTABLE_ARMV7S
        select MEMORY
        select MTK_SMI
index 3421901..11f1771 100644 (file)
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0
+obj-y += amd/ intel/ arm/
 obj-$(CONFIG_IOMMU_API) += iommu.o
 obj-$(CONFIG_IOMMU_API) += iommu-traces.o
 obj-$(CONFIG_IOMMU_API) += iommu-sysfs.o
@@ -11,19 +12,8 @@ obj-$(CONFIG_IOASID) += ioasid.o
 obj-$(CONFIG_IOMMU_IOVA) += iova.o
 obj-$(CONFIG_OF_IOMMU) += of_iommu.o
 obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o
-obj-$(CONFIG_AMD_IOMMU) += amd/iommu.o amd/init.o amd/quirks.o
-obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += amd/debugfs.o
-obj-$(CONFIG_AMD_IOMMU_V2) += amd/iommu_v2.o
-obj-$(CONFIG_ARM_SMMU) += arm_smmu.o
-arm_smmu-objs += arm-smmu.o arm-smmu-impl.o arm-smmu-qcom.o
-obj-$(CONFIG_ARM_SMMU_V3) += arm-smmu-v3.o
-obj-$(CONFIG_DMAR_TABLE) += intel/dmar.o
-obj-$(CONFIG_INTEL_IOMMU) += intel/iommu.o intel/pasid.o
-obj-$(CONFIG_INTEL_IOMMU) += intel/trace.o
-obj-$(CONFIG_INTEL_IOMMU_DEBUGFS) += intel/debugfs.o
-obj-$(CONFIG_INTEL_IOMMU_SVM) += intel/svm.o
 obj-$(CONFIG_IPMMU_VMSA) += ipmmu-vmsa.o
-obj-$(CONFIG_IRQ_REMAP) += intel/irq_remapping.o irq_remapping.o
+obj-$(CONFIG_IRQ_REMAP) += irq_remapping.o
 obj-$(CONFIG_MTK_IOMMU) += mtk_iommu.o
 obj-$(CONFIG_MTK_IOMMU_V1) += mtk_iommu_v1.o
 obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o
@@ -35,6 +25,5 @@ obj-$(CONFIG_TEGRA_IOMMU_SMMU) += tegra-smmu.o
 obj-$(CONFIG_EXYNOS_IOMMU) += exynos-iommu.o
 obj-$(CONFIG_FSL_PAMU) += fsl_pamu.o fsl_pamu_domain.o
 obj-$(CONFIG_S390_IOMMU) += s390-iommu.o
-obj-$(CONFIG_QCOM_IOMMU) += qcom_iommu.o
 obj-$(CONFIG_HYPERV_IOMMU) += hyperv-iommu.o
 obj-$(CONFIG_VIRTIO_IOMMU) += virtio-iommu.o
diff --git a/drivers/iommu/amd/Kconfig b/drivers/iommu/amd/Kconfig
new file mode 100644 (file)
index 0000000..1f061d9
--- /dev/null
@@ -0,0 +1,44 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# AMD IOMMU support
+config AMD_IOMMU
+       bool "AMD IOMMU support"
+       select SWIOTLB
+       select PCI_MSI
+       select PCI_ATS
+       select PCI_PRI
+       select PCI_PASID
+       select IOMMU_API
+       select IOMMU_IOVA
+       select IOMMU_DMA
+       depends on X86_64 && PCI && ACPI
+       help
+         With this option you can enable support for AMD IOMMU hardware in
+         your system. An IOMMU is a hardware component which provides
+         remapping of DMA memory accesses from devices. With an AMD IOMMU you
+         can isolate the DMA memory of different devices and protect the
+         system from misbehaving device drivers or hardware.
+
+         You can find out if your system has an AMD IOMMU if you look into
+         your BIOS for an option to enable it or if you have an IVRS ACPI
+         table.
+
+config AMD_IOMMU_V2
+       tristate "AMD IOMMU Version 2 driver"
+       depends on AMD_IOMMU
+       select MMU_NOTIFIER
+       help
+         This option enables support for the AMD IOMMUv2 features of the IOMMU
+         hardware. Select this option if you want to use devices that support
+         the PCI PRI and PASID interface.
+
+config AMD_IOMMU_DEBUGFS
+       bool "Enable AMD IOMMU internals in DebugFS"
+       depends on AMD_IOMMU && IOMMU_DEBUGFS
+       help
+         !!!WARNING!!!  !!!WARNING!!!  !!!WARNING!!!  !!!WARNING!!!
+
+         DO NOT ENABLE THIS OPTION UNLESS YOU REALLY, -REALLY- KNOW WHAT YOU ARE DOING!!!
+         Exposes AMD IOMMU device internals in DebugFS.
+
+         This option is -NOT- intended for production environments, and should
+         not generally be enabled.
diff --git a/drivers/iommu/amd/Makefile b/drivers/iommu/amd/Makefile
new file mode 100644 (file)
index 0000000..dc5a2fa
--- /dev/null
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_AMD_IOMMU) += iommu.o init.o quirks.o
+obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += debugfs.o
+obj-$(CONFIG_AMD_IOMMU_V2) += iommu_v2.o
index 6ebd482..958050c 100644 (file)
@@ -720,21 +720,14 @@ static void iommu_enable_ppr_log(struct amd_iommu *iommu)
 
 static void __init free_ppr_log(struct amd_iommu *iommu)
 {
-       if (iommu->ppr_log == NULL)
-               return;
-
        free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE));
 }
 
 static void free_ga_log(struct amd_iommu *iommu)
 {
 #ifdef CONFIG_IRQ_REMAP
-       if (iommu->ga_log)
-               free_pages((unsigned long)iommu->ga_log,
-                           get_order(GA_LOG_SIZE));
-       if (iommu->ga_log_tail)
-               free_pages((unsigned long)iommu->ga_log_tail,
-                           get_order(8));
+       free_pages((unsigned long)iommu->ga_log, get_order(GA_LOG_SIZE));
+       free_pages((unsigned long)iommu->ga_log_tail, get_order(8));
 #endif
 }
 
@@ -1842,7 +1835,7 @@ static void print_iommu_info(void)
                pci_info(pdev, "Found IOMMU cap 0x%hx\n", iommu->cap_ptr);
 
                if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
-                       pci_info(pdev, "Extended features (%#llx):\n",
+                       pci_info(pdev, "Extended features (%#llx):",
                                 iommu->features);
                        for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
                                if (iommu_feature(iommu, (1ULL << i)))
index 2f22326..ba9f3db 100644 (file)
@@ -162,7 +162,18 @@ static void amd_iommu_domain_get_pgtable(struct protection_domain *domain,
        pgtable->mode = pt_root & 7; /* lowest 3 bits encode pgtable mode */
 }
 
-static u64 amd_iommu_domain_encode_pgtable(u64 *root, int mode)
+static void amd_iommu_domain_set_pt_root(struct protection_domain *domain, u64 root)
+{
+       atomic64_set(&domain->pt_root, root);
+}
+
+static void amd_iommu_domain_clr_pt_root(struct protection_domain *domain)
+{
+       amd_iommu_domain_set_pt_root(domain, 0);
+}
+
+static void amd_iommu_domain_set_pgtable(struct protection_domain *domain,
+                                        u64 *root, int mode)
 {
        u64 pt_root;
 
@@ -170,7 +181,7 @@ static u64 amd_iommu_domain_encode_pgtable(u64 *root, int mode)
        pt_root = mode & 7;
        pt_root |= (u64)root;
 
-       return pt_root;
+       amd_iommu_domain_set_pt_root(domain, pt_root);
 }
 
 static struct iommu_dev_data *alloc_dev_data(u16 devid)
@@ -1410,7 +1421,7 @@ static bool increase_address_space(struct protection_domain *domain,
        struct domain_pgtable pgtable;
        unsigned long flags;
        bool ret = true;
-       u64 *pte, root;
+       u64 *pte;
 
        spin_lock_irqsave(&domain->lock, flags);
 
@@ -1438,8 +1449,7 @@ static bool increase_address_space(struct protection_domain *domain,
         * Device Table needs to be updated and flushed before the new root can
         * be published.
         */
-       root = amd_iommu_domain_encode_pgtable(pte, pgtable.mode);
-       atomic64_set(&domain->pt_root, root);
+       amd_iommu_domain_set_pgtable(domain, pte, pgtable.mode);
 
        ret = true;
 
@@ -2319,7 +2329,7 @@ static void protection_domain_free(struct protection_domain *domain)
                domain_id_free(domain->id);
 
        amd_iommu_domain_get_pgtable(domain, &pgtable);
-       atomic64_set(&domain->pt_root, 0);
+       amd_iommu_domain_clr_pt_root(domain);
        free_pagetable(&pgtable);
 
        kfree(domain);
@@ -2327,7 +2337,7 @@ static void protection_domain_free(struct protection_domain *domain)
 
 static int protection_domain_init(struct protection_domain *domain, int mode)
 {
-       u64 *pt_root = NULL, root;
+       u64 *pt_root = NULL;
 
        BUG_ON(mode < PAGE_MODE_NONE || mode > PAGE_MODE_6_LEVEL);
 
@@ -2343,8 +2353,7 @@ static int protection_domain_init(struct protection_domain *domain, int mode)
                        return -ENOMEM;
        }
 
-       root = amd_iommu_domain_encode_pgtable(pt_root, mode);
-       atomic64_set(&domain->pt_root, root);
+       amd_iommu_domain_set_pgtable(domain, pt_root, mode);
 
        return 0;
 }
@@ -2713,8 +2722,8 @@ void amd_iommu_domain_direct_map(struct iommu_domain *dom)
        /* First save pgtable configuration*/
        amd_iommu_domain_get_pgtable(domain, &pgtable);
 
-       /* Update data structure */
-       atomic64_set(&domain->pt_root, 0);
+       /* Remove page-table from domain */
+       amd_iommu_domain_clr_pt_root(domain);
 
        /* Make changes visible to IOMMUs */
        update_domain(domain);
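
The new set/clear helpers above keep the existing pt_root encoding: the page-table root pointer is page-aligned, so its lowest three bits are free to carry the paging mode, and the packed 64-bit value is published with atomic64_set(). A standalone sketch of that packing, with illustrative values:

#include <stdint.h>
#include <stdio.h>

/* Pack a page-aligned page-table root with a 3-bit paging mode,
 * mirroring amd_iommu_domain_set_pgtable()/_get_pgtable(). */
static uint64_t encode_pt_root(uint64_t root, unsigned int mode)
{
	return (root & ~7ULL) | (mode & 7);
}

static void decode_pt_root(uint64_t pt_root, uint64_t *root, unsigned int *mode)
{
	*mode = pt_root & 7;		/* lowest 3 bits encode pgtable mode */
	*root = pt_root & ~7ULL;	/* remaining bits hold the aligned root */
}

int main(void)
{
	uint64_t packed = encode_pt_root(0x123456000ULL, 3);
	uint64_t root;
	unsigned int mode;

	decode_pt_root(packed, &root, &mode);
	printf("root=%#llx mode=%u\n", (unsigned long long)root, mode);
	return 0;
}

Built as a plain user-space program this prints root=0x123456000 mode=3, matching the values the kernel helpers would store and recover.
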
index e4b025c..c259108 100644 (file)
@@ -495,7 +495,7 @@ static void do_fault(struct work_struct *work)
        if (access_error(vma, fault))
                goto out;
 
-       ret = handle_mm_fault(vma, address, flags);
+       ret = handle_mm_fault(vma, address, flags, NULL);
 out:
        mmap_read_unlock(mm);
 
similarity index 52%
rename from arch/s390/numa/Makefile
rename to drivers/iommu/arm/Makefile
index c89d26f..0f9efea 100644 (file)
@@ -1,2 +1,2 @@
 # SPDX-License-Identifier: GPL-2.0
-obj-y                  += numa.o
+obj-y += arm-smmu/ arm-smmu-v3/
diff --git a/drivers/iommu/arm/arm-smmu-v3/Makefile b/drivers/iommu/arm/arm-smmu-v3/Makefile
new file mode 100644 (file)
index 0000000..569e24e
--- /dev/null
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_ARM_SMMU_V3) += arm-smmu-v3.o
similarity index 99%
rename from drivers/iommu/arm-smmu-v3.c
rename to drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index f578677..7196207 100644 (file)
@@ -1479,7 +1479,7 @@ static int arm_smmu_cmdq_issue_cmdlist(struct arm_smmu_device *smmu,
                }
 
                /*
-                * Try to unlock the cmq lock. This will fail if we're the last
+                * Try to unlock the cmdq lock. This will fail if we're the last
                 * reader, in which case we can safely update cmdq->q.llq.cons
                 */
                if (!arm_smmu_cmdq_shared_tryunlock(cmdq)) {
@@ -2850,7 +2850,7 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
        if (!ops)
                return -ENODEV;
 
-       return ops->map(ops, iova, paddr, size, prot);
+       return ops->map(ops, iova, paddr, size, prot, gfp);
 }
 
 static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
diff --git a/drivers/iommu/arm/arm-smmu/Makefile b/drivers/iommu/arm/arm-smmu/Makefile
new file mode 100644 (file)
index 0000000..e240a7b
--- /dev/null
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_QCOM_IOMMU) += qcom_iommu.o
+obj-$(CONFIG_ARM_SMMU) += arm_smmu.o
+arm_smmu-objs += arm-smmu.o arm-smmu-impl.o arm-smmu-nvidia.o arm-smmu-qcom.o
similarity index 72%
rename from drivers/iommu/arm-smmu-impl.c
rename to drivers/iommu/arm/arm-smmu/arm-smmu-impl.c
index c75b9d9..f4ff124 100644 (file)
@@ -147,16 +147,57 @@ static const struct arm_smmu_impl arm_mmu500_impl = {
        .reset = arm_mmu500_reset,
 };
 
+static u64 mrvl_mmu500_readq(struct arm_smmu_device *smmu, int page, int off)
+{
+       /*
+        * Marvell Armada-AP806 erratum #582743.
+        * Split each readq into two readl accesses.
+        */
+       return hi_lo_readq_relaxed(arm_smmu_page(smmu, page) + off);
+}
+
+static void mrvl_mmu500_writeq(struct arm_smmu_device *smmu, int page, int off,
+                              u64 val)
+{
+       /*
+        * Marvell Armada-AP806 erratum #582743.
+        * Split all the writeq to double writel
+        */
+       hi_lo_writeq_relaxed(val, arm_smmu_page(smmu, page) + off);
+}
+
+static int mrvl_mmu500_cfg_probe(struct arm_smmu_device *smmu)
+{
+
+       /*
+        * Armada-AP806 erratum #582743.
+        * Hide the SMMU_IDR2.PTFSv8 fields to sidestep the AArch64
+        * formats altogether and allow using 32-bit accesses on the
+        * interconnect.
+        */
+       smmu->features &= ~(ARM_SMMU_FEAT_FMT_AARCH64_4K |
+                           ARM_SMMU_FEAT_FMT_AARCH64_16K |
+                           ARM_SMMU_FEAT_FMT_AARCH64_64K);
+
+       return 0;
+}
+
+static const struct arm_smmu_impl mrvl_mmu500_impl = {
+       .read_reg64 = mrvl_mmu500_readq,
+       .write_reg64 = mrvl_mmu500_writeq,
+       .cfg_probe = mrvl_mmu500_cfg_probe,
+       .reset = arm_mmu500_reset,
+};
+
 
 struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu)
 {
        const struct device_node *np = smmu->dev->of_node;
 
        /*
-        * We will inevitably have to combine model-specific implementation
-        * quirks with platform-specific integration quirks, but everything
-        * we currently support happens to work out as straightforward
-        * mutually-exclusive assignments.
+        * Set the impl for model-specific implementation quirks first,
+        * such that platform integration quirks can pick it up and
+        * inherit from it if necessary.
         */
        switch (smmu->model) {
        case ARM_MMU500:
@@ -168,12 +209,21 @@ struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu)
                break;
        }
 
+       /* This is implicitly MMU-400 */
        if (of_property_read_bool(np, "calxeda,smmu-secure-config-access"))
                smmu->impl = &calxeda_impl;
 
+       if (of_device_is_compatible(np, "nvidia,tegra194-smmu"))
+               return nvidia_smmu_impl_init(smmu);
+
        if (of_device_is_compatible(np, "qcom,sdm845-smmu-500") ||
-           of_device_is_compatible(np, "qcom,sc7180-smmu-500"))
+           of_device_is_compatible(np, "qcom,sc7180-smmu-500") ||
+           of_device_is_compatible(np, "qcom,sm8150-smmu-500") ||
+           of_device_is_compatible(np, "qcom,sm8250-smmu-500"))
                return qcom_smmu_impl_init(smmu);
 
+       if (of_device_is_compatible(np, "marvell,ap806-smmu-500"))
+               smmu->impl = &mrvl_mmu500_impl;
+
        return smmu;
 }
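
The Marvell workaround relies on hi_lo_readq_relaxed()/hi_lo_writeq_relaxed() from <linux/io-64-nonatomic-hi-lo.h>, which perform a 64-bit MMIO access as two 32-bit accesses, high half first. The read side looks roughly like the sketch below; this is an approximation of the generic helper's behaviour for illustration, not the exact kernel source.

#include <linux/io.h>
#include <linux/types.h>

/* Approximate shape of hi_lo_readq_relaxed(): two 32-bit reads, high
 * word first, combined into one 64-bit value. Not atomic with respect
 * to a concurrent 64-bit update of the register. */
static inline u64 hi_lo_readq_relaxed_sketch(const volatile void __iomem *addr)
{
	const volatile u32 __iomem *p = addr;
	u32 low, high;

	high = readl_relaxed(p + 1);
	low = readl_relaxed(p);

	return low + ((u64)high << 32);
}
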
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c b/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c
new file mode 100644 (file)
index 0000000..3136805
--- /dev/null
@@ -0,0 +1,278 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2019-2020 NVIDIA CORPORATION.  All rights reserved.
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "arm-smmu.h"
+
+/*
+ * Tegra194 has three ARM MMU-500 instances.
+ * Two of them are used together and must be programmed identically so
+ * that IOVA accesses are interleaved across them; they translate accesses
+ * from non-isochronous HW devices.
+ * The third instance translates accesses from isochronous HW devices.
+ * This implementation supports programming the two instances that must
+ * be programmed identically.
+ * The third instance is handled by the standard arm-smmu driver itself
+ * and is out of scope of this implementation.
+ */
+#define NUM_SMMU_INSTANCES 2
+
+struct nvidia_smmu {
+       struct arm_smmu_device  smmu;
+       void __iomem            *bases[NUM_SMMU_INSTANCES];
+};
+
+static inline void __iomem *nvidia_smmu_page(struct arm_smmu_device *smmu,
+                                            unsigned int inst, int page)
+{
+       struct nvidia_smmu *nvidia_smmu;
+
+       nvidia_smmu = container_of(smmu, struct nvidia_smmu, smmu);
+       return nvidia_smmu->bases[inst] + (page << smmu->pgshift);
+}
+
+static u32 nvidia_smmu_read_reg(struct arm_smmu_device *smmu,
+                               int page, int offset)
+{
+       void __iomem *reg = nvidia_smmu_page(smmu, 0, page) + offset;
+
+       return readl_relaxed(reg);
+}
+
+static void nvidia_smmu_write_reg(struct arm_smmu_device *smmu,
+                                 int page, int offset, u32 val)
+{
+       unsigned int i;
+
+       for (i = 0; i < NUM_SMMU_INSTANCES; i++) {
+               void __iomem *reg = nvidia_smmu_page(smmu, i, page) + offset;
+
+               writel_relaxed(val, reg);
+       }
+}
+
+static u64 nvidia_smmu_read_reg64(struct arm_smmu_device *smmu,
+                                 int page, int offset)
+{
+       void __iomem *reg = nvidia_smmu_page(smmu, 0, page) + offset;
+
+       return readq_relaxed(reg);
+}
+
+static void nvidia_smmu_write_reg64(struct arm_smmu_device *smmu,
+                                   int page, int offset, u64 val)
+{
+       unsigned int i;
+
+       for (i = 0; i < NUM_SMMU_INSTANCES; i++) {
+               void __iomem *reg = nvidia_smmu_page(smmu, i, page) + offset;
+
+               writeq_relaxed(val, reg);
+       }
+}
+
+static void nvidia_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
+                                int sync, int status)
+{
+       unsigned int delay;
+
+       arm_smmu_writel(smmu, page, sync, 0);
+
+       for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
+               unsigned int spin_cnt;
+
+               for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
+                       u32 val = 0;
+                       unsigned int i;
+
+                       for (i = 0; i < NUM_SMMU_INSTANCES; i++) {
+                               void __iomem *reg;
+
+                               reg = nvidia_smmu_page(smmu, i, page) + status;
+                               val |= readl_relaxed(reg);
+                       }
+
+                       if (!(val & ARM_SMMU_sTLBGSTATUS_GSACTIVE))
+                               return;
+
+                       cpu_relax();
+               }
+
+               udelay(delay);
+       }
+
+       dev_err_ratelimited(smmu->dev,
+                           "TLB sync timed out -- SMMU may be deadlocked\n");
+}
+
+static int nvidia_smmu_reset(struct arm_smmu_device *smmu)
+{
+       unsigned int i;
+
+       for (i = 0; i < NUM_SMMU_INSTANCES; i++) {
+               u32 val;
+               void __iomem *reg = nvidia_smmu_page(smmu, i, ARM_SMMU_GR0) +
+                                   ARM_SMMU_GR0_sGFSR;
+
+               /* clear global FSR */
+               val = readl_relaxed(reg);
+               writel_relaxed(val, reg);
+       }
+
+       return 0;
+}
+
+static irqreturn_t nvidia_smmu_global_fault_inst(int irq,
+                                                struct arm_smmu_device *smmu,
+                                                int inst)
+{
+       u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
+       void __iomem *gr0_base = nvidia_smmu_page(smmu, inst, 0);
+
+       gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
+       if (!gfsr)
+               return IRQ_NONE;
+
+       gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
+       gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
+       gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);
+
+       dev_err_ratelimited(smmu->dev,
+                           "Unexpected global fault, this could be serious\n");
+       dev_err_ratelimited(smmu->dev,
+                           "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
+                           gfsr, gfsynr0, gfsynr1, gfsynr2);
+
+       writel_relaxed(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t nvidia_smmu_global_fault(int irq, void *dev)
+{
+       unsigned int inst;
+       irqreturn_t ret = IRQ_NONE;
+       struct arm_smmu_device *smmu = dev;
+
+       for (inst = 0; inst < NUM_SMMU_INSTANCES; inst++) {
+               irqreturn_t irq_ret;
+
+               irq_ret = nvidia_smmu_global_fault_inst(irq, smmu, inst);
+               if (irq_ret == IRQ_HANDLED)
+                       ret = IRQ_HANDLED;
+       }
+
+       return ret;
+}
+
+static irqreturn_t nvidia_smmu_context_fault_bank(int irq,
+                                                 struct arm_smmu_device *smmu,
+                                                 int idx, int inst)
+{
+       u32 fsr, fsynr, cbfrsynra;
+       unsigned long iova;
+       void __iomem *gr1_base = nvidia_smmu_page(smmu, inst, 1);
+       void __iomem *cb_base = nvidia_smmu_page(smmu, inst, smmu->numpage + idx);
+
+       fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
+       if (!(fsr & ARM_SMMU_FSR_FAULT))
+               return IRQ_NONE;
+
+       fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
+       iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
+       cbfrsynra = readl_relaxed(gr1_base + ARM_SMMU_GR1_CBFRSYNRA(idx));
+
+       dev_err_ratelimited(smmu->dev,
+                           "Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cbfrsynra=0x%x, cb=%d\n",
+                           fsr, iova, fsynr, cbfrsynra, idx);
+
+       writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t nvidia_smmu_context_fault(int irq, void *dev)
+{
+       int idx;
+       unsigned int inst;
+       irqreturn_t ret = IRQ_NONE;
+       struct arm_smmu_device *smmu;
+       struct iommu_domain *domain = dev;
+       struct arm_smmu_domain *smmu_domain;
+
+       smmu_domain = container_of(domain, struct arm_smmu_domain, domain);
+       smmu = smmu_domain->smmu;
+
+       for (inst = 0; inst < NUM_SMMU_INSTANCES; inst++) {
+               irqreturn_t irq_ret;
+
+               /*
+                * Interrupt line is shared between all contexts.
+                * Check for faults across all contexts.
+                */
+               for (idx = 0; idx < smmu->num_context_banks; idx++) {
+                       irq_ret = nvidia_smmu_context_fault_bank(irq, smmu,
+                                                                idx, inst);
+                       if (irq_ret == IRQ_HANDLED)
+                               ret = IRQ_HANDLED;
+               }
+       }
+
+       return ret;
+}
+
+static const struct arm_smmu_impl nvidia_smmu_impl = {
+       .read_reg = nvidia_smmu_read_reg,
+       .write_reg = nvidia_smmu_write_reg,
+       .read_reg64 = nvidia_smmu_read_reg64,
+       .write_reg64 = nvidia_smmu_write_reg64,
+       .reset = nvidia_smmu_reset,
+       .tlb_sync = nvidia_smmu_tlb_sync,
+       .global_fault = nvidia_smmu_global_fault,
+       .context_fault = nvidia_smmu_context_fault,
+};
+
+struct arm_smmu_device *nvidia_smmu_impl_init(struct arm_smmu_device *smmu)
+{
+       struct resource *res;
+       struct device *dev = smmu->dev;
+       struct nvidia_smmu *nvidia_smmu;
+       struct platform_device *pdev = to_platform_device(dev);
+
+       nvidia_smmu = devm_kzalloc(dev, sizeof(*nvidia_smmu), GFP_KERNEL);
+       if (!nvidia_smmu)
+               return ERR_PTR(-ENOMEM);
+
+       /*
+        * Copy the data from the struct arm_smmu_device allocated in
+        * arm-smmu.c. The smmu embedded in struct nvidia_smmu replaces the
+        * smmu pointer used by arm-smmu.c once this function returns.
+        * This is necessary so that nvidia_smmu can be derived from the
+        * smmu pointer passed to the arm_smmu_impl callbacks later on.
+        */
+       nvidia_smmu->smmu = *smmu;
+       /* Instance 0 is ioremapped by arm-smmu.c. */
+       nvidia_smmu->bases[0] = smmu->base;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+       if (!res)
+               return ERR_PTR(-ENODEV);
+
+       nvidia_smmu->bases[1] = devm_ioremap_resource(dev, res);
+       if (IS_ERR(nvidia_smmu->bases[1]))
+               return ERR_CAST(nvidia_smmu->bases[1]);
+
+       nvidia_smmu->smmu.impl = &nvidia_smmu_impl;
+
+       /*
+        * Free the struct arm_smmu_device *smmu allocated in arm-smmu.c.
+        * Once this function returns, arm-smmu.c will use the
+        * arm_smmu_device embedded in struct nvidia_smmu instead.
+        */
+       devm_kfree(dev, smmu);
+
+       return &nvidia_smmu->smmu;
+}
similarity index 98%
rename from drivers/iommu/arm-smmu.c
rename to drivers/iommu/arm/arm-smmu/arm-smmu.c
index 243bc4c..09c42af 100644 (file)
@@ -52,9 +52,6 @@
  */
 #define QCOM_DUMMY_VAL -1
 
-#define TLB_LOOP_TIMEOUT               1000000 /* 1s! */
-#define TLB_SPIN_COUNT                 10
-
 #define MSI_IOVA_BASE                  0x8000000
 #define MSI_IOVA_LENGTH                        0x100000
 
@@ -673,6 +670,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
        enum io_pgtable_fmt fmt;
        struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
        struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+       irqreturn_t (*context_fault)(int irq, void *dev);
 
        mutex_lock(&smmu_domain->init_mutex);
        if (smmu_domain->smmu)
@@ -835,7 +833,13 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
         * handler seeing a half-initialised domain state.
         */
        irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
-       ret = devm_request_irq(smmu->dev, irq, arm_smmu_context_fault,
+
+       if (smmu->impl && smmu->impl->context_fault)
+               context_fault = smmu->impl->context_fault;
+       else
+               context_fault = arm_smmu_context_fault;
+
+       ret = devm_request_irq(smmu->dev, irq, context_fault,
                               IRQF_SHARED, "arm-smmu-context-fault", domain);
        if (ret < 0) {
                dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
@@ -1227,7 +1231,7 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
                return -ENODEV;
 
        arm_smmu_rpm_get(smmu);
-       ret = ops->map(ops, iova, paddr, size, prot);
+       ret = ops->map(ops, iova, paddr, size, prot, gfp);
        arm_smmu_rpm_put(smmu);
 
        return ret;
@@ -1728,7 +1732,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
        unsigned int size;
        u32 id;
        bool cttw_reg, cttw_fw = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK;
-       int i;
+       int i, ret;
 
        dev_notice(smmu->dev, "probing hardware configuration...\n");
        dev_notice(smmu->dev, "SMMUv%d with:\n",
@@ -1891,6 +1895,12 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
                        smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
        }
 
+       if (smmu->impl && smmu->impl->cfg_probe) {
+               ret = smmu->impl->cfg_probe(smmu);
+               if (ret)
+                       return ret;
+       }
+
        /* Now we've corralled the various formats, what'll it do? */
        if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
                smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
@@ -1918,9 +1928,6 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
                dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
                           smmu->ipa_size, smmu->pa_size);
 
-       if (smmu->impl && smmu->impl->cfg_probe)
-               return smmu->impl->cfg_probe(smmu);
-
        return 0;
 }
 
@@ -1946,6 +1953,7 @@ static const struct of_device_id arm_smmu_of_match[] = {
        { .compatible = "arm,mmu-401", .data = &arm_mmu401 },
        { .compatible = "arm,mmu-500", .data = &arm_mmu500 },
        { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
+       { .compatible = "nvidia,smmu-500", .data = &arm_mmu500 },
        { .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
        { },
 };
@@ -2107,6 +2115,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
        struct arm_smmu_device *smmu;
        struct device *dev = &pdev->dev;
        int num_irqs, i, err;
+       irqreturn_t (*global_fault)(int irq, void *dev);
 
        smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
        if (!smmu) {
@@ -2123,10 +2132,6 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
        if (err)
                return err;
 
-       smmu = arm_smmu_impl_init(smmu);
-       if (IS_ERR(smmu))
-               return PTR_ERR(smmu);
-
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        ioaddr = res->start;
        smmu->base = devm_ioremap_resource(dev, res);
@@ -2138,6 +2143,10 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
         */
        smmu->numpage = resource_size(res);
 
+       smmu = arm_smmu_impl_init(smmu);
+       if (IS_ERR(smmu))
+               return PTR_ERR(smmu);
+
        num_irqs = 0;
        while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
                num_irqs++;
@@ -2193,9 +2202,14 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
                smmu->num_context_irqs = smmu->num_context_banks;
        }
 
+       if (smmu->impl && smmu->impl->global_fault)
+               global_fault = smmu->impl->global_fault;
+       else
+               global_fault = arm_smmu_global_fault;
+
        for (i = 0; i < smmu->num_global_irqs; ++i) {
                err = devm_request_irq(smmu->dev, smmu->irqs[i],
-                                      arm_smmu_global_fault,
+                                      global_fault,
                                       IRQF_SHARED,
                                       "arm-smmu global fault",
                                       smmu);
similarity index 98%
rename from drivers/iommu/arm-smmu.h
rename to drivers/iommu/arm/arm-smmu/arm-smmu.h
index d172c02..d890a4a 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/io-64-nonatomic-hi-lo.h>
 #include <linux/io-pgtable.h>
 #include <linux/iommu.h>
+#include <linux/irqreturn.h>
 #include <linux/mutex.h>
 #include <linux/spinlock.h>
 #include <linux/types.h>
@@ -236,6 +237,8 @@ enum arm_smmu_cbar_type {
 /* Maximum number of context banks per SMMU */
 #define ARM_SMMU_MAX_CBS               128
 
+#define TLB_LOOP_TIMEOUT               1000000 /* 1s! */
+#define TLB_SPIN_COUNT                 10
 
 /* Shared driver definitions */
 enum arm_smmu_arch_version {
@@ -387,6 +390,8 @@ struct arm_smmu_impl {
        void (*tlb_sync)(struct arm_smmu_device *smmu, int page, int sync,
                         int status);
        int (*def_domain_type)(struct device *dev);
+       irqreturn_t (*global_fault)(int irq, void *dev);
+       irqreturn_t (*context_fault)(int irq, void *dev);
 };
 
 static inline void __iomem *arm_smmu_page(struct arm_smmu_device *smmu, int n)
@@ -450,6 +455,7 @@ static inline void arm_smmu_writeq(struct arm_smmu_device *smmu, int page,
        arm_smmu_writeq((s), ARM_SMMU_CB((s), (n)), (o), (v))
 
 struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu);
+struct arm_smmu_device *nvidia_smmu_impl_init(struct arm_smmu_device *smmu);
 struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu);
 
 int arm_mmu500_reset(struct arm_smmu_device *smmu);
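
The header above now exposes per-implementation hooks for both fault IRQ handlers, matching the probe-path changes earlier in this series. A minimal, hypothetical sketch of an implementation wiring them up follows; everything prefixed my_ is invented for illustration and is not part of the patch.

#include <linux/device.h>
#include <linux/irqreturn.h>

#include "arm-smmu.h"

static irqreturn_t my_smmu_global_fault(int irq, void *dev)
{
	struct arm_smmu_device *smmu = dev;	/* devm_request_irq() passes the smmu here */

	dev_err(smmu->dev, "implementation-specific global fault on IRQ %d\n", irq);
	return IRQ_HANDLED;
}

static irqreturn_t my_smmu_context_fault(int irq, void *dev)
{
	/* the context IRQ is requested with the iommu_domain as its cookie */
	return IRQ_HANDLED;
}

static const struct arm_smmu_impl my_smmu_impl = {
	.global_fault	= my_smmu_global_fault,
	.context_fault	= my_smmu_context_fault,
};

struct arm_smmu_device *my_smmu_impl_init(struct arm_smmu_device *smmu)
{
	/* mirrors how nvidia_smmu_impl_init()/qcom_smmu_impl_init() hook in */
	smmu->impl = &my_smmu_impl;
	return smmu;
}
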
similarity index 95%
rename from drivers/iommu/qcom_iommu.c
rename to drivers/iommu/arm/arm-smmu/qcom_iommu.c
index d176df5..af6bec3 100644 (file)
 
 #define SMMU_INTR_SEL_NS     0x2000
 
+enum qcom_iommu_clk {
+       CLK_IFACE,
+       CLK_BUS,
+       CLK_TBU,
+       CLK_NUM,
+};
+
 struct qcom_iommu_ctx;
 
 struct qcom_iommu_dev {
        /* IOMMU core code handle */
        struct iommu_device      iommu;
        struct device           *dev;
-       struct clk              *iface_clk;
-       struct clk              *bus_clk;
+       struct clk_bulk_data clks[CLK_NUM];
        void __iomem            *local_base;
        u32                      sec_id;
        u8                       num_ctxs;
@@ -301,7 +307,7 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain,
                      ARM_SMMU_SCTLR_M | ARM_SMMU_SCTLR_S1_ASIDPNE |
                      ARM_SMMU_SCTLR_CFCFG;
 
-               if (IS_ENABLED(CONFIG_BIG_ENDIAN))
+               if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
                        reg |= ARM_SMMU_SCTLR_E;
 
                iommu_writel(ctx, ARM_SMMU_CB_SCTLR, reg);
@@ -438,7 +444,7 @@ static int qcom_iommu_map(struct iommu_domain *domain, unsigned long iova,
                return -ENODEV;
 
        spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags);
-       ret = ops->map(ops, iova, paddr, size, prot);
+       ret = ops->map(ops, iova, paddr, size, prot, GFP_ATOMIC);
        spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags);
        return ret;
 }
@@ -613,32 +619,6 @@ static const struct iommu_ops qcom_iommu_ops = {
        .pgsize_bitmap  = SZ_4K | SZ_64K | SZ_1M | SZ_16M,
 };
 
-static int qcom_iommu_enable_clocks(struct qcom_iommu_dev *qcom_iommu)
-{
-       int ret;
-
-       ret = clk_prepare_enable(qcom_iommu->iface_clk);
-       if (ret) {
-               dev_err(qcom_iommu->dev, "Couldn't enable iface_clk\n");
-               return ret;
-       }
-
-       ret = clk_prepare_enable(qcom_iommu->bus_clk);
-       if (ret) {
-               dev_err(qcom_iommu->dev, "Couldn't enable bus_clk\n");
-               clk_disable_unprepare(qcom_iommu->iface_clk);
-               return ret;
-       }
-
-       return 0;
-}
-
-static void qcom_iommu_disable_clocks(struct qcom_iommu_dev *qcom_iommu)
-{
-       clk_disable_unprepare(qcom_iommu->bus_clk);
-       clk_disable_unprepare(qcom_iommu->iface_clk);
-}
-
 static int qcom_iommu_sec_ptbl_init(struct device *dev)
 {
        size_t psize = 0;
@@ -795,6 +775,7 @@ static int qcom_iommu_device_probe(struct platform_device *pdev)
        struct qcom_iommu_dev *qcom_iommu;
        struct device *dev = &pdev->dev;
        struct resource *res;
+       struct clk *clk;
        int ret, max_asid = 0;
 
        /* find the max asid (which is 1:1 to ctx bank idx), so we know how
@@ -817,17 +798,26 @@ static int qcom_iommu_device_probe(struct platform_device *pdev)
                        return PTR_ERR(qcom_iommu->local_base);
        }
 
-       qcom_iommu->iface_clk = devm_clk_get(dev, "iface");
-       if (IS_ERR(qcom_iommu->iface_clk)) {
+       clk = devm_clk_get(dev, "iface");
+       if (IS_ERR(clk)) {
                dev_err(dev, "failed to get iface clock\n");
-               return PTR_ERR(qcom_iommu->iface_clk);
+               return PTR_ERR(clk);
        }
+       qcom_iommu->clks[CLK_IFACE].clk = clk;
 
-       qcom_iommu->bus_clk = devm_clk_get(dev, "bus");
-       if (IS_ERR(qcom_iommu->bus_clk)) {
+       clk = devm_clk_get(dev, "bus");
+       if (IS_ERR(clk)) {
                dev_err(dev, "failed to get bus clock\n");
-               return PTR_ERR(qcom_iommu->bus_clk);
+               return PTR_ERR(clk);
+       }
+       qcom_iommu->clks[CLK_BUS].clk = clk;
+
+       clk = devm_clk_get_optional(dev, "tbu");
+       if (IS_ERR(clk)) {
+               dev_err(dev, "failed to get tbu clock\n");
+               return PTR_ERR(clk);
        }
+       qcom_iommu->clks[CLK_TBU].clk = clk;
 
        if (of_property_read_u32(dev->of_node, "qcom,iommu-secure-id",
                                 &qcom_iommu->sec_id)) {
@@ -899,14 +889,14 @@ static int __maybe_unused qcom_iommu_resume(struct device *dev)
 {
        struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev);
 
-       return qcom_iommu_enable_clocks(qcom_iommu);
+       return clk_bulk_prepare_enable(CLK_NUM, qcom_iommu->clks);
 }
 
 static int __maybe_unused qcom_iommu_suspend(struct device *dev)
 {
        struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev);
 
-       qcom_iommu_disable_clocks(qcom_iommu);
+       clk_bulk_disable_unprepare(CLK_NUM, qcom_iommu->clks);
 
        return 0;
 }
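
The qcom_iommu changes above replace two open-coded clock handles with a clk_bulk_data array, so suspend/resume collapse into single bulk calls and the optional TBU clock folds in naturally (a missing optional clock is NULL, which the clk_bulk helpers accept). A condensed sketch of the pattern, with placeholder clock names rather than the driver's:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

enum { CLK_FOO, CLK_BAR, CLK_COUNT };

static struct clk_bulk_data example_clks[CLK_COUNT];

static int example_get_and_enable(struct device *dev)
{
	example_clks[CLK_FOO].clk = devm_clk_get(dev, "foo");
	if (IS_ERR(example_clks[CLK_FOO].clk))
		return PTR_ERR(example_clks[CLK_FOO].clk);

	/* optional clock: absent means NULL, which the bulk helpers tolerate */
	example_clks[CLK_BAR].clk = devm_clk_get_optional(dev, "bar");
	if (IS_ERR(example_clks[CLK_BAR].clk))
		return PTR_ERR(example_clks[CLK_BAR].clk);

	return clk_bulk_prepare_enable(CLK_COUNT, example_clks);
}

static void example_disable(void)
{
	clk_bulk_disable_unprepare(CLK_COUNT, example_clks);
}
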
index 4959f5d..5141d49 100644 (file)
@@ -1035,8 +1035,8 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
 
        if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
            !gfpflags_allow_blocking(gfp) && !coherent)
-               cpu_addr = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &page,
-                                              gfp);
+               page = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &cpu_addr,
+                                              gfp, NULL);
        else
                cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs);
        if (!cpu_addr)
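
The hunk above adapts to a changed atomic-pool helper: dma_alloc_from_pool() now returns the backing struct page, writes the CPU address through a pointer, and takes an optional physical-address filter as its last argument. A hedged caller sketch; the header location is an assumption, and example_* names are illustrative only.

#include <linux/dma-direct.h>	/* assumed location of the pool helpers */
#include <linux/gfp.h>
#include <linux/mm.h>

static void *example_alloc_from_atomic_pool(struct device *dev, size_t size,
					    gfp_t gfp, struct page **pagep)
{
	void *cpu_addr;
	struct page *page;

	/* NULL filter: any physical address the pool hands back is acceptable */
	page = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &cpu_addr, gfp, NULL);
	if (!page)
		return NULL;

	*pagep = page;
	return cpu_addr;
}
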
index 60c8a56..bad3c0c 100644 (file)
@@ -173,7 +173,7 @@ static u32 lv2ent_offset(sysmmu_iova_t iova)
 #define REG_V5_FAULT_AR_VA     0x070
 #define REG_V5_FAULT_AW_VA     0x080
 
-#define has_sysmmu(dev)                (dev->archdata.iommu != NULL)
+#define has_sysmmu(dev)                (dev_iommu_priv_get(dev) != NULL)
 
 static struct device *dma_dev;
 static struct kmem_cache *lv2table_kmem_cache;
@@ -226,7 +226,7 @@ static const struct sysmmu_fault_info sysmmu_v5_faults[] = {
 };
 
 /*
- * This structure is attached to dev.archdata.iommu of the master device
+ * This structure is attached to dev->iommu->priv of the master device
  * on device add, contains a list of SYSMMU controllers defined by device tree,
  * which are bound to given master device. It is usually referenced by 'owner'
  * pointer.
@@ -670,7 +670,7 @@ static int __maybe_unused exynos_sysmmu_suspend(struct device *dev)
        struct device *master = data->master;
 
        if (master) {
-               struct exynos_iommu_owner *owner = master->archdata.iommu;
+               struct exynos_iommu_owner *owner = dev_iommu_priv_get(master);
 
                mutex_lock(&owner->rpm_lock);
                if (data->domain) {
@@ -688,7 +688,7 @@ static int __maybe_unused exynos_sysmmu_resume(struct device *dev)
        struct device *master = data->master;
 
        if (master) {
-               struct exynos_iommu_owner *owner = master->archdata.iommu;
+               struct exynos_iommu_owner *owner = dev_iommu_priv_get(master);
 
                mutex_lock(&owner->rpm_lock);
                if (data->domain) {
@@ -721,7 +721,7 @@ static struct platform_driver exynos_sysmmu_driver __refdata = {
        }
 };
 
-static inline void update_pte(sysmmu_pte_t *ent, sysmmu_pte_t val)
+static inline void exynos_iommu_set_pte(sysmmu_pte_t *ent, sysmmu_pte_t val)
 {
        dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent), sizeof(*ent),
                                DMA_TO_DEVICE);
@@ -837,8 +837,8 @@ static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain)
 static void exynos_iommu_detach_device(struct iommu_domain *iommu_domain,
                                    struct device *dev)
 {
-       struct exynos_iommu_owner *owner = dev->archdata.iommu;
        struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
+       struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
        phys_addr_t pagetable = virt_to_phys(domain->pgtable);
        struct sysmmu_drvdata *data, *next;
        unsigned long flags;
@@ -875,8 +875,8 @@ static void exynos_iommu_detach_device(struct iommu_domain *iommu_domain,
 static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
                                   struct device *dev)
 {
-       struct exynos_iommu_owner *owner = dev->archdata.iommu;
        struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
+       struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
        struct sysmmu_drvdata *data;
        phys_addr_t pagetable = virt_to_phys(domain->pgtable);
        unsigned long flags;
@@ -933,7 +933,7 @@ static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
                if (!pent)
                        return ERR_PTR(-ENOMEM);
 
-               update_pte(sent, mk_lv1ent_page(virt_to_phys(pent)));
+               exynos_iommu_set_pte(sent, mk_lv1ent_page(virt_to_phys(pent)));
                kmemleak_ignore(pent);
                *pgcounter = NUM_LV2ENTRIES;
                handle = dma_map_single(dma_dev, pent, LV2TABLE_SIZE,
@@ -994,7 +994,7 @@ static int lv1set_section(struct exynos_iommu_domain *domain,
                *pgcnt = 0;
        }
 
-       update_pte(sent, mk_lv1ent_sect(paddr, prot));
+       exynos_iommu_set_pte(sent, mk_lv1ent_sect(paddr, prot));
 
        spin_lock(&domain->lock);
        if (lv1ent_page_zero(sent)) {
@@ -1018,7 +1018,7 @@ static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
                if (WARN_ON(!lv2ent_fault(pent)))
                        return -EADDRINUSE;
 
-               update_pte(pent, mk_lv2ent_spage(paddr, prot));
+               exynos_iommu_set_pte(pent, mk_lv2ent_spage(paddr, prot));
                *pgcnt -= 1;
        } else { /* size == LPAGE_SIZE */
                int i;
@@ -1150,7 +1150,7 @@ static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain,
                }
 
                /* workaround for h/w bug in System MMU v3.3 */
-               update_pte(ent, ZERO_LV2LINK);
+               exynos_iommu_set_pte(ent, ZERO_LV2LINK);
                size = SECT_SIZE;
                goto done;
        }
@@ -1171,7 +1171,7 @@ static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain,
        }
 
        if (lv2ent_small(ent)) {
-               update_pte(ent, 0);
+               exynos_iommu_set_pte(ent, 0);
                size = SPAGE_SIZE;
                domain->lv2entcnt[lv1ent_offset(iova)] += 1;
                goto done;
@@ -1237,7 +1237,7 @@ static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *iommu_domain,
 
 static struct iommu_device *exynos_iommu_probe_device(struct device *dev)
 {
-       struct exynos_iommu_owner *owner = dev->archdata.iommu;
+       struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
        struct sysmmu_drvdata *data;
 
        if (!has_sysmmu(dev))
@@ -1263,7 +1263,7 @@ static struct iommu_device *exynos_iommu_probe_device(struct device *dev)
 
 static void exynos_iommu_release_device(struct device *dev)
 {
-       struct exynos_iommu_owner *owner = dev->archdata.iommu;
+       struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
        struct sysmmu_drvdata *data;
 
        if (!has_sysmmu(dev))
@@ -1287,8 +1287,8 @@ static void exynos_iommu_release_device(struct device *dev)
 static int exynos_iommu_of_xlate(struct device *dev,
                                 struct of_phandle_args *spec)
 {
-       struct exynos_iommu_owner *owner = dev->archdata.iommu;
        struct platform_device *sysmmu = of_find_device_by_node(spec->np);
+       struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
        struct sysmmu_drvdata *data, *entry;
 
        if (!sysmmu)
@@ -1305,7 +1305,7 @@ static int exynos_iommu_of_xlate(struct device *dev,
 
                INIT_LIST_HEAD(&owner->controllers);
                mutex_init(&owner->rpm_lock);
-               dev->archdata.iommu = owner;
+               dev_iommu_priv_set(dev, owner);
        }
 
        list_for_each_entry(entry, &owner->controllers, owner_node)
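
The Exynos hunks above (and the fsl_pamu ones that follow) move driver-private per-device data from dev->archdata.iommu to the generic accessors in linux/iommu.h. The pattern, reduced to a sketch with a made-up owner type:

#include <linux/device.h>
#include <linux/iommu.h>

struct my_owner { int dummy; };	/* stands in for e.g. exynos_iommu_owner */

static void example_attach_priv(struct device *dev, struct my_owner *owner)
{
	dev_iommu_priv_set(dev, owner);		/* was: dev->archdata.iommu = owner */
}

static struct my_owner *example_get_priv(struct device *dev)
{
	return dev_iommu_priv_get(dev);		/* was: dev->archdata.iommu */
}
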
index cde281b..099a11a 100644 (file)
@@ -1174,10 +1174,7 @@ error:
        if (irq != NO_IRQ)
                free_irq(irq, data);
 
-       if (data) {
-               memset(data, 0, sizeof(struct pamu_isr_data));
-               kfree(data);
-       }
+       kzfree(data);
 
        if (pamu_regs)
                iounmap(pamu_regs);
index 928d377..b211076 100644 (file)
@@ -323,7 +323,7 @@ static void remove_device_ref(struct device_domain_info *info, u32 win_cnt)
        pamu_disable_liodn(info->liodn);
        spin_unlock_irqrestore(&iommu_lock, flags);
        spin_lock_irqsave(&device_domain_lock, flags);
-       info->dev->archdata.iommu_domain = NULL;
+       dev_iommu_priv_set(info->dev, NULL);
        kmem_cache_free(iommu_devinfo_cache, info);
        spin_unlock_irqrestore(&device_domain_lock, flags);
 }
@@ -352,7 +352,7 @@ static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct d
         * Check here if the device is already attached to domain or not.
         * If the device is already attached to a domain detach it.
         */
-       old_domain_info = dev->archdata.iommu_domain;
+       old_domain_info = dev_iommu_priv_get(dev);
        if (old_domain_info && old_domain_info->domain != dma_domain) {
                spin_unlock_irqrestore(&device_domain_lock, flags);
                detach_device(dev, old_domain_info->domain);
@@ -371,8 +371,8 @@ static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct d
         * the info for the first LIODN as all
         * LIODNs share the same domain
         */
-       if (!dev->archdata.iommu_domain)
-               dev->archdata.iommu_domain = info;
+       if (!dev_iommu_priv_get(dev))
+               dev_iommu_priv_set(dev, info);
        spin_unlock_irqrestore(&device_domain_lock, flags);
 }
 
diff --git a/drivers/iommu/intel/Kconfig b/drivers/iommu/intel/Kconfig
new file mode 100644 (file)
index 0000000..5337ee1
--- /dev/null
@@ -0,0 +1,87 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Intel IOMMU support
+config DMAR_TABLE
+       bool
+
+config INTEL_IOMMU
+       bool "Support for Intel IOMMU using DMA Remapping Devices"
+       depends on PCI_MSI && ACPI && (X86 || IA64)
+       select DMA_OPS
+       select IOMMU_API
+       select IOMMU_IOVA
+       select NEED_DMA_MAP_STATE
+       select DMAR_TABLE
+       select SWIOTLB
+       select IOASID
+       help
+         DMA remapping (DMAR) device support enables independent address
+         translations for Direct Memory Access (DMA) from devices.
+         These DMA remapping devices are reported via ACPI tables,
+         along with the PCI device scope covered by each such
+         remapping device.
+
+config INTEL_IOMMU_DEBUGFS
+       bool "Export Intel IOMMU internals in Debugfs"
+       depends on INTEL_IOMMU && IOMMU_DEBUGFS
+       help
+         !!!WARNING!!!
+
+         DO NOT ENABLE THIS OPTION UNLESS YOU REALLY KNOW WHAT YOU ARE DOING!!!
+
+         Expose Intel IOMMU internals in Debugfs.
+
+         This option is -NOT- intended for production environments, and should
+         only be enabled for debugging Intel IOMMU.
+
+config INTEL_IOMMU_SVM
+       bool "Support for Shared Virtual Memory with Intel IOMMU"
+       depends on INTEL_IOMMU && X86_64
+       select PCI_PASID
+       select PCI_PRI
+       select MMU_NOTIFIER
+       select IOASID
+       help
+         Shared Virtual Memory (SVM) provides a facility for devices
+         to access DMA resources through process address space by
+         means of a Process Address Space ID (PASID).
+
+config INTEL_IOMMU_DEFAULT_ON
+       def_bool y
+       prompt "Enable Intel DMA Remapping Devices by default"
+       depends on INTEL_IOMMU
+       help
+         Selecting this option will enable a DMAR device at boot time if
+         one is found. If this option is not selected, DMAR support can
+         be enabled by passing intel_iommu=on to the kernel.
+
+config INTEL_IOMMU_BROKEN_GFX_WA
+       bool "Workaround broken graphics drivers (going away soon)"
+       depends on INTEL_IOMMU && BROKEN && X86
+       help
+         Current graphics drivers tend to use physical addresses
+         for DMA and avoid using the DMA API. Setting this config
+         option permits the IOMMU driver to set a unity map for
+         all OS-visible memory. Hence the driver can continue
+         to use physical addresses for DMA, at least until this
+         option is removed in the 2.6.32 kernel.
+
+config INTEL_IOMMU_FLOPPY_WA
+       def_bool y
+       depends on INTEL_IOMMU && X86
+       help
+         Floppy disk drivers are known to bypass the DMA API and
+         therefore fail to work when the IOMMU is enabled. This
+         workaround sets up a 1:1 mapping for the first 16MiB so
+         that the floppy controller (an ISA device) keeps working.
+
+config INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON
+       bool "Enable Intel IOMMU scalable mode by default"
+       depends on INTEL_IOMMU
+       help
+         Selecting this option enables scalable mode by default if the
+         hardware advertises the capability. Scalable mode is defined in
+         VT-d 3.0, and the capability can be checked by reading
+         /sys/devices/virtual/iommu/dmar*/intel-iommu/ecap. If this option
+         is not selected, scalable mode support can still be enabled by
+         passing intel_iommu=sm_on to the kernel. If unsure, use
+         the default value.
diff --git a/drivers/iommu/intel/Makefile b/drivers/iommu/intel/Makefile
new file mode 100644 (file)
index 0000000..fb8e1e8
--- /dev/null
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_DMAR_TABLE) += dmar.o
+obj-$(CONFIG_INTEL_IOMMU) += iommu.o pasid.o
+obj-$(CONFIG_INTEL_IOMMU) += trace.o
+obj-$(CONFIG_INTEL_IOMMU_DEBUGFS) += debugfs.o
+obj-$(CONFIG_INTEL_IOMMU_SVM) += svm.o
+obj-$(CONFIG_IRQ_REMAP) += irq_remapping.o
index cf1ebb9..efea7f0 100644 (file)
@@ -15,7 +15,7 @@
 
 #include <asm/irq_remapping.h>
 
-#include "intel-pasid.h"
+#include "pasid.h"
 
 struct tbl_walk {
        u16 bus;
index 683b812..93e6345 100644 (file)
@@ -1102,6 +1102,7 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
        }
 
        drhd->iommu = iommu;
+       iommu->drhd = drhd;
 
        return 0;
 
@@ -1438,8 +1439,7 @@ void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
 
 /* PASID-based device IOTLB Invalidate */
 void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
-                             u32 pasid,  u16 qdep, u64 addr,
-                             unsigned int size_order, u64 granu)
+                             u32 pasid,  u16 qdep, u64 addr, unsigned int size_order)
 {
        unsigned long mask = 1UL << (VTD_PAGE_SHIFT + size_order - 1);
        struct qi_desc desc = {.qw1 = 0, .qw2 = 0, .qw3 = 0};
@@ -1447,7 +1447,6 @@ void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
        desc.qw0 = QI_DEV_EIOTLB_PASID(pasid) | QI_DEV_EIOTLB_SID(sid) |
                QI_DEV_EIOTLB_QDEP(qdep) | QI_DEIOTLB_TYPE |
                QI_DEV_IOTLB_PFSID(pfsid);
-       desc.qw1 = QI_DEV_EIOTLB_GLOB(granu);
 
        /*
         * If S bit is 0, we only flush a single page. If S bit is set,
@@ -1458,9 +1457,26 @@ void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
         * Max Invs Pending (MIP) is set to 0 for now until we have DIT in
         * ECAP.
         */
-       desc.qw1 |= addr & ~mask;
-       if (size_order)
+       if (addr & GENMASK_ULL(size_order + VTD_PAGE_SHIFT, 0))
+               pr_warn_ratelimited("Invalidate non-aligned address %llx, order %d\n",
+                                   addr, size_order);
+
+       /* Take page address */
+       desc.qw1 = QI_DEV_EIOTLB_ADDR(addr);
+
+       if (size_order) {
+               /*
+                * Any 0 bits in the address below size_order would act as the
+                * size-marking bit, so set them to 1 to avoid encoding a
+                * smaller invalidation range than requested.
+                */
+               desc.qw1 |= GENMASK_ULL(size_order + VTD_PAGE_SHIFT - 1,
+                                       VTD_PAGE_SHIFT);
+               /* Clear size_order bit to indicate size */
+               desc.qw1 &= ~mask;
+               /* Set the S bit to indicate flushing more than 1 page */
                desc.qw1 |= QI_DEV_EIOTLB_SIZE;
+       }
 
        qi_submit_sync(iommu, &desc, 1, 0);
 }
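
The rewritten invalidation above encodes the range size in the low address bits: for 2^size_order pages, the bits from VTD_PAGE_SHIFT up to VTD_PAGE_SHIFT + size_order - 2 are forced to 1 and the next bit is forced to 0, with the S bit marking a multi-page flush. A standalone sketch of just that arithmetic (EX_PAGE_SHIFT stands in for VTD_PAGE_SHIFT; this is not a kernel API):

#include <linux/bits.h>
#include <linux/types.h>

#define EX_PAGE_SHIFT	12

static inline u64 example_devtlb_addr(u64 addr, unsigned int size_order)
{
	u64 qw1 = addr & ~GENMASK_ULL(EX_PAGE_SHIFT - 1, 0);	/* page address */

	if (size_order) {
		u64 mask = 1ULL << (EX_PAGE_SHIFT + size_order - 1);

		/* run of 1s below the size bit ... */
		qw1 |= GENMASK_ULL(EX_PAGE_SHIFT + size_order - 1, EX_PAGE_SHIFT);
		/* ... and a single 0 bit marking the range size */
		qw1 &= ~mask;
	}
	return qw1;
}

/*
 * Worked example: addr = 0x1230000, size_order = 4 (16 pages = 64KiB):
 * bits 12..14 become 1, bit 15 becomes 0, so qw1 = 0x1237000.
 * The real descriptor additionally sets QI_DEV_EIOTLB_SIZE (the S bit).
 */
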
index 237a470..e9864e5 100644 (file)
@@ -48,7 +48,7 @@
 #include <trace/events/intel_iommu.h>
 
 #include "../irq_remapping.h"
-#include "intel-pasid.h"
+#include "pasid.h"
 
 #define ROOT_SIZE              VTD_PAGE_SIZE
 #define CONTEXT_SIZE           VTD_PAGE_SIZE
@@ -356,6 +356,7 @@ static int intel_iommu_strict;
 static int intel_iommu_superpage = 1;
 static int iommu_identity_mapping;
 static int intel_no_bounce;
+static int iommu_skip_te_disable;
 
 #define IDENTMAP_GFX           2
 #define IDENTMAP_AZALIA                4
@@ -372,7 +373,7 @@ struct device_domain_info *get_domain_info(struct device *dev)
        if (!dev)
                return NULL;
 
-       info = dev->archdata.iommu;
+       info = dev_iommu_priv_get(dev);
        if (unlikely(info == DUMMY_DEVICE_DOMAIN_INFO ||
                     info == DEFER_DEVICE_DOMAIN_INFO))
                return NULL;
@@ -743,12 +744,12 @@ struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
 
 static int iommu_dummy(struct device *dev)
 {
-       return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
+       return dev_iommu_priv_get(dev) == DUMMY_DEVICE_DOMAIN_INFO;
 }
 
 static bool attach_deferred(struct device *dev)
 {
-       return dev->archdata.iommu == DEFER_DEVICE_DOMAIN_INFO;
+       return dev_iommu_priv_get(dev) == DEFER_DEVICE_DOMAIN_INFO;
 }
 
 /**
@@ -778,16 +779,16 @@ is_downstream_to_pci_bridge(struct device *dev, struct device *bridge)
        return false;
 }
 
-static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
+struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
 {
        struct dmar_drhd_unit *drhd = NULL;
+       struct pci_dev *pdev = NULL;
        struct intel_iommu *iommu;
        struct device *tmp;
-       struct pci_dev *pdev = NULL;
        u16 segment = 0;
        int i;
 
-       if (iommu_dummy(dev))
+       if (!dev || iommu_dummy(dev))
                return NULL;
 
        if (dev_is_pci(dev)) {
@@ -818,8 +819,10 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf
                                if (pdev && pdev->is_virtfn)
                                        goto got_pdev;
 
-                               *bus = drhd->devices[i].bus;
-                               *devfn = drhd->devices[i].devfn;
+                               if (bus && devfn) {
+                                       *bus = drhd->devices[i].bus;
+                                       *devfn = drhd->devices[i].devfn;
+                               }
                                goto out;
                        }
 
@@ -829,8 +832,10 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf
 
                if (pdev && drhd->include_all) {
                got_pdev:
-                       *bus = pdev->bus->number;
-                       *devfn = pdev->devfn;
+                       if (bus && devfn) {
+                               *bus = pdev->bus->number;
+                               *devfn = pdev->devfn;
+                       }
                        goto out;
                }
        }
@@ -1629,6 +1634,10 @@ static void iommu_disable_translation(struct intel_iommu *iommu)
        u32 sts;
        unsigned long flag;
 
+       if (iommu_skip_te_disable && iommu->drhd->gfx_dedicated &&
+           (cap_read_drain(iommu->cap) || cap_write_drain(iommu->cap)))
+               return;
+
        raw_spin_lock_irqsave(&iommu->register_lock, flag);
        iommu->gcmd &= ~DMA_GCMD_TE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
@@ -2420,7 +2429,7 @@ static inline void unlink_domain_info(struct device_domain_info *info)
        list_del(&info->link);
        list_del(&info->global);
        if (info->dev)
-               info->dev->archdata.iommu = NULL;
+               dev_iommu_priv_set(info->dev, NULL);
 }
 
 static void domain_remove_dev_info(struct dmar_domain *domain)
@@ -2453,7 +2462,7 @@ static void do_deferred_attach(struct device *dev)
 {
        struct iommu_domain *domain;
 
-       dev->archdata.iommu = NULL;
+       dev_iommu_priv_set(dev, NULL);
        domain = iommu_get_domain_for_dev(dev);
        if (domain)
                intel_iommu_attach_device(domain, dev);
@@ -2599,7 +2608,7 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
        list_add(&info->link, &domain->devices);
        list_add(&info->global, &device_domain_list);
        if (dev)
-               dev->archdata.iommu = info;
+               dev_iommu_priv_set(dev, info);
        spin_unlock_irqrestore(&device_domain_lock, flags);
 
        /* PASID table is mandatory for a PCI device in scalable mode. */
@@ -4004,7 +4013,7 @@ static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
        if (!drhd || drhd->reg_base_addr - vtbar != 0xa000) {
                pr_warn_once(FW_BUG "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n");
                add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
-               pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
+               dev_iommu_priv_set(&pdev->dev, DUMMY_DEVICE_DOMAIN_INFO);
        }
 }
 DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
@@ -4039,11 +4048,12 @@ static void __init init_no_remapping_devices(void)
 
                /* This IOMMU has *only* gfx devices. Either bypass it or
                   set the gfx_mapped flag, as appropriate */
+               drhd->gfx_dedicated = 1;
                if (!dmar_map_gfx) {
                        drhd->ignored = 1;
                        for_each_active_dev_scope(drhd->devices,
                                                  drhd->devices_cnt, i, dev)
-                               dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
+                               dev_iommu_priv_set(dev, DUMMY_DEVICE_DOMAIN_INFO);
                }
        }
 }
@@ -5146,11 +5156,10 @@ static int aux_domain_add_dev(struct dmar_domain *domain,
                              struct device *dev)
 {
        int ret;
-       u8 bus, devfn;
        unsigned long flags;
        struct intel_iommu *iommu;
 
-       iommu = device_to_iommu(dev, &bus, &devfn);
+       iommu = device_to_iommu(dev, NULL, NULL);
        if (!iommu)
                return -ENODEV;
 
@@ -5236,9 +5245,8 @@ static int prepare_domain_attach_device(struct iommu_domain *domain,
        struct dmar_domain *dmar_domain = to_dmar_domain(domain);
        struct intel_iommu *iommu;
        int addr_width;
-       u8 bus, devfn;
 
-       iommu = device_to_iommu(dev, &bus, &devfn);
+       iommu = device_to_iommu(dev, NULL, NULL);
        if (!iommu)
                return -ENODEV;
 
@@ -5416,7 +5424,7 @@ intel_iommu_sva_invalidate(struct iommu_domain *domain, struct device *dev,
        sid = PCI_DEVID(bus, devfn);
 
        /* Size is only valid in address selective invalidation */
-       if (inv_info->granularity != IOMMU_INV_GRANU_PASID)
+       if (inv_info->granularity == IOMMU_INV_GRANU_ADDR)
                size = to_vtd_size(inv_info->addr_info.granule_size,
                                   inv_info->addr_info.nb_granules);
 
@@ -5425,6 +5433,7 @@ intel_iommu_sva_invalidate(struct iommu_domain *domain, struct device *dev,
                         IOMMU_CACHE_INV_TYPE_NR) {
                int granu = 0;
                u64 pasid = 0;
+               u64 addr = 0;
 
                granu = to_vtd_granularity(cache_type, inv_info->granularity);
                if (granu == -EINVAL) {
@@ -5446,13 +5455,12 @@ intel_iommu_sva_invalidate(struct iommu_domain *domain, struct device *dev,
 
                switch (BIT(cache_type)) {
                case IOMMU_CACHE_INV_TYPE_IOTLB:
+                       /* HW ignores the low address bits covered by the address mask */
                        if (inv_info->granularity == IOMMU_INV_GRANU_ADDR &&
                            size &&
                            (inv_info->addr_info.addr & ((BIT(VTD_PAGE_SHIFT + size)) - 1))) {
-                               pr_err_ratelimited("Address out of range, 0x%llx, size order %llu\n",
+                               pr_err_ratelimited("User address not aligned, 0x%llx, size order %llu\n",
                                                   inv_info->addr_info.addr, size);
-                               ret = -ERANGE;
-                               goto out_unlock;
                        }
 
                        /*
@@ -5464,25 +5472,35 @@ intel_iommu_sva_invalidate(struct iommu_domain *domain, struct device *dev,
                                        (granu == QI_GRAN_NONG_PASID) ? -1 : 1 << size,
                                        inv_info->addr_info.flags & IOMMU_INV_ADDR_FLAGS_LEAF);
 
+                       if (!info->ats_enabled)
+                               break;
                        /*
                         * Always flush device IOTLB if ATS is enabled. vIOMMU
                         * in the guest may assume IOTLB flush is inclusive,
                         * which is more efficient.
                         */
-                       if (info->ats_enabled)
-                               qi_flush_dev_iotlb_pasid(iommu, sid,
-                                               info->pfsid, pasid,
-                                               info->ats_qdep,
-                                               inv_info->addr_info.addr,
-                                               size, granu);
-                       break;
+                       fallthrough;
                case IOMMU_CACHE_INV_TYPE_DEV_IOTLB:
+                       /*
+                        * PASID-based device TLB invalidation only supports
+                        * IOMMU_INV_GRANU_ADDR granularity, not
+                        * IOMMU_INV_GRANU_PASID.
+                        * Emulate PASID granularity by covering the entire
+                        * 64-bit address range: the user provides only PASID
+                        * info and no address info, so set addr to 0.
+                        */
+                       if (inv_info->granularity == IOMMU_INV_GRANU_PASID) {
+                               size = 64 - VTD_PAGE_SHIFT;
+                               addr = 0;
+                       } else if (inv_info->granularity == IOMMU_INV_GRANU_ADDR) {
+                               addr = inv_info->addr_info.addr;
+                       }
+
                        if (info->ats_enabled)
                                qi_flush_dev_iotlb_pasid(iommu, sid,
                                                info->pfsid, pasid,
-                                               info->ats_qdep,
-                                               inv_info->addr_info.addr,
-                                               size, granu);
+                                               info->ats_qdep, addr,
+                                               size);
                        else
                                pr_warn_ratelimited("Passdown device IOTLB flush w/o ATS!\n");
                        break;
@@ -5658,14 +5676,13 @@ static bool intel_iommu_capable(enum iommu_cap cap)
 static struct iommu_device *intel_iommu_probe_device(struct device *dev)
 {
        struct intel_iommu *iommu;
-       u8 bus, devfn;
 
-       iommu = device_to_iommu(dev, &bus, &devfn);
+       iommu = device_to_iommu(dev, NULL, NULL);
        if (!iommu)
                return ERR_PTR(-ENODEV);
 
        if (translation_pre_enabled(iommu))
-               dev->archdata.iommu = DEFER_DEVICE_DOMAIN_INFO;
+               dev_iommu_priv_set(dev, DEFER_DEVICE_DOMAIN_INFO);
 
        return &iommu->iommu;
 }
@@ -5673,9 +5690,8 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
 static void intel_iommu_release_device(struct device *dev)
 {
        struct intel_iommu *iommu;
-       u8 bus, devfn;
 
-       iommu = device_to_iommu(dev, &bus, &devfn);
+       iommu = device_to_iommu(dev, NULL, NULL);
        if (!iommu)
                return;
 
@@ -5825,37 +5841,14 @@ static struct iommu_group *intel_iommu_device_group(struct device *dev)
        return generic_device_group(dev);
 }
 
-#ifdef CONFIG_INTEL_IOMMU_SVM
-struct intel_iommu *intel_svm_device_to_iommu(struct device *dev)
-{
-       struct intel_iommu *iommu;
-       u8 bus, devfn;
-
-       if (iommu_dummy(dev)) {
-               dev_warn(dev,
-                        "No IOMMU translation for device; cannot enable SVM\n");
-               return NULL;
-       }
-
-       iommu = device_to_iommu(dev, &bus, &devfn);
-       if ((!iommu)) {
-               dev_err(dev, "No IOMMU for device; cannot enable SVM\n");
-               return NULL;
-       }
-
-       return iommu;
-}
-#endif /* CONFIG_INTEL_IOMMU_SVM */
-
 static int intel_iommu_enable_auxd(struct device *dev)
 {
        struct device_domain_info *info;
        struct intel_iommu *iommu;
        unsigned long flags;
-       u8 bus, devfn;
        int ret;
 
-       iommu = device_to_iommu(dev, &bus, &devfn);
+       iommu = device_to_iommu(dev, NULL, NULL);
        if (!iommu || dmar_disabled)
                return -EINVAL;
 
@@ -6080,6 +6073,7 @@ const struct iommu_ops intel_iommu_ops = {
        .sva_bind               = intel_svm_bind,
        .sva_unbind             = intel_svm_unbind,
        .sva_get_pasid          = intel_svm_get_pasid,
+       .page_response          = intel_svm_page_response,
 #endif
 };
 
@@ -6182,6 +6176,27 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_g
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
 
+static void quirk_igfx_skip_te_disable(struct pci_dev *dev)
+{
+       unsigned short ver;
+
+       if (!IS_GFX_DEVICE(dev))
+               return;
+
+       ver = (dev->device >> 8) & 0xff;
+       if (ver != 0x45 && ver != 0x46 && ver != 0x4c &&
+           ver != 0x4e && ver != 0x8a && ver != 0x98 &&
+           ver != 0x9a)
+               return;
+
+       if (risky_device(dev))
+               return;
+
+       pci_info(dev, "Skip IOMMU disabling for graphics\n");
+       iommu_skip_te_disable = 1;
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, quirk_igfx_skip_te_disable);
+
 /* On Tylersburg chipsets, some BIOSes have been known to enable the
    ISOCH DMAR unit for the Azalia sound device, but not give it any
    TLB entries, which causes it to deadlock. Check for that.  We do
index aa096b3..23583b0 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/irqdomain.h>
 #include <linux/crash_dump.h>
 #include <asm/io_apic.h>
+#include <asm/apic.h>
 #include <asm/smp.h>
 #include <asm/cpu.h>
 #include <asm/irq_remapping.h>
index c81f0f1..e6faedf 100644 (file)
@@ -19,7 +19,7 @@
 #include <linux/pci-ats.h>
 #include <linux/spinlock.h>
 
-#include "intel-pasid.h"
+#include "pasid.h"
 
 /*
  * Intel IOMMU system wide PASID name space:
@@ -486,7 +486,16 @@ devtlb_invalidation_with_pasid(struct intel_iommu *iommu,
        qdep = info->ats_qdep;
        pfsid = info->pfsid;
 
-       qi_flush_dev_iotlb(iommu, sid, pfsid, qdep, 0, 64 - VTD_PAGE_SHIFT);
+       /*
+        * PASID 0 is RID2PASID, i.e. a DMA request without PASID, so the
+        * devTLB flush without PASID should be used. For a non-zero PASID
+        * under SVA usage, the device may do DMA with multiple PASIDs, so it
+        * is more efficient to flush the devTLB entries for that PASID only.
+        */
+       if (pasid == PASID_RID2PASID)
+               qi_flush_dev_iotlb(iommu, sid, pfsid, qdep, 0, 64 - VTD_PAGE_SHIFT);
+       else
+               qi_flush_dev_iotlb_pasid(iommu, sid, pfsid, pasid, qdep, 0, 64 - VTD_PAGE_SHIFT);
 }
 
 void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev,
similarity index 98%
rename from drivers/iommu/intel/intel-pasid.h
rename to drivers/iommu/intel/pasid.h
index c5318d4..c985076 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- * intel-pasid.h - PASID idr, table and entry header
+ * pasid.h - PASID idr, table and entry header
  *
  * Copyright (C) 2018 Intel Corporation
  *
index 6c87c80..95c3164 100644 (file)
@@ -20,7 +20,7 @@
 #include <linux/ioasid.h>
 #include <asm/page.h>
 
-#include "intel-pasid.h"
+#include "pasid.h"
 
 static irqreturn_t prq_event_thread(int irq, void *d);
 static void intel_svm_drain_prq(struct device *dev, int pasid);
@@ -228,13 +228,57 @@ static LIST_HEAD(global_svm_list);
        list_for_each_entry((sdev), &(svm)->devs, list) \
                if ((d) != (sdev)->dev) {} else
 
+static int pasid_to_svm_sdev(struct device *dev, unsigned int pasid,
+                            struct intel_svm **rsvm,
+                            struct intel_svm_dev **rsdev)
+{
+       struct intel_svm_dev *d, *sdev = NULL;
+       struct intel_svm *svm;
+
+       /* The caller should hold the pasid_mutex lock */
+       if (WARN_ON(!mutex_is_locked(&pasid_mutex)))
+               return -EINVAL;
+
+       if (pasid == INVALID_IOASID || pasid >= PASID_MAX)
+               return -EINVAL;
+
+       svm = ioasid_find(NULL, pasid, NULL);
+       if (IS_ERR(svm))
+               return PTR_ERR(svm);
+
+       if (!svm)
+               goto out;
+
+       /*
+        * If an svm was found for the PASID, at least one device must be
+        * bound to it.
+        */
+       if (WARN_ON(list_empty(&svm->devs)))
+               return -EINVAL;
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(d, &svm->devs, list) {
+               if (d->dev == dev) {
+                       sdev = d;
+                       break;
+               }
+       }
+       rcu_read_unlock();
+
+out:
+       *rsvm = svm;
+       *rsdev = sdev;
+
+       return 0;
+}
+
 int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
                          struct iommu_gpasid_bind_data *data)
 {
-       struct intel_iommu *iommu = intel_svm_device_to_iommu(dev);
+       struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL);
+       struct intel_svm_dev *sdev = NULL;
        struct dmar_domain *dmar_domain;
-       struct intel_svm_dev *sdev;
-       struct intel_svm *svm;
+       struct intel_svm *svm = NULL;
        int ret = 0;
 
        if (WARN_ON(!iommu) || !data)
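
The new pasid_to_svm_sdev() helper above centralises the ioasid_find()-plus-device-list walk that the bind, unbind and page-response paths now share. A hedged caller sketch, written as if it lived inside svm.c so the local types and pasid_mutex are visible; the error codes chosen here are illustrative, not mandated by the patch.

static int example_lookup(struct device *dev, unsigned int pasid)
{
	struct intel_svm_dev *sdev = NULL;
	struct intel_svm *svm = NULL;
	int ret;

	/* pasid_mutex must already be held; the helper asserts this */
	ret = pasid_to_svm_sdev(dev, pasid, &svm, &sdev);
	if (ret)
		return ret;	/* bad PASID, or ioasid_find() returned an error */
	if (!svm)
		return -ENOENT;	/* PASID never bound to anything (assumed policy) */
	if (!sdev)
		return -ENODEV;	/* PASID bound, but not to this device (assumed policy) */

	/* both svm and sdev are valid here */
	return 0;
}
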
@@ -261,39 +305,23 @@ int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
        dmar_domain = to_dmar_domain(domain);
 
        mutex_lock(&pasid_mutex);
-       svm = ioasid_find(NULL, data->hpasid, NULL);
-       if (IS_ERR(svm)) {
-               ret = PTR_ERR(svm);
+       ret = pasid_to_svm_sdev(dev, data->hpasid, &svm, &sdev);
+       if (ret)
                goto out;
-       }
 
-       if (svm) {
+       if (sdev) {
                /*
-                * If we found svm for the PASID, there must be at
-                * least one device bond, otherwise svm should be freed.
+                * Do not allow multiple bindings of the same device-PASID pair:
+                * there is only one set of SL page tables per PASID. We may
+                * revisit this once sharing a PGD across domains is supported.
                 */
-               if (WARN_ON(list_empty(&svm->devs))) {
-                       ret = -EINVAL;
-                       goto out;
-               }
+               dev_warn_ratelimited(dev, "Already bound with PASID %u\n",
+                                    svm->pasid);
+               ret = -EBUSY;
+               goto out;
+       }
 
-               for_each_svm_dev(sdev, svm, dev) {
-                       /*
-                        * For devices with aux domains, we should allow
-                        * multiple bind calls with the same PASID and pdev.
-                        */
-                       if (iommu_dev_feature_enabled(dev,
-                                                     IOMMU_DEV_FEAT_AUX)) {
-                               sdev->users++;
-                       } else {
-                               dev_warn_ratelimited(dev,
-                                                    "Already bound with PASID %u\n",
-                                                    svm->pasid);
-                               ret = -EBUSY;
-                       }
-                       goto out;
-               }
-       } else {
+       if (!svm) {
                /* We come here when the PASID has never been bound to a device. */
                svm = kzalloc(sizeof(*svm), GFP_KERNEL);
                if (!svm) {
@@ -373,28 +401,20 @@ int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
 
 int intel_svm_unbind_gpasid(struct device *dev, int pasid)
 {
-       struct intel_iommu *iommu = intel_svm_device_to_iommu(dev);
+       struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL);
        struct intel_svm_dev *sdev;
        struct intel_svm *svm;
-       int ret = -EINVAL;
+       int ret;
 
        if (WARN_ON(!iommu))
                return -EINVAL;
 
        mutex_lock(&pasid_mutex);
-       svm = ioasid_find(NULL, pasid, NULL);
-       if (!svm) {
-               ret = -EINVAL;
-               goto out;
-       }
-
-       if (IS_ERR(svm)) {
-               ret = PTR_ERR(svm);
+       ret = pasid_to_svm_sdev(dev, pasid, &svm, &sdev);
+       if (ret)
                goto out;
-       }
 
-       for_each_svm_dev(sdev, svm, dev) {
-               ret = 0;
+       if (sdev) {
                if (iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX))
                        sdev->users--;
                if (!sdev->users) {
@@ -418,7 +438,6 @@ int intel_svm_unbind_gpasid(struct device *dev, int pasid)
                                kfree(svm);
                        }
                }
-               break;
        }
 out:
        mutex_unlock(&pasid_mutex);
@@ -430,7 +449,7 @@ static int
 intel_svm_bind_mm(struct device *dev, int flags, struct svm_dev_ops *ops,
                  struct mm_struct *mm, struct intel_svm_dev **sd)
 {
-       struct intel_iommu *iommu = intel_svm_device_to_iommu(dev);
+       struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL);
        struct device_domain_info *info;
        struct intel_svm_dev *sdev;
        struct intel_svm *svm = NULL;
@@ -596,7 +615,7 @@ success:
        if (sd)
                *sd = sdev;
        ret = 0;
- out:
+out:
        return ret;
 }
 
@@ -608,21 +627,15 @@ static int intel_svm_unbind_mm(struct device *dev, int pasid)
        struct intel_svm *svm;
        int ret = -EINVAL;
 
-       iommu = intel_svm_device_to_iommu(dev);
+       iommu = device_to_iommu(dev, NULL, NULL);
        if (!iommu)
                goto out;
 
-       svm = ioasid_find(NULL, pasid, NULL);
-       if (!svm)
-               goto out;
-
-       if (IS_ERR(svm)) {
-               ret = PTR_ERR(svm);
+       ret = pasid_to_svm_sdev(dev, pasid, &svm, &sdev);
+       if (ret)
                goto out;
-       }
 
-       for_each_svm_dev(sdev, svm, dev) {
-               ret = 0;
+       if (sdev) {
                sdev->users--;
                if (!sdev->users) {
                        list_del_rcu(&sdev->list);
@@ -651,10 +664,8 @@ static int intel_svm_unbind_mm(struct device *dev, int pasid)
                                kfree(svm);
                        }
                }
-               break;
        }
- out:
-
+out:
        return ret;
 }
 
@@ -800,8 +811,63 @@ qi_retry:
        }
 }
 
+static int prq_to_iommu_prot(struct page_req_dsc *req)
+{
+       int prot = 0;
+
+       if (req->rd_req)
+               prot |= IOMMU_FAULT_PERM_READ;
+       if (req->wr_req)
+               prot |= IOMMU_FAULT_PERM_WRITE;
+       if (req->exe_req)
+               prot |= IOMMU_FAULT_PERM_EXEC;
+       if (req->pm_req)
+               prot |= IOMMU_FAULT_PERM_PRIV;
+
+       return prot;
+}
+
+static int
+intel_svm_prq_report(struct device *dev, struct page_req_dsc *desc)
+{
+       struct iommu_fault_event event;
+
+       if (!dev || !dev_is_pci(dev))
+               return -ENODEV;
+
+       /* Fill in event data for device specific processing */
+       memset(&event, 0, sizeof(struct iommu_fault_event));
+       event.fault.type = IOMMU_FAULT_PAGE_REQ;
+       event.fault.prm.addr = desc->addr;
+       event.fault.prm.pasid = desc->pasid;
+       event.fault.prm.grpid = desc->prg_index;
+       event.fault.prm.perm = prq_to_iommu_prot(desc);
+
+       if (desc->lpig)
+               event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;
+       if (desc->pasid_present) {
+               event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
+               event.fault.prm.flags |= IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID;
+       }
+       if (desc->priv_data_present) {
+               /*
+                * Set the last-page-in-group bit when private data is present:
+                * a page response is then required, just as it is for LPIG.
+                * iommu_report_device_fault() does not understand this vendor-
+                * specific requirement, so set last_page as a workaround.
+                */
+               event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;
+               event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PRIV_DATA;
+               memcpy(event.fault.prm.private_data, desc->priv_data,
+                      sizeof(desc->priv_data));
+       }
+
+       return iommu_report_device_fault(dev, &event);
+}
+
 static irqreturn_t prq_event_thread(int irq, void *d)
 {
+       struct intel_svm_dev *sdev = NULL;
        struct intel_iommu *iommu = d;
        struct intel_svm *svm = NULL;
        int head, tail, handled = 0;
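
Guest-mode page requests are now forwarded out of the driver by intel_svm_prq_report() above via iommu_report_device_fault(). The counterpart, which is not part of this patch, is a consumer registering for those events; roughly, and with hypothetical names:

#include <linux/iommu.h>

static int example_subscribe(struct device *dev,
			     iommu_dev_fault_handler_t handler)
{
	/* handler is invoked for each reported IOMMU_FAULT_PAGE_REQ event */
	return iommu_register_device_fault_handler(dev, handler, dev);
}
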
@@ -813,7 +879,6 @@ static irqreturn_t prq_event_thread(int irq, void *d)
        tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
        head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
        while (head != tail) {
-               struct intel_svm_dev *sdev;
                struct vm_area_struct *vma;
                struct page_req_dsc *req;
                struct qi_desc resp;
@@ -849,6 +914,20 @@ static irqreturn_t prq_event_thread(int irq, void *d)
                        }
                }
 
+               if (!sdev || sdev->sid != req->rid) {
+                       struct intel_svm_dev *t;
+
+                       sdev = NULL;
+                       rcu_read_lock();
+                       list_for_each_entry_rcu(t, &svm->devs, list) {
+                               if (t->sid == req->rid) {
+                                       sdev = t;
+                                       break;
+                               }
+                       }
+                       rcu_read_unlock();
+               }
+
                result = QI_RESP_INVALID;
                /* Since we're using init_mm.pgd directly, we should never take
                 * any faults on kernel addresses. */
@@ -859,6 +938,17 @@ static irqreturn_t prq_event_thread(int irq, void *d)
                if (!is_canonical_address(address))
                        goto bad_req;
 
+               /*
+                * If the PRQ is to be handled outside the IOMMU driver, by the
+                * receiver of the fault notification, skip the page response here.
+                */
+               if (svm->flags & SVM_FLAG_GUEST_MODE) {
+                       if (sdev && !intel_svm_prq_report(sdev->dev, req))
+                               goto prq_advance;
+                       else
+                               goto bad_req;
+               }
+
                /* If the mm is already defunct, don't handle faults. */
                if (!mmget_not_zero(svm->mm))
                        goto bad_req;
@@ -872,29 +962,17 @@ static irqreturn_t prq_event_thread(int irq, void *d)
                        goto invalid;
 
                ret = handle_mm_fault(vma, address,
-                                     req->wr_req ? FAULT_FLAG_WRITE : 0);
+                                     req->wr_req ? FAULT_FLAG_WRITE : 0,
+                                     NULL);
                if (ret & VM_FAULT_ERROR)
                        goto invalid;
 
                result = QI_RESP_SUCCESS;
-       invalid:
+invalid:
                mmap_read_unlock(svm->mm);
                mmput(svm->mm);
-       bad_req:
-               /* Accounting for major/minor faults? */
-               rcu_read_lock();
-               list_for_each_entry_rcu(sdev, &svm->devs, list) {
-                       if (sdev->sid == req->rid)
-                               break;
-               }
-               /* Other devices can go away, but the drivers are not permitted
-                * to unbind while any page faults might be in flight. So it's
-                * OK to drop the 'lock' here now we have it. */
-               rcu_read_unlock();
-
-               if (WARN_ON(&sdev->list == &svm->devs))
-                       sdev = NULL;
-
+bad_req:
+               WARN_ON(!sdev);
                if (sdev && sdev->ops && sdev->ops->fault_cb) {
                        int rwxp = (req->rd_req << 3) | (req->wr_req << 2) |
                                (req->exe_req << 1) | (req->pm_req);
@@ -905,7 +983,7 @@ static irqreturn_t prq_event_thread(int irq, void *d)
                   and these can be NULL. Do not use them below this point! */
                sdev = NULL;
                svm = NULL;
-       no_pasid:
+no_pasid:
                if (req->lpig || req->priv_data_present) {
                        /*
                         * Per VT-d spec. v3.0 ch7.7, system software must
@@ -930,6 +1008,7 @@ static irqreturn_t prq_event_thread(int irq, void *d)
                        resp.qw3 = 0;
                        qi_submit_sync(iommu, &resp, 1, 0);
                }
+prq_advance:
                head = (head + sizeof(*req)) & PRQ_RING_MASK;
        }
 
@@ -1000,3 +1079,102 @@ int intel_svm_get_pasid(struct iommu_sva *sva)
 
        return pasid;
 }
+
+int intel_svm_page_response(struct device *dev,
+                           struct iommu_fault_event *evt,
+                           struct iommu_page_response *msg)
+{
+       struct iommu_fault_page_request *prm;
+       struct intel_svm_dev *sdev = NULL;
+       struct intel_svm *svm = NULL;
+       struct intel_iommu *iommu;
+       bool private_present;
+       bool pasid_present;
+       bool last_page;
+       u8 bus, devfn;
+       int ret = 0;
+       u16 sid;
+
+       if (!dev || !dev_is_pci(dev))
+               return -ENODEV;
+
+       iommu = device_to_iommu(dev, &bus, &devfn);
+       if (!iommu)
+               return -ENODEV;
+
+       if (!msg || !evt)
+               return -EINVAL;
+
+       mutex_lock(&pasid_mutex);
+
+       prm = &evt->fault.prm;
+       sid = PCI_DEVID(bus, devfn);
+       pasid_present = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
+       private_present = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PRIV_DATA;
+       last_page = prm->flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;
+
+       if (!pasid_present) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       if (prm->pasid == 0 || prm->pasid >= PASID_MAX) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       ret = pasid_to_svm_sdev(dev, prm->pasid, &svm, &sdev);
+       if (ret || !sdev) {
+               ret = -ENODEV;
+               goto out;
+       }
+
+       /*
+        * For responses from userspace, make sure the PASID has been bound
+        * to the mm of the responding task.
+        */
+       if (svm->flags & SVM_FLAG_GUEST_MODE) {
+               struct mm_struct *mm;
+
+               mm = get_task_mm(current);
+               if (!mm) {
+                       ret = -EINVAL;
+                       goto out;
+               }
+
+               if (mm != svm->mm) {
+                       ret = -ENODEV;
+                       mmput(mm);
+                       goto out;
+               }
+
+               mmput(mm);
+       }
+
+       /*
+        * Per VT-d spec. v3.0 ch7.7, system software must respond
+        * with page group response if private data is present (PDP)
+        * or last page in group (LPIG) bit is set. This is an
+        * additional VT-d requirement beyond PCI ATS spec.
+        */
+       if (last_page || private_present) {
+               struct qi_desc desc;
+
+               desc.qw0 = QI_PGRP_PASID(prm->pasid) | QI_PGRP_DID(sid) |
+                               QI_PGRP_PASID_P(pasid_present) |
+                               QI_PGRP_PDP(private_present) |
+                               QI_PGRP_RESP_CODE(msg->code) |
+                               QI_PGRP_RESP_TYPE;
+               desc.qw1 = QI_PGRP_IDX(prm->grpid) | QI_PGRP_LPIG(last_page);
+               desc.qw2 = 0;
+               desc.qw3 = 0;
+               if (private_present)
+                       memcpy(&desc.qw2, prm->private_data,
+                              sizeof(prm->private_data));
+
+               qi_submit_sync(iommu, &desc, 1, 0);
+       }
+out:
+       mutex_unlock(&pasid_mutex);
+       return ret;
+}
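
intel_svm_page_response() is the .page_response hook wired into intel_iommu_ops earlier in this series. A consumer that received a fault through a registered handler (see the registration sketch above) could complete it roughly as sketched below; the handler and its cookie are hypothetical, only the iommu_page_response() call and the uapi fields are real.

#include <linux/errno.h>
#include <linux/iommu.h>

static int example_fault_handler(struct iommu_fault *fault, void *cookie)
{
	struct device *dev = cookie;
	struct iommu_page_response resp = {
		.version	= IOMMU_PAGE_RESP_VERSION_1,
		.pasid		= fault->prm.pasid,
		.grpid		= fault->prm.grpid,
		.code		= IOMMU_PAGE_RESP_SUCCESS,
	};

	if (fault->type != IOMMU_FAULT_PAGE_REQ)
		return -EOPNOTSUPP;

	if (fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID)
		resp.flags = IOMMU_PAGE_RESP_PASID_VALID;

	/* ... service the fault (pin/map the page) before responding ... */
	return iommu_page_response(dev, &resp);
}
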
index 4272fe4..a688f22 100644 (file)
@@ -470,7 +470,7 @@ static arm_v7s_iopte arm_v7s_install_table(arm_v7s_iopte *table,
 
 static int __arm_v7s_map(struct arm_v7s_io_pgtable *data, unsigned long iova,
                         phys_addr_t paddr, size_t size, int prot,
-                        int lvl, arm_v7s_iopte *ptep)
+                        int lvl, arm_v7s_iopte *ptep, gfp_t gfp)
 {
        struct io_pgtable_cfg *cfg = &data->iop.cfg;
        arm_v7s_iopte pte, *cptep;
@@ -491,7 +491,7 @@ static int __arm_v7s_map(struct arm_v7s_io_pgtable *data, unsigned long iova,
        /* Grab a pointer to the next level */
        pte = READ_ONCE(*ptep);
        if (!pte) {
-               cptep = __arm_v7s_alloc_table(lvl + 1, GFP_ATOMIC, data);
+               cptep = __arm_v7s_alloc_table(lvl + 1, gfp, data);
                if (!cptep)
                        return -ENOMEM;
 
@@ -512,11 +512,11 @@ static int __arm_v7s_map(struct arm_v7s_io_pgtable *data, unsigned long iova,
        }
 
        /* Rinse, repeat */
-       return __arm_v7s_map(data, iova, paddr, size, prot, lvl + 1, cptep);
+       return __arm_v7s_map(data, iova, paddr, size, prot, lvl + 1, cptep, gfp);
 }
 
 static int arm_v7s_map(struct io_pgtable_ops *ops, unsigned long iova,
-                       phys_addr_t paddr, size_t size, int prot)
+                       phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
 {
        struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops);
        struct io_pgtable *iop = &data->iop;
@@ -530,7 +530,7 @@ static int arm_v7s_map(struct io_pgtable_ops *ops, unsigned long iova,
                    paddr >= (1ULL << data->iop.cfg.oas)))
                return -ERANGE;
 
-       ret = __arm_v7s_map(data, iova, paddr, size, prot, 1, data->pgd);
+       ret = __arm_v7s_map(data, iova, paddr, size, prot, 1, data->pgd, gfp);
        /*
         * Synchronise all PTE updates for the new mapping before there's
         * a chance for anything to kick off a table walk for the new iova.
@@ -922,12 +922,12 @@ static int __init arm_v7s_do_selftests(void)
                if (ops->map(ops, iova, iova, size, IOMMU_READ |
                                                    IOMMU_WRITE |
                                                    IOMMU_NOEXEC |
-                                                   IOMMU_CACHE))
+                                                   IOMMU_CACHE, GFP_KERNEL))
                        return __FAIL(ops);
 
                /* Overlapping mappings */
                if (!ops->map(ops, iova, iova + size, size,
-                             IOMMU_READ | IOMMU_NOEXEC))
+                             IOMMU_READ | IOMMU_NOEXEC, GFP_KERNEL))
                        return __FAIL(ops);
 
                if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
@@ -946,7 +946,7 @@ static int __init arm_v7s_do_selftests(void)
                        return __FAIL(ops);
 
                /* Remap of partial unmap */
-               if (ops->map(ops, iova_start + size, size, size, IOMMU_READ))
+               if (ops->map(ops, iova_start + size, size, size, IOMMU_READ, GFP_KERNEL))
                        return __FAIL(ops);
 
                if (ops->iova_to_phys(ops, iova_start + size + 42)
@@ -967,7 +967,7 @@ static int __init arm_v7s_do_selftests(void)
                        return __FAIL(ops);
 
                /* Remap full block */
-               if (ops->map(ops, iova, iova, size, IOMMU_WRITE))
+               if (ops->map(ops, iova, iova, size, IOMMU_WRITE, GFP_KERNEL))
                        return __FAIL(ops);
 
                if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
index 04fbd4b..dc7bcf8 100644 (file)
@@ -355,7 +355,7 @@ static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table,
 
 static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
                          phys_addr_t paddr, size_t size, arm_lpae_iopte prot,
-                         int lvl, arm_lpae_iopte *ptep)
+                         int lvl, arm_lpae_iopte *ptep, gfp_t gfp)
 {
        arm_lpae_iopte *cptep, pte;
        size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
@@ -376,7 +376,7 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
        /* Grab a pointer to the next level */
        pte = READ_ONCE(*ptep);
        if (!pte) {
-               cptep = __arm_lpae_alloc_pages(tblsz, GFP_ATOMIC, cfg);
+               cptep = __arm_lpae_alloc_pages(tblsz, gfp, cfg);
                if (!cptep)
                        return -ENOMEM;
 
@@ -396,7 +396,7 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
        }
 
        /* Rinse, repeat */
-       return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep);
+       return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep, gfp);
 }
 
 static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
@@ -438,9 +438,6 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
                else if (prot & IOMMU_CACHE)
                        pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
                                << ARM_LPAE_PTE_ATTRINDX_SHIFT);
-               else if (prot & IOMMU_SYS_CACHE_ONLY)
-                       pte |= (ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE
-                               << ARM_LPAE_PTE_ATTRINDX_SHIFT);
        }
 
        if (prot & IOMMU_CACHE)
@@ -461,7 +458,7 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
 }
 
 static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
-                       phys_addr_t paddr, size_t size, int iommu_prot)
+                       phys_addr_t paddr, size_t size, int iommu_prot, gfp_t gfp)
 {
        struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
        struct io_pgtable_cfg *cfg = &data->iop.cfg;
@@ -483,7 +480,7 @@ static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
                return -ERANGE;
 
        prot = arm_lpae_prot_to_pte(data, iommu_prot);
-       ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep);
+       ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep, gfp);
        /*
         * Synchronise all PTE updates for the new mapping before there's
         * a chance for anything to kick off a table walk for the new iova.
@@ -1178,12 +1175,12 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
                        if (ops->map(ops, iova, iova, size, IOMMU_READ |
                                                            IOMMU_WRITE |
                                                            IOMMU_NOEXEC |
-                                                           IOMMU_CACHE))
+                                                           IOMMU_CACHE, GFP_KERNEL))
                                return __FAIL(ops, i);
 
                        /* Overlapping mappings */
                        if (!ops->map(ops, iova, iova + size, size,
-                                     IOMMU_READ | IOMMU_NOEXEC))
+                                     IOMMU_READ | IOMMU_NOEXEC, GFP_KERNEL))
                                return __FAIL(ops, i);
 
                        if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
@@ -1198,7 +1195,7 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
                        return __FAIL(ops, i);
 
                /* Remap of partial unmap */
-               if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ))
+               if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ, GFP_KERNEL))
                        return __FAIL(ops, i);
 
                if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
@@ -1216,7 +1213,7 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
                                return __FAIL(ops, i);
 
                        /* Remap full block */
-                       if (ops->map(ops, iova, iova, size, IOMMU_WRITE))
+                       if (ops->map(ops, iova, iova, size, IOMMU_WRITE, GFP_KERNEL))
                                return __FAIL(ops, i);
 
                        if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
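The io-pgtable ->map() callback now carries an explicit gfp_t, so the caller decides whether page-table allocations may sleep instead of the walker hardcoding GFP_ATOMIC. A minimal sketch of the new call pattern (the helper and its can_sleep switch are illustrative, not taken from the patch):

	/* Sketch: non-sleeping callers keep GFP_ATOMIC, sleepable paths may use GFP_KERNEL. */
	static int example_iopt_map(struct io_pgtable_ops *ops, unsigned long iova,
				    phys_addr_t paddr, size_t size, bool can_sleep)
	{
		return ops->map(ops, iova, paddr, size, IOMMU_READ | IOMMU_WRITE,
				can_sleep ? GFP_KERNEL : GFP_ATOMIC);
	}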
index b6858ad..609bd25 100644 (file)
@@ -383,8 +383,8 @@ static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
  * Elements are sorted by start address and overlapping segments
  * of the same type are merged.
  */
-int iommu_insert_resv_region(struct iommu_resv_region *new,
-                            struct list_head *regions)
+static int iommu_insert_resv_region(struct iommu_resv_region *new,
+                                   struct list_head *regions)
 {
        struct iommu_resv_region *iter, *tmp, *nr, *top;
        LIST_HEAD(stack);
@@ -1185,11 +1185,12 @@ EXPORT_SYMBOL_GPL(iommu_report_device_fault);
 int iommu_page_response(struct device *dev,
                        struct iommu_page_response *msg)
 {
-       bool pasid_valid;
+       bool needs_pasid;
        int ret = -EINVAL;
        struct iommu_fault_event *evt;
        struct iommu_fault_page_request *prm;
        struct dev_iommu *param = dev->iommu;
+       bool has_pasid = msg->flags & IOMMU_PAGE_RESP_PASID_VALID;
        struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
 
        if (!domain || !domain->ops->page_response)
@@ -1214,14 +1215,24 @@ int iommu_page_response(struct device *dev,
         */
        list_for_each_entry(evt, &param->fault_param->faults, list) {
                prm = &evt->fault.prm;
-               pasid_valid = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
+               if (prm->grpid != msg->grpid)
+                       continue;
 
-               if ((pasid_valid && prm->pasid != msg->pasid) ||
-                   prm->grpid != msg->grpid)
+               /*
+                * If the PASID is required, the corresponding request is
+                * matched using the group ID, the PASID valid bit and the
+                * PASID value. Otherwise the request and response are
+                * matched on the group ID alone.
+                */
+               needs_pasid = prm->flags & IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID;
+               if (needs_pasid && (!has_pasid || msg->pasid != prm->pasid))
                        continue;
 
-               /* Sanitize the reply */
-               msg->flags = pasid_valid ? IOMMU_PAGE_RESP_PASID_VALID : 0;
+               if (!needs_pasid && has_pasid) {
+                       /* No big deal, just clear it. */
+                       msg->flags &= ~IOMMU_PAGE_RESP_PASID_VALID;
+                       msg->pasid = 0;
+               }
 
                ret = domain->ops->page_response(dev, evt, msg);
                list_del(&evt->list);
@@ -2168,8 +2179,8 @@ static size_t iommu_pgsize(struct iommu_domain *domain,
        return pgsize;
 }
 
-int __iommu_map(struct iommu_domain *domain, unsigned long iova,
-             phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
+static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
+                      phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
 {
        const struct iommu_ops *ops = domain->ops;
        unsigned long orig_iova = iova;
@@ -2319,9 +2330,9 @@ size_t iommu_unmap_fast(struct iommu_domain *domain,
 }
 EXPORT_SYMBOL_GPL(iommu_unmap_fast);
 
-size_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
-                   struct scatterlist *sg, unsigned int nents, int prot,
-                   gfp_t gfp)
+static size_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
+                            struct scatterlist *sg, unsigned int nents, int prot,
+                            gfp_t gfp)
 {
        size_t len = 0, mapped = 0;
        phys_addr_t start;
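With the reworked matching above, a response is paired with its pending request by group ID, plus the PASID whenever the request requires one. A minimal sketch of a device fault handler completing a page request (the handler shape and IOMMU_PAGE_RESP_SUCCESS are assumed from the existing fault-reporting API, not shown in this hunk):

	/* Sketch: echo back the group ID and, when the request carried one, the PASID. */
	static int example_prq_handler(struct iommu_fault *fault, void *cookie)
	{
		struct device *dev = cookie;
		struct iommu_page_response resp = {
			.grpid = fault->prm.grpid,
			.code  = IOMMU_PAGE_RESP_SUCCESS,
		};

		if (fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID) {
			resp.flags |= IOMMU_PAGE_RESP_PASID_VALID;
			resp.pasid  = fault->prm.pasid;
		}

		return iommu_page_response(dev, &resp);
	}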
index 49fc01f..45a251d 100644 (file)
@@ -811,7 +811,9 @@ iova_magazine_free_pfns(struct iova_magazine *mag, struct iova_domain *iovad)
        for (i = 0 ; i < mag->size; ++i) {
                struct iova *iova = private_find_iova(iovad, mag->pfns[i]);
 
-               BUG_ON(!iova);
+               if (WARN_ON(!iova))
+                       continue;
+
                private_free_iova(iovad, iova);
        }
 
index 6de86e7..0f18abd 100644 (file)
@@ -3,7 +3,7 @@
  * IOMMU API for Renesas VMSA-compatible IPMMU
  * Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
  *
- * Copyright (C) 2014 Renesas Electronics Corporation
+ * Copyright (C) 2014-2020 Renesas Electronics Corporation
  */
 
 #include <linux/bitmap.h>
@@ -686,7 +686,7 @@ static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
        if (!domain)
                return -ENODEV;
 
-       return domain->iop->map(domain->iop, iova, paddr, size, prot);
+       return domain->iop->map(domain->iop, iova, paddr, size, prot, gfp);
 }
 
 static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
@@ -739,7 +739,9 @@ static const struct soc_device_attribute soc_rcar_gen3[] = {
        { .soc_id = "r8a774a1", },
        { .soc_id = "r8a774b1", },
        { .soc_id = "r8a774c0", },
+       { .soc_id = "r8a774e1", },
        { .soc_id = "r8a7795", },
+       { .soc_id = "r8a77961", },
        { .soc_id = "r8a7796", },
        { .soc_id = "r8a77965", },
        { .soc_id = "r8a77970", },
@@ -751,7 +753,9 @@ static const struct soc_device_attribute soc_rcar_gen3[] = {
 static const struct soc_device_attribute soc_rcar_gen3_whitelist[] = {
        { .soc_id = "r8a774b1", },
        { .soc_id = "r8a774c0", },
+       { .soc_id = "r8a774e1", },
        { .soc_id = "r8a7795", .revision = "ES3.*" },
+       { .soc_id = "r8a77961", },
        { .soc_id = "r8a77965", },
        { .soc_id = "r8a77990", },
        { .soc_id = "r8a77995", },
@@ -963,12 +967,18 @@ static const struct of_device_id ipmmu_of_ids[] = {
                .compatible = "renesas,ipmmu-r8a774c0",
                .data = &ipmmu_features_rcar_gen3,
        }, {
+               .compatible = "renesas,ipmmu-r8a774e1",
+               .data = &ipmmu_features_rcar_gen3,
+       }, {
                .compatible = "renesas,ipmmu-r8a7795",
                .data = &ipmmu_features_rcar_gen3,
        }, {
                .compatible = "renesas,ipmmu-r8a7796",
                .data = &ipmmu_features_rcar_gen3,
        }, {
+               .compatible = "renesas,ipmmu-r8a77961",
+               .data = &ipmmu_features_rcar_gen3,
+       }, {
                .compatible = "renesas,ipmmu-r8a77965",
                .data = &ipmmu_features_rcar_gen3,
        }, {
index 3d8a635..3615cd6 100644 (file)
@@ -491,7 +491,7 @@ static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova,
        int ret;
 
        spin_lock_irqsave(&priv->pgtlock, flags);
-       ret = priv->iop->map(priv->iop, iova, pa, len, prot);
+       ret = priv->iop->map(priv->iop, iova, pa, len, prot, GFP_ATOMIC);
        spin_unlock_irqrestore(&priv->pgtlock, flags);
 
        return ret;
@@ -593,14 +593,14 @@ static void insert_iommu_master(struct device *dev,
                                struct msm_iommu_dev **iommu,
                                struct of_phandle_args *spec)
 {
-       struct msm_iommu_ctx_dev *master = dev->archdata.iommu;
+       struct msm_iommu_ctx_dev *master = dev_iommu_priv_get(dev);
        int sid;
 
        if (list_empty(&(*iommu)->ctx_list)) {
                master = kzalloc(sizeof(*master), GFP_ATOMIC);
                master->of_node = dev->of_node;
                list_add(&master->list, &(*iommu)->ctx_list);
-               dev->archdata.iommu = master;
+               dev_iommu_priv_set(dev, master);
        }
 
        for (sid = 0; sid < master->num_mids; sid++)
index 2be96f1..785b228 100644 (file)
 #define REG_MMU_INVLD_START_A                  0x024
 #define REG_MMU_INVLD_END_A                    0x028
 
-#define REG_MMU_INV_SEL                                0x038
+#define REG_MMU_INV_SEL_GEN2                   0x02c
+#define REG_MMU_INV_SEL_GEN1                   0x038
 #define F_INVLD_EN0                            BIT(0)
 #define F_INVLD_EN1                            BIT(1)
 
-#define REG_MMU_STANDARD_AXI_MODE              0x048
+#define REG_MMU_MISC_CTRL                      0x048
+#define F_MMU_IN_ORDER_WR_EN_MASK              (BIT(1) | BIT(17))
+#define F_MMU_STANDARD_AXI_MODE_MASK           (BIT(3) | BIT(19))
+
 #define REG_MMU_DCM_DIS                                0x050
+#define REG_MMU_WR_LEN_CTRL                    0x054
+#define F_MMU_WR_THROT_DIS_MASK                        (BIT(5) | BIT(21))
 
 #define REG_MMU_CTRL_REG                       0x110
 #define F_MMU_TF_PROT_TO_PROGRAM_ADDR          (2 << 4)
 #define REG_MMU1_INVLD_PA                      0x148
 #define REG_MMU0_INT_ID                                0x150
 #define REG_MMU1_INT_ID                                0x154
+#define F_MMU_INT_ID_COMM_ID(a)                        (((a) >> 9) & 0x7)
+#define F_MMU_INT_ID_SUB_COMM_ID(a)            (((a) >> 7) & 0x3)
 #define F_MMU_INT_ID_LARB_ID(a)                        (((a) >> 7) & 0x7)
 #define F_MMU_INT_ID_PORT_ID(a)                        (((a) >> 2) & 0x1f)
 
-#define MTK_PROTECT_PA_ALIGN                   128
+#define MTK_PROTECT_PA_ALIGN                   256
 
 /*
  * Get the local arbiter ID and the portid within the larb arbiter
 #define MTK_M4U_TO_LARB(id)            (((id) >> 5) & 0xf)
 #define MTK_M4U_TO_PORT(id)            ((id) & 0x1f)
 
+#define HAS_4GB_MODE                   BIT(0)
+/* HW will use the EMI clock if there isn't the "bclk". */
+#define HAS_BCLK                       BIT(1)
+#define HAS_VLD_PA_RNG                 BIT(2)
+#define RESET_AXI                      BIT(3)
+#define OUT_ORDER_WR_EN                        BIT(4)
+#define HAS_SUB_COMM                   BIT(5)
+#define WR_THROT_EN                    BIT(6)
+
+#define MTK_IOMMU_HAS_FLAG(pdata, _x) \
+               ((((pdata)->flags) & (_x)) == (_x))
+
 struct mtk_iommu_domain {
        struct io_pgtable_cfg           cfg;
        struct io_pgtable_ops           *iop;
@@ -165,7 +185,7 @@ static void mtk_iommu_tlb_flush_all(void *cookie)
 
        for_each_m4u(data) {
                writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
-                              data->base + REG_MMU_INV_SEL);
+                              data->base + data->plat_data->inv_sel_reg);
                writel_relaxed(F_ALL_INVLD, data->base + REG_MMU_INVALIDATE);
                wmb(); /* Make sure the tlb flush all done */
        }
@@ -182,7 +202,7 @@ static void mtk_iommu_tlb_flush_range_sync(unsigned long iova, size_t size,
        for_each_m4u(data) {
                spin_lock_irqsave(&data->tlb_lock, flags);
                writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
-                              data->base + REG_MMU_INV_SEL);
+                              data->base + data->plat_data->inv_sel_reg);
 
                writel_relaxed(iova, data->base + REG_MMU_INVLD_START_A);
                writel_relaxed(iova + size - 1,
@@ -226,7 +246,7 @@ static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
        struct mtk_iommu_data *data = dev_id;
        struct mtk_iommu_domain *dom = data->m4u_dom;
        u32 int_state, regval, fault_iova, fault_pa;
-       unsigned int fault_larb, fault_port;
+       unsigned int fault_larb, fault_port, sub_comm = 0;
        bool layer, write;
 
        /* Read error info from registers */
@@ -242,10 +262,14 @@ static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
        }
        layer = fault_iova & F_MMU_FAULT_VA_LAYER_BIT;
        write = fault_iova & F_MMU_FAULT_VA_WRITE_BIT;
-       fault_larb = F_MMU_INT_ID_LARB_ID(regval);
        fault_port = F_MMU_INT_ID_PORT_ID(regval);
-
-       fault_larb = data->plat_data->larbid_remap[fault_larb];
+       if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_SUB_COMM)) {
+               fault_larb = F_MMU_INT_ID_COMM_ID(regval);
+               sub_comm = F_MMU_INT_ID_SUB_COMM_ID(regval);
+       } else {
+               fault_larb = F_MMU_INT_ID_LARB_ID(regval);
+       }
+       fault_larb = data->plat_data->larbid_remap[fault_larb][sub_comm];
 
        if (report_iommu_fault(&dom->domain, data->dev, fault_iova,
                               write ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ)) {
@@ -397,7 +421,7 @@ static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
                paddr |= BIT_ULL(32);
 
        /* Synchronize with the tlb_lock */
-       return dom->iop->map(dom->iop, iova, paddr, size, prot);
+       return dom->iop->map(dom->iop, iova, paddr, size, prot, gfp);
 }
 
 static size_t mtk_iommu_unmap(struct iommu_domain *domain,
@@ -532,11 +556,13 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
                return ret;
        }
 
-       if (data->plat_data->m4u_plat == M4U_MT8173)
+       if (data->plat_data->m4u_plat == M4U_MT8173) {
                regval = F_MMU_PREFETCH_RT_REPLACE_MOD |
                         F_MMU_TF_PROT_TO_PROGRAM_ADDR_MT8173;
-       else
-               regval = F_MMU_TF_PROT_TO_PROGRAM_ADDR;
+       } else {
+               regval = readl_relaxed(data->base + REG_MMU_CTRL_REG);
+               regval |= F_MMU_TF_PROT_TO_PROGRAM_ADDR;
+       }
        writel_relaxed(regval, data->base + REG_MMU_CTRL_REG);
 
        regval = F_L2_MULIT_HIT_EN |
@@ -563,7 +589,8 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
                         upper_32_bits(data->protect_base);
        writel_relaxed(regval, data->base + REG_MMU_IVRP_PADDR);
 
-       if (data->enable_4GB && data->plat_data->has_vld_pa_rng) {
+       if (data->enable_4GB &&
+           MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_VLD_PA_RNG)) {
                /*
                 * If 4GB mode is enabled, the valid PA range is from
                 * 0x1_0000_0000 to 0x1_ffff_ffff. Here we record bit[32:30].
@@ -572,9 +599,23 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
                writel_relaxed(regval, data->base + REG_MMU_VLD_PA_RNG);
        }
        writel_relaxed(0, data->base + REG_MMU_DCM_DIS);
+       if (MTK_IOMMU_HAS_FLAG(data->plat_data, WR_THROT_EN)) {
+               /* write command throttling mode */
+               regval = readl_relaxed(data->base + REG_MMU_WR_LEN_CTRL);
+               regval &= ~F_MMU_WR_THROT_DIS_MASK;
+               writel_relaxed(regval, data->base + REG_MMU_WR_LEN_CTRL);
+       }
 
-       if (data->plat_data->reset_axi)
-               writel_relaxed(0, data->base + REG_MMU_STANDARD_AXI_MODE);
+       if (MTK_IOMMU_HAS_FLAG(data->plat_data, RESET_AXI)) {
+               /* The register is called STANDARD_AXI_MODE in this case */
+               regval = 0;
+       } else {
+               regval = readl_relaxed(data->base + REG_MMU_MISC_CTRL);
+               regval &= ~F_MMU_STANDARD_AXI_MODE_MASK;
+               if (MTK_IOMMU_HAS_FLAG(data->plat_data, OUT_ORDER_WR_EN))
+                       regval &= ~F_MMU_IN_ORDER_WR_EN_MASK;
+       }
+       writel_relaxed(regval, data->base + REG_MMU_MISC_CTRL);
 
        if (devm_request_irq(data->dev, data->irq, mtk_iommu_isr, 0,
                             dev_name(data->dev), (void *)data)) {
@@ -616,7 +657,7 @@ static int mtk_iommu_probe(struct platform_device *pdev)
 
        /* Whether the current dram is over 4GB */
        data->enable_4GB = !!(max_pfn > (BIT_ULL(32) >> PAGE_SHIFT));
-       if (!data->plat_data->has_4gb_mode)
+       if (!MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_4GB_MODE))
                data->enable_4GB = false;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -629,7 +670,7 @@ static int mtk_iommu_probe(struct platform_device *pdev)
        if (data->irq < 0)
                return data->irq;
 
-       if (data->plat_data->has_bclk) {
+       if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_BCLK)) {
                data->bclk = devm_clk_get(dev, "bclk");
                if (IS_ERR(data->bclk))
                        return PTR_ERR(data->bclk);
@@ -718,8 +759,8 @@ static int __maybe_unused mtk_iommu_suspend(struct device *dev)
        struct mtk_iommu_suspend_reg *reg = &data->reg;
        void __iomem *base = data->base;
 
-       reg->standard_axi_mode = readl_relaxed(base +
-                                              REG_MMU_STANDARD_AXI_MODE);
+       reg->wr_len_ctrl = readl_relaxed(base + REG_MMU_WR_LEN_CTRL);
+       reg->misc_ctrl = readl_relaxed(base + REG_MMU_MISC_CTRL);
        reg->dcm_dis = readl_relaxed(base + REG_MMU_DCM_DIS);
        reg->ctrl_reg = readl_relaxed(base + REG_MMU_CTRL_REG);
        reg->int_control0 = readl_relaxed(base + REG_MMU_INT_CONTROL0);
@@ -743,8 +784,8 @@ static int __maybe_unused mtk_iommu_resume(struct device *dev)
                dev_err(data->dev, "Failed to enable clk(%d) in resume\n", ret);
                return ret;
        }
-       writel_relaxed(reg->standard_axi_mode,
-                      base + REG_MMU_STANDARD_AXI_MODE);
+       writel_relaxed(reg->wr_len_ctrl, base + REG_MMU_WR_LEN_CTRL);
+       writel_relaxed(reg->misc_ctrl, base + REG_MMU_MISC_CTRL);
        writel_relaxed(reg->dcm_dis, base + REG_MMU_DCM_DIS);
        writel_relaxed(reg->ctrl_reg, base + REG_MMU_CTRL_REG);
        writel_relaxed(reg->int_control0, base + REG_MMU_INT_CONTROL0);
@@ -763,28 +804,35 @@ static const struct dev_pm_ops mtk_iommu_pm_ops = {
 
 static const struct mtk_iommu_plat_data mt2712_data = {
        .m4u_plat     = M4U_MT2712,
-       .has_4gb_mode = true,
-       .has_bclk     = true,
-       .has_vld_pa_rng   = true,
-       .larbid_remap = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9},
+       .flags        = HAS_4GB_MODE | HAS_BCLK | HAS_VLD_PA_RNG,
+       .inv_sel_reg  = REG_MMU_INV_SEL_GEN1,
+       .larbid_remap = {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}},
+};
+
+static const struct mtk_iommu_plat_data mt6779_data = {
+       .m4u_plat      = M4U_MT6779,
+       .flags         = HAS_SUB_COMM | OUT_ORDER_WR_EN | WR_THROT_EN,
+       .inv_sel_reg   = REG_MMU_INV_SEL_GEN2,
+       .larbid_remap  = {{0}, {1}, {2}, {3}, {5}, {7, 8}, {10}, {9}},
 };
 
 static const struct mtk_iommu_plat_data mt8173_data = {
        .m4u_plat     = M4U_MT8173,
-       .has_4gb_mode = true,
-       .has_bclk     = true,
-       .reset_axi    = true,
-       .larbid_remap = {0, 1, 2, 3, 4, 5}, /* Linear mapping. */
+       .flags        = HAS_4GB_MODE | HAS_BCLK | RESET_AXI,
+       .inv_sel_reg  = REG_MMU_INV_SEL_GEN1,
+       .larbid_remap = {{0}, {1}, {2}, {3}, {4}, {5}}, /* Linear mapping. */
 };
 
 static const struct mtk_iommu_plat_data mt8183_data = {
        .m4u_plat     = M4U_MT8183,
-       .reset_axi    = true,
-       .larbid_remap = {0, 4, 5, 6, 7, 2, 3, 1},
+       .flags        = RESET_AXI,
+       .inv_sel_reg  = REG_MMU_INV_SEL_GEN1,
+       .larbid_remap = {{0}, {4}, {5}, {6}, {7}, {2}, {3}, {1}},
 };
 
 static const struct of_device_id mtk_iommu_of_ids[] = {
        { .compatible = "mediatek,mt2712-m4u", .data = &mt2712_data},
+       { .compatible = "mediatek,mt6779-m4u", .data = &mt6779_data},
        { .compatible = "mediatek,mt8173-m4u", .data = &mt8173_data},
        { .compatible = "mediatek,mt8183-m4u", .data = &mt8183_data},
        {}
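The per-SoC feature booleans are folded into a single flags word, and MTK_IOMMU_HAS_FLAG() only reports true when every requested bit is present. A small illustration (the platform data values are hypothetical):

	/* Hypothetical platform data, used only to show the all-bits-must-match test. */
	static const struct mtk_iommu_plat_data example_data = {
		.flags = HAS_4GB_MODE | HAS_BCLK,
	};

	/* MTK_IOMMU_HAS_FLAG(&example_data, HAS_BCLK)             evaluates to true  */
	/* MTK_IOMMU_HAS_FLAG(&example_data, HAS_BCLK | RESET_AXI) evaluates to false */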
index ea949a3..122925d 100644 (file)
 #include <linux/iommu.h>
 #include <linux/list.h>
 #include <linux/spinlock.h>
+#include <linux/dma-mapping.h>
 #include <soc/mediatek/smi.h>
 
+#define MTK_LARB_COM_MAX       8
+#define MTK_LARB_SUBCOM_MAX    4
+
 struct mtk_iommu_suspend_reg {
-       u32                             standard_axi_mode;
+       union {
+               u32                     standard_axi_mode;/* v1 */
+               u32                     misc_ctrl;/* v2 */
+       };
        u32                             dcm_dis;
        u32                             ctrl_reg;
        u32                             int_control0;
        u32                             int_main_control;
        u32                             ivrp_paddr;
        u32                             vld_pa_rng;
+       u32                             wr_len_ctrl;
 };
 
 enum mtk_iommu_plat {
        M4U_MT2701,
        M4U_MT2712,
+       M4U_MT6779,
        M4U_MT8173,
        M4U_MT8183,
 };
 
 struct mtk_iommu_plat_data {
        enum mtk_iommu_plat m4u_plat;
-       bool                has_4gb_mode;
-
-       /* HW will use the EMI clock if there isn't the "bclk". */
-       bool                has_bclk;
-       bool                has_vld_pa_rng;
-       bool                reset_axi;
-       unsigned char       larbid_remap[MTK_LARB_NR_MAX];
+       u32                 flags;
+       u32                 inv_sel_reg;
+       unsigned char       larbid_remap[MTK_LARB_COM_MAX][MTK_LARB_SUBCOM_MAX];
 };
 
 struct mtk_iommu_domain;
@@ -62,6 +67,8 @@ struct mtk_iommu_data {
        struct iommu_device             iommu;
        const struct mtk_iommu_plat_data *plat_data;
 
+       struct dma_iommu_mapping        *mapping; /* For mtk_iommu_v1.c */
+
        struct list_head                list;
        struct mtk_smi_larb_iommu       larb_imu[MTK_LARB_NR_MAX];
 };
index c9d79cf..82ddfe9 100644 (file)
@@ -269,7 +269,7 @@ static int mtk_iommu_attach_device(struct iommu_domain *domain,
        int ret;
 
        /* Only allow the domain created internally. */
-       mtk_mapping = data->dev->archdata.iommu;
+       mtk_mapping = data->mapping;
        if (mtk_mapping->domain != domain)
                return 0;
 
@@ -369,7 +369,6 @@ static int mtk_iommu_create_mapping(struct device *dev,
        struct mtk_iommu_data *data;
        struct platform_device *m4updev;
        struct dma_iommu_mapping *mtk_mapping;
-       struct device *m4udev;
        int ret;
 
        if (args->args_count != 1) {
@@ -401,8 +400,7 @@ static int mtk_iommu_create_mapping(struct device *dev,
                return ret;
 
        data = dev_iommu_priv_get(dev);
-       m4udev = data->dev;
-       mtk_mapping = m4udev->archdata.iommu;
+       mtk_mapping = data->mapping;
        if (!mtk_mapping) {
                /* The MTK iommu supports a 4GB iova address space. */
                mtk_mapping = arm_iommu_create_mapping(&platform_bus_type,
@@ -410,7 +408,7 @@ static int mtk_iommu_create_mapping(struct device *dev,
                if (IS_ERR(mtk_mapping))
                        return PTR_ERR(mtk_mapping);
 
-               m4udev->archdata.iommu = mtk_mapping;
+               data->mapping = mtk_mapping;
        }
 
        return 0;
@@ -459,7 +457,7 @@ static void mtk_iommu_probe_finalize(struct device *dev)
        int err;
 
        data        = dev_iommu_priv_get(dev);
-       mtk_mapping = data->dev->archdata.iommu;
+       mtk_mapping = data->mapping;
 
        err = arm_iommu_attach_device(dev, mtk_mapping);
        if (err)
index 8e19bfa..a99afb5 100644 (file)
@@ -98,8 +98,11 @@ static ssize_t debug_read_regs(struct file *file, char __user *userbuf,
        mutex_lock(&iommu_debug_lock);
 
        bytes = omap_iommu_dump_ctx(obj, p, count);
+       if (bytes < 0)
+               goto err;
        bytes = simple_read_from_buffer(userbuf, count, ppos, buf, bytes);
 
+err:
        mutex_unlock(&iommu_debug_lock);
        kfree(buf);
 
index c8282cc..71f29c0 100644 (file)
@@ -3,7 +3,7 @@
  * omap iommu: tlb and pagetable primitives
  *
  * Copyright (C) 2008-2010 Nokia Corporation
- * Copyright (C) 2013-2017 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2013-2017 Texas Instruments Incorporated - https://www.ti.com/
  *
  * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>,
  *             Paul Mundt and Toshihiro Kobayashi
@@ -71,7 +71,7 @@ static struct omap_iommu_domain *to_omap_domain(struct iommu_domain *dom)
  **/
 void omap_iommu_save_ctx(struct device *dev)
 {
-       struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
+       struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
        struct omap_iommu *obj;
        u32 *p;
        int i;
@@ -101,7 +101,7 @@ EXPORT_SYMBOL_GPL(omap_iommu_save_ctx);
  **/
 void omap_iommu_restore_ctx(struct device *dev)
 {
-       struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
+       struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
        struct omap_iommu *obj;
        u32 *p;
        int i;
@@ -1398,7 +1398,7 @@ static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
 
 static int omap_iommu_count(struct device *dev)
 {
-       struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
+       struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
        int count = 0;
 
        while (arch_data->iommu_dev) {
@@ -1459,8 +1459,8 @@ static void omap_iommu_detach_fini(struct omap_iommu_domain *odomain)
 static int
 omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
 {
+       struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
        struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
-       struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
        struct omap_iommu_device *iommu;
        struct omap_iommu *oiommu;
        int ret = 0;
@@ -1524,7 +1524,7 @@ out:
 static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
                                   struct device *dev)
 {
-       struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
+       struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
        struct omap_iommu_device *iommu = omap_domain->iommus;
        struct omap_iommu *oiommu;
        int i;
@@ -1650,7 +1650,7 @@ static struct iommu_device *omap_iommu_probe_device(struct device *dev)
        int num_iommus, i;
 
        /*
-        * Allocate the archdata iommu structure for DT-based devices.
+        * Allocate the per-device iommu structure for DT-based devices.
         *
         * TODO: Simplify this when removing non-DT support completely from the
         * IOMMU users.
@@ -1698,7 +1698,7 @@ static struct iommu_device *omap_iommu_probe_device(struct device *dev)
                of_node_put(np);
        }
 
-       dev->archdata.iommu = arch_data;
+       dev_iommu_priv_set(dev, arch_data);
 
        /*
         * use the first IOMMU alone for the sysfs device linking.
@@ -1712,19 +1712,19 @@ static struct iommu_device *omap_iommu_probe_device(struct device *dev)
 
 static void omap_iommu_release_device(struct device *dev)
 {
-       struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
+       struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
 
        if (!dev->of_node || !arch_data)
                return;
 
-       dev->archdata.iommu = NULL;
+       dev_iommu_priv_set(dev, NULL);
        kfree(arch_data);
 
 }
 
 static struct iommu_group *omap_iommu_device_group(struct device *dev)
 {
-       struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
+       struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
        struct iommu_group *group = ERR_PTR(-EINVAL);
 
        if (!arch_data)
index d25c248..e5d86b7 100644 (file)
@@ -836,7 +836,7 @@ static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
 
 static struct rk_iommu *rk_iommu_from_dev(struct device *dev)
 {
-       struct rk_iommudata *data = dev->archdata.iommu;
+       struct rk_iommudata *data = dev_iommu_priv_get(dev);
 
        return data ? data->iommu : NULL;
 }
@@ -1059,7 +1059,7 @@ static struct iommu_device *rk_iommu_probe_device(struct device *dev)
        struct rk_iommudata *data;
        struct rk_iommu *iommu;
 
-       data = dev->archdata.iommu;
+       data = dev_iommu_priv_get(dev);
        if (!data)
                return ERR_PTR(-ENODEV);
 
@@ -1073,7 +1073,7 @@ static struct iommu_device *rk_iommu_probe_device(struct device *dev)
 
 static void rk_iommu_release_device(struct device *dev)
 {
-       struct rk_iommudata *data = dev->archdata.iommu;
+       struct rk_iommudata *data = dev_iommu_priv_get(dev);
 
        device_link_del(data->link);
 }
@@ -1100,7 +1100,7 @@ static int rk_iommu_of_xlate(struct device *dev,
        iommu_dev = of_find_device_by_node(args->np);
 
        data->iommu = platform_get_drvdata(iommu_dev);
-       dev->archdata.iommu = data;
+       dev_iommu_priv_set(dev, data);
 
        platform_device_put(iommu_dev);
 
index 5fbdff6..fac7202 100644 (file)
@@ -113,8 +113,8 @@ static int gart_iommu_attach_dev(struct iommu_domain *domain,
 
        if (gart->active_domain && gart->active_domain != domain) {
                ret = -EBUSY;
-       } else if (dev->archdata.iommu != domain) {
-               dev->archdata.iommu = domain;
+       } else if (dev_iommu_priv_get(dev) != domain) {
+               dev_iommu_priv_set(dev, domain);
                gart->active_domain = domain;
                gart->active_devices++;
        }
@@ -131,8 +131,8 @@ static void gart_iommu_detach_dev(struct iommu_domain *domain,
 
        spin_lock(&gart->dom_lock);
 
-       if (dev->archdata.iommu == domain) {
-               dev->archdata.iommu = NULL;
+       if (dev_iommu_priv_get(dev) == domain) {
+               dev_iommu_priv_set(dev, NULL);
 
                if (--gart->active_devices == 0)
                        gart->active_domain = NULL;
index 7426b76..124c884 100644 (file)
@@ -465,7 +465,7 @@ static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
 static int tegra_smmu_attach_dev(struct iommu_domain *domain,
                                 struct device *dev)
 {
-       struct tegra_smmu *smmu = dev->archdata.iommu;
+       struct tegra_smmu *smmu = dev_iommu_priv_get(dev);
        struct tegra_smmu_as *as = to_smmu_as(domain);
        struct device_node *np = dev->of_node;
        struct of_phandle_args args;
@@ -780,7 +780,7 @@ static struct iommu_device *tegra_smmu_probe_device(struct device *dev)
                         * supported by the Linux kernel, so abort after the
                         * first match.
                         */
-                       dev->archdata.iommu = smmu;
+                       dev_iommu_priv_set(dev, smmu);
 
                        break;
                }
@@ -797,7 +797,7 @@ static struct iommu_device *tegra_smmu_probe_device(struct device *dev)
 
 static void tegra_smmu_release_device(struct device *dev)
 {
-       dev->archdata.iommu = NULL;
+       dev_iommu_priv_set(dev, NULL);
 }
 
 static const struct tegra_smmu_group_soc *
@@ -856,7 +856,7 @@ static struct iommu_group *tegra_smmu_group_get(struct tegra_smmu *smmu,
 static struct iommu_group *tegra_smmu_device_group(struct device *dev)
 {
        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
-       struct tegra_smmu *smmu = dev->archdata.iommu;
+       struct tegra_smmu *smmu = dev_iommu_priv_get(dev);
        struct iommu_group *group;
 
        group = tegra_smmu_group_get(smmu, fwspec->ids[0]);
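The archdata conversions above all follow the same pattern: per-device driver data moves from dev->archdata.iommu to the dev_iommu private pointer. A sketch of the pairing, not tied to any one driver:

	/* At probe or of_xlate time, stash the driver's per-device data ... */
	dev_iommu_priv_set(dev, data);

	/* ... and fetch it back wherever dev->archdata.iommu used to be read. */
	data = dev_iommu_priv_get(dev);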
index f6f0748..b4da396 100644 (file)
@@ -1010,8 +1010,8 @@ static int viommu_probe(struct virtio_device *vdev)
        if (ret)
                return ret;
 
-       virtio_cread(vdev, struct virtio_iommu_config, page_size_mask,
-                    &viommu->pgsize_bitmap);
+       virtio_cread_le(vdev, struct virtio_iommu_config, page_size_mask,
+                       &viommu->pgsize_bitmap);
 
        if (!viommu->pgsize_bitmap) {
                ret = -EINVAL;
@@ -1022,25 +1022,25 @@ static int viommu_probe(struct virtio_device *vdev)
        viommu->last_domain = ~0U;
 
        /* Optional features */
-       virtio_cread_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE,
-                            struct virtio_iommu_config, input_range.start,
-                            &input_start);
+       virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE,
+                               struct virtio_iommu_config, input_range.start,
+                               &input_start);
 
-       virtio_cread_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE,
-                            struct virtio_iommu_config, input_range.end,
-                            &input_end);
+       virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE,
+                               struct virtio_iommu_config, input_range.end,
+                               &input_end);
 
-       virtio_cread_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE,
-                            struct virtio_iommu_config, domain_range.start,
-                            &viommu->first_domain);
+       virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE,
+                               struct virtio_iommu_config, domain_range.start,
+                               &viommu->first_domain);
 
-       virtio_cread_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE,
-                            struct virtio_iommu_config, domain_range.end,
-                            &viommu->last_domain);
+       virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE,
+                               struct virtio_iommu_config, domain_range.end,
+                               &viommu->last_domain);
 
-       virtio_cread_feature(vdev, VIRTIO_IOMMU_F_PROBE,
-                            struct virtio_iommu_config, probe_size,
-                            &viommu->probe_size);
+       virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_PROBE,
+                               struct virtio_iommu_config, probe_size,
+                               &viommu->probe_size);
 
        viommu->geometry = (struct iommu_domain_geometry) {
                .aperture_start = input_start,
index c10a931..53945ca 100644 (file)
@@ -679,7 +679,7 @@ pdc_receive(struct pdc_state *pdcs)
 
        /* read last_rx_curr from register once */
        pdcs->last_rx_curr =
-           (ioread32(&pdcs->rxregs_64->status0) &
+           (ioread32((const void __iomem *)&pdcs->rxregs_64->status0) &
             CRYPTO_D64_RS0_CD_MASK) / RING_ENTRY_SIZE;
 
        do {
index 73fd50e..d50737e 100644 (file)
@@ -1139,6 +1139,7 @@ static int resize_bitmaps(struct mddev *mddev, sector_t newsize, sector_t oldsiz
                bitmap = get_bitmap_from_slot(mddev, i);
                if (IS_ERR(bitmap)) {
                        pr_err("can't get bitmap from slot %d\n", i);
+                       bitmap = NULL;
                        goto out;
                }
                counts = &bitmap->counts;
index 15bbdc1..6072782 100644 (file)
@@ -850,7 +850,13 @@ void mddev_unlock(struct mddev *mddev)
                                sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
                                if (mddev->sysfs_action)
                                        sysfs_put(mddev->sysfs_action);
+                               if (mddev->sysfs_completed)
+                                       sysfs_put(mddev->sysfs_completed);
+                               if (mddev->sysfs_degraded)
+                                       sysfs_put(mddev->sysfs_degraded);
                                mddev->sysfs_action = NULL;
+                               mddev->sysfs_completed = NULL;
+                               mddev->sysfs_degraded = NULL;
                        }
                }
                mddev->sysfs_active = 0;
@@ -4068,6 +4074,8 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
                        pr_warn("md: cannot register extra attributes for %s\n",
                                mdname(mddev));
                mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action");
+               mddev->sysfs_completed = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_completed");
+               mddev->sysfs_degraded = sysfs_get_dirent_safe(mddev->kobj.sd, "degraded");
        }
        if (oldpers->sync_request != NULL &&
            pers->sync_request == NULL) {
@@ -5582,14 +5590,9 @@ static void md_free(struct kobject *ko)
 
        if (mddev->sysfs_state)
                sysfs_put(mddev->sysfs_state);
-       if (mddev->sysfs_completed)
-               sysfs_put(mddev->sysfs_completed);
-       if (mddev->sysfs_degraded)
-               sysfs_put(mddev->sysfs_degraded);
        if (mddev->sysfs_level)
                sysfs_put(mddev->sysfs_level);
 
-
        if (mddev->gendisk)
                del_gendisk(mddev->gendisk);
        if (mddev->queue)
@@ -5757,8 +5760,6 @@ static int md_alloc(dev_t dev, char *name)
        if (!error && mddev->kobj.sd) {
                kobject_uevent(&mddev->kobj, KOBJ_ADD);
                mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state");
-               mddev->sysfs_completed = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_completed");
-               mddev->sysfs_degraded = sysfs_get_dirent_safe(mddev->kobj.sd, "degraded");
                mddev->sysfs_level = sysfs_get_dirent_safe(mddev->kobj.sd, "level");
        }
        mddev_put(mddev);
@@ -6036,6 +6037,8 @@ int md_run(struct mddev *mddev)
                        pr_warn("md: cannot register extra attributes for %s\n",
                                mdname(mddev));
                mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action");
+               mddev->sysfs_completed = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_completed");
+               mddev->sysfs_degraded = sysfs_get_dirent_safe(mddev->kobj.sd, "degraded");
        } else if (mddev->ro == 2) /* auto-readonly not meaningful */
                mddev->ro = 0;
 
index fb8d1fb..ef0fd48 100644 (file)
@@ -7019,7 +7019,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
        } else
                goto abort;
        spin_lock_init(&conf->device_lock);
-       seqcount_init(&conf->gen_lock);
+       seqcount_spinlock_init(&conf->gen_lock, &conf->device_lock);
        mutex_init(&conf->cache_size_mutex);
        init_waitqueue_head(&conf->wait_for_quiescent);
        init_waitqueue_head(&conf->wait_for_stripe);
index 7fb3b26..16fc294 100644 (file)
@@ -582,7 +582,7 @@ struct r5conf {
        int                     prev_chunk_sectors;
        int                     prev_algo;
        short                   generation; /* increments with every reshape */
-       seqcount_t              gen_lock;       /* lock against generation changes */
+       seqcount_spinlock_t     gen_lock;       /* lock against generation changes */
        unsigned long           reshape_checkpoint; /* Time we last updated
                                                     * metadata */
        long long               min_offset_diff; /* minimum difference between
index 152a713..1a32266 100644 (file)
@@ -9,9 +9,11 @@
 
 #if defined(CONFIG_EXYNOS_IOMMU)
 
+#include <linux/iommu.h>
+
 static inline bool exynos_is_iommu_available(struct device *dev)
 {
-       return dev->archdata.iommu != NULL;
+       return dev_iommu_priv_get(dev) != NULL;
 }
 
 #else
index 97440af..2c79e95 100644 (file)
@@ -188,6 +188,16 @@ config RENESAS_RPCIF
          host or HyperFlash. You'll have to select individual components
          under the corresponding menu.
 
+config STM32_FMC2_EBI
+       tristate "Support for FMC2 External Bus Interface on STM32MP SoCs"
+       depends on MACH_STM32MP157 || COMPILE_TEST
+       select MFD_SYSCON
+       help
+         Select this option to enable the STM32 FMC2 External Bus Interface
+         controller. This driver configures the transactions with external
+         devices (like SRAM, Ethernet adapters, FPGAs, LCD displays, ...) on
+         SoCs containing the FMC2 External Bus Interface.
+
 source "drivers/memory/samsung/Kconfig"
 source "drivers/memory/tegra/Kconfig"
 
index d105f8e..b4533ff 100644 (file)
@@ -23,6 +23,7 @@ obj-$(CONFIG_MTK_SMI)         += mtk-smi.o
 obj-$(CONFIG_DA8XX_DDRCTL)     += da8xx-ddrctl.o
 obj-$(CONFIG_PL353_SMC)                += pl353-smc.o
 obj-$(CONFIG_RENESAS_RPCIF)    += renesas-rpc-if.o
+obj-$(CONFIG_STM32_FMC2_EBI)   += stm32-fmc2-ebi.o
 
 obj-$(CONFIG_SAMSUNG_MC)       += samsung/
 obj-$(CONFIG_TEGRA_MC)         += tegra/
index e154bea..c212625 100644 (file)
@@ -239,6 +239,13 @@ static const struct mtk_smi_larb_gen mtk_smi_larb_mt2712 = {
        .larb_direct_to_common_mask = BIT(8) | BIT(9),      /* bdpsys */
 };
 
+static const struct mtk_smi_larb_gen mtk_smi_larb_mt6779 = {
+       .config_port  = mtk_smi_larb_config_port_gen2_general,
+       .larb_direct_to_common_mask =
+               BIT(4) | BIT(6) | BIT(11) | BIT(12) | BIT(13),
+               /* DUMMY | IPU0 | IPU1 | CCU | MDLA */
+};
+
 static const struct mtk_smi_larb_gen mtk_smi_larb_mt8183 = {
        .has_gals                   = true,
        .config_port                = mtk_smi_larb_config_port_gen2_general,
@@ -260,6 +267,10 @@ static const struct of_device_id mtk_smi_larb_of_ids[] = {
                .data = &mtk_smi_larb_mt2712
        },
        {
+               .compatible = "mediatek,mt6779-smi-larb",
+               .data = &mtk_smi_larb_mt6779
+       },
+       {
                .compatible = "mediatek,mt8183-smi-larb",
                .data = &mtk_smi_larb_mt8183
        },
@@ -388,6 +399,13 @@ static const struct mtk_smi_common_plat mtk_smi_common_gen2 = {
        .gen = MTK_SMI_GEN2,
 };
 
+static const struct mtk_smi_common_plat mtk_smi_common_mt6779 = {
+       .gen            = MTK_SMI_GEN2,
+       .has_gals       = true,
+       .bus_sel        = F_MMU1_LARB(1) | F_MMU1_LARB(2) | F_MMU1_LARB(4) |
+                         F_MMU1_LARB(5) | F_MMU1_LARB(6) | F_MMU1_LARB(7),
+};
+
 static const struct mtk_smi_common_plat mtk_smi_common_mt8183 = {
        .gen      = MTK_SMI_GEN2,
        .has_gals = true,
@@ -409,6 +427,10 @@ static const struct of_device_id mtk_smi_common_of_ids[] = {
                .data = &mtk_smi_common_gen2,
        },
        {
+               .compatible = "mediatek,mt6779-smi-common",
+               .data = &mtk_smi_common_mt6779,
+       },
+       {
                .compatible = "mediatek,mt8183-smi-common",
                .data = &mtk_smi_common_mt8183,
        },
diff --git a/drivers/memory/stm32-fmc2-ebi.c b/drivers/memory/stm32-fmc2-ebi.c
new file mode 100644 (file)
index 0000000..4d5758c
--- /dev/null
@@ -0,0 +1,1206 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) STMicroelectronics 2020
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+/* FMC2 Controller Registers */
+#define FMC2_BCR1                      0x0
+#define FMC2_BTR1                      0x4
+#define FMC2_BCR(x)                    ((x) * 0x8 + FMC2_BCR1)
+#define FMC2_BTR(x)                    ((x) * 0x8 + FMC2_BTR1)
+#define FMC2_PCSCNTR                   0x20
+#define FMC2_BWTR1                     0x104
+#define FMC2_BWTR(x)                   ((x) * 0x8 + FMC2_BWTR1)
+
+/* Register: FMC2_BCR1 */
+#define FMC2_BCR1_CCLKEN               BIT(20)
+#define FMC2_BCR1_FMC2EN               BIT(31)
+
+/* Register: FMC2_BCRx */
+#define FMC2_BCR_MBKEN                 BIT(0)
+#define FMC2_BCR_MUXEN                 BIT(1)
+#define FMC2_BCR_MTYP                  GENMASK(3, 2)
+#define FMC2_BCR_MWID                  GENMASK(5, 4)
+#define FMC2_BCR_FACCEN                        BIT(6)
+#define FMC2_BCR_BURSTEN               BIT(8)
+#define FMC2_BCR_WAITPOL               BIT(9)
+#define FMC2_BCR_WAITCFG               BIT(11)
+#define FMC2_BCR_WREN                  BIT(12)
+#define FMC2_BCR_WAITEN                        BIT(13)
+#define FMC2_BCR_EXTMOD                        BIT(14)
+#define FMC2_BCR_ASYNCWAIT             BIT(15)
+#define FMC2_BCR_CPSIZE                        GENMASK(18, 16)
+#define FMC2_BCR_CBURSTRW              BIT(19)
+#define FMC2_BCR_NBLSET                        GENMASK(23, 22)
+
+/* Register: FMC2_BTRx/FMC2_BWTRx */
+#define FMC2_BXTR_ADDSET               GENMASK(3, 0)
+#define FMC2_BXTR_ADDHLD               GENMASK(7, 4)
+#define FMC2_BXTR_DATAST               GENMASK(15, 8)
+#define FMC2_BXTR_BUSTURN              GENMASK(19, 16)
+#define FMC2_BTR_CLKDIV                        GENMASK(23, 20)
+#define FMC2_BTR_DATLAT                        GENMASK(27, 24)
+#define FMC2_BXTR_ACCMOD               GENMASK(29, 28)
+#define FMC2_BXTR_DATAHLD              GENMASK(31, 30)
+
+/* Register: FMC2_PCSCNTR */
+#define FMC2_PCSCNTR_CSCOUNT           GENMASK(15, 0)
+#define FMC2_PCSCNTR_CNTBEN(x)         BIT((x) + 16)
+
+#define FMC2_MAX_EBI_CE                        4
+#define FMC2_MAX_BANKS                 5
+
+#define FMC2_BCR_CPSIZE_0              0x0
+#define FMC2_BCR_CPSIZE_128            0x1
+#define FMC2_BCR_CPSIZE_256            0x2
+#define FMC2_BCR_CPSIZE_512            0x3
+#define FMC2_BCR_CPSIZE_1024           0x4
+
+#define FMC2_BCR_MWID_8                        0x0
+#define FMC2_BCR_MWID_16               0x1
+
+#define FMC2_BCR_MTYP_SRAM             0x0
+#define FMC2_BCR_MTYP_PSRAM            0x1
+#define FMC2_BCR_MTYP_NOR              0x2
+
+#define FMC2_BXTR_EXTMOD_A             0x0
+#define FMC2_BXTR_EXTMOD_B             0x1
+#define FMC2_BXTR_EXTMOD_C             0x2
+#define FMC2_BXTR_EXTMOD_D             0x3
+
+#define FMC2_BCR_NBLSET_MAX            0x3
+#define FMC2_BXTR_ADDSET_MAX           0xf
+#define FMC2_BXTR_ADDHLD_MAX           0xf
+#define FMC2_BXTR_DATAST_MAX           0xff
+#define FMC2_BXTR_BUSTURN_MAX          0xf
+#define FMC2_BXTR_DATAHLD_MAX          0x3
+#define FMC2_BTR_CLKDIV_MAX            0xf
+#define FMC2_BTR_DATLAT_MAX            0xf
+#define FMC2_PCSCNTR_CSCOUNT_MAX       0xff
+
+enum stm32_fmc2_ebi_bank {
+       FMC2_EBI1 = 0,
+       FMC2_EBI2,
+       FMC2_EBI3,
+       FMC2_EBI4,
+       FMC2_NAND
+};
+
+enum stm32_fmc2_ebi_register_type {
+       FMC2_REG_BCR = 1,
+       FMC2_REG_BTR,
+       FMC2_REG_BWTR,
+       FMC2_REG_PCSCNTR
+};
+
+enum stm32_fmc2_ebi_transaction_type {
+       FMC2_ASYNC_MODE_1_SRAM = 0,
+       FMC2_ASYNC_MODE_1_PSRAM,
+       FMC2_ASYNC_MODE_A_SRAM,
+       FMC2_ASYNC_MODE_A_PSRAM,
+       FMC2_ASYNC_MODE_2_NOR,
+       FMC2_ASYNC_MODE_B_NOR,
+       FMC2_ASYNC_MODE_C_NOR,
+       FMC2_ASYNC_MODE_D_NOR,
+       FMC2_SYNC_READ_SYNC_WRITE_PSRAM,
+       FMC2_SYNC_READ_ASYNC_WRITE_PSRAM,
+       FMC2_SYNC_READ_SYNC_WRITE_NOR,
+       FMC2_SYNC_READ_ASYNC_WRITE_NOR
+};
+
+enum stm32_fmc2_ebi_buswidth {
+       FMC2_BUSWIDTH_8 = 8,
+       FMC2_BUSWIDTH_16 = 16
+};
+
+enum stm32_fmc2_ebi_cpsize {
+       FMC2_CPSIZE_0 = 0,
+       FMC2_CPSIZE_128 = 128,
+       FMC2_CPSIZE_256 = 256,
+       FMC2_CPSIZE_512 = 512,
+       FMC2_CPSIZE_1024 = 1024
+};
+
+struct stm32_fmc2_ebi {
+       struct device *dev;
+       struct clk *clk;
+       struct regmap *regmap;
+       u8 bank_assigned;
+
+       u32 bcr[FMC2_MAX_EBI_CE];
+       u32 btr[FMC2_MAX_EBI_CE];
+       u32 bwtr[FMC2_MAX_EBI_CE];
+       u32 pcscntr;
+};
+
+/*
+ * struct stm32_fmc2_prop - STM32 FMC2 EBI property
+ * @name: the device tree binding name of the property
+ * @bprop: indicates that it is a boolean property
+ * @mprop: indicates that it is a mandatory property
+ * @reg_type: the register that has to be modified
+ * @reg_mask: the bit that has to be modified in the selected register
+ *            when it is a boolean property
+ * @reset_val: the default value that has to be set when the property
+ *             has not been defined in the device tree
+ * @check: this callback checks that the property is compliant with the
+ *         selected transaction type
+ * @calculate: this callback converts a value set in the device tree, for
+ *             example a timing in nanoseconds, into clock cycles or clock
+ *             periods
+ * @set: this callback applies the values in the registers
+ */
+struct stm32_fmc2_prop {
+       const char *name;
+       bool bprop;
+       bool mprop;
+       int reg_type;
+       u32 reg_mask;
+       u32 reset_val;
+       int (*check)(struct stm32_fmc2_ebi *ebi,
+                    const struct stm32_fmc2_prop *prop, int cs);
+       u32 (*calculate)(struct stm32_fmc2_ebi *ebi, int cs, u32 setup);
+       int (*set)(struct stm32_fmc2_ebi *ebi,
+                  const struct stm32_fmc2_prop *prop,
+                  int cs, u32 setup);
+};
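Each table entry therefore ties a device tree binding name to the helpers that validate and convert it. An illustrative entry (the binding name is hypothetical; the callbacks are the ones defined further down in this file):

	{
		.name      = "st,example-timing-ns",
		.reg_type  = FMC2_REG_BTR,
		.reset_val = FMC2_BXTR_DATAST_MAX,
		.check     = stm32_fmc2_ebi_check_async_trans,
		.calculate = stm32_fmc2_ebi_ns_to_clock_cycles,
	}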
+
+static int stm32_fmc2_ebi_check_mux(struct stm32_fmc2_ebi *ebi,
+                                   const struct stm32_fmc2_prop *prop,
+                                   int cs)
+{
+       u32 bcr;
+
+       regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
+
+       if (bcr & FMC2_BCR_MTYP)
+               return 0;
+
+       return -EINVAL;
+}
+
+static int stm32_fmc2_ebi_check_waitcfg(struct stm32_fmc2_ebi *ebi,
+                                       const struct stm32_fmc2_prop *prop,
+                                       int cs)
+{
+       u32 bcr, val = FIELD_PREP(FMC2_BCR_MTYP, FMC2_BCR_MTYP_NOR);
+
+       regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
+
+       if ((bcr & FMC2_BCR_MTYP) == val && bcr & FMC2_BCR_BURSTEN)
+               return 0;
+
+       return -EINVAL;
+}
+
+static int stm32_fmc2_ebi_check_sync_trans(struct stm32_fmc2_ebi *ebi,
+                                          const struct stm32_fmc2_prop *prop,
+                                          int cs)
+{
+       u32 bcr;
+
+       regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
+
+       if (bcr & FMC2_BCR_BURSTEN)
+               return 0;
+
+       return -EINVAL;
+}
+
+static int stm32_fmc2_ebi_check_async_trans(struct stm32_fmc2_ebi *ebi,
+                                           const struct stm32_fmc2_prop *prop,
+                                           int cs)
+{
+       u32 bcr;
+
+       regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
+
+       if (!(bcr & FMC2_BCR_BURSTEN) || !(bcr & FMC2_BCR_CBURSTRW))
+               return 0;
+
+       return -EINVAL;
+}
+
+static int stm32_fmc2_ebi_check_cpsize(struct stm32_fmc2_ebi *ebi,
+                                      const struct stm32_fmc2_prop *prop,
+                                      int cs)
+{
+       u32 bcr, val = FIELD_PREP(FMC2_BCR_MTYP, FMC2_BCR_MTYP_PSRAM);
+
+       regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
+
+       if ((bcr & FMC2_BCR_MTYP) == val && bcr & FMC2_BCR_BURSTEN)
+               return 0;
+
+       return -EINVAL;
+}
+
+static int stm32_fmc2_ebi_check_address_hold(struct stm32_fmc2_ebi *ebi,
+                                            const struct stm32_fmc2_prop *prop,
+                                            int cs)
+{
+       u32 bcr, bxtr, val = FIELD_PREP(FMC2_BXTR_ACCMOD, FMC2_BXTR_EXTMOD_D);
+
+       regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
+       if (prop->reg_type == FMC2_REG_BWTR)
+               regmap_read(ebi->regmap, FMC2_BWTR(cs), &bxtr);
+       else
+               regmap_read(ebi->regmap, FMC2_BTR(cs), &bxtr);
+
+       if ((!(bcr & FMC2_BCR_BURSTEN) || !(bcr & FMC2_BCR_CBURSTRW)) &&
+           ((bxtr & FMC2_BXTR_ACCMOD) == val || bcr & FMC2_BCR_MUXEN))
+               return 0;
+
+       return -EINVAL;
+}
+
+static int stm32_fmc2_ebi_check_clk_period(struct stm32_fmc2_ebi *ebi,
+                                          const struct stm32_fmc2_prop *prop,
+                                          int cs)
+{
+       u32 bcr, bcr1;
+
+       regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
+       if (cs)
+               regmap_read(ebi->regmap, FMC2_BCR1, &bcr1);
+       else
+               bcr1 = bcr;
+
+       if (bcr & FMC2_BCR_BURSTEN && (!cs || !(bcr1 & FMC2_BCR1_CCLKEN)))
+               return 0;
+
+       return -EINVAL;
+}
+
+static int stm32_fmc2_ebi_check_cclk(struct stm32_fmc2_ebi *ebi,
+                                    const struct stm32_fmc2_prop *prop,
+                                    int cs)
+{
+       if (cs)
+               return -EINVAL;
+
+       return stm32_fmc2_ebi_check_sync_trans(ebi, prop, cs);
+}
+
+static u32 stm32_fmc2_ebi_ns_to_clock_cycles(struct stm32_fmc2_ebi *ebi,
+                                            int cs, u32 setup)
+{
+       unsigned long hclk = clk_get_rate(ebi->clk);
+       unsigned long hclkp = NSEC_PER_SEC / (hclk / 1000);
+
+       return DIV_ROUND_UP(setup * 1000, hclkp);
+}
+
+static u32 stm32_fmc2_ebi_ns_to_clk_period(struct stm32_fmc2_ebi *ebi,
+                                          int cs, u32 setup)
+{
+       u32 nb_clk_cycles = stm32_fmc2_ebi_ns_to_clock_cycles(ebi, cs, setup);
+       u32 bcr, btr, clk_period;
+
+       regmap_read(ebi->regmap, FMC2_BCR1, &bcr);
+       if (bcr & FMC2_BCR1_CCLKEN || !cs)
+               regmap_read(ebi->regmap, FMC2_BTR1, &btr);
+       else
+               regmap_read(ebi->regmap, FMC2_BTR(cs), &btr);
+
+       clk_period = FIELD_GET(FMC2_BTR_CLKDIV, btr) + 1;
+
+       return DIV_ROUND_UP(nb_clk_cycles, clk_period);
+}
+
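A quick worked example of the conversion above (an illustrative sketch, not part of the patch): the driver turns a nanosecond timing from the device tree into HCLK cycles by computing the HCLK period in picoseconds and rounding up. Assuming a 133 MHz HCLK (the real rate comes from clk_get_rate()), a 100 ns request becomes 14 cycles:

#include <stdio.h>

#define NSEC_PER_SEC        1000000000UL
#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

/* Same arithmetic as stm32_fmc2_ebi_ns_to_clock_cycles(), in userspace */
static unsigned long ns_to_hclk_cycles(unsigned long hclk_hz, unsigned long ns)
{
        /* HCLK period in picoseconds: NSEC_PER_SEC / (rate in kHz) */
        unsigned long hclkp_ps = NSEC_PER_SEC / (hclk_hz / 1000);

        return DIV_ROUND_UP(ns * 1000, hclkp_ps);
}

int main(void)
{
        unsigned long hclk_hz = 133000000UL;  /* assumed rate, ~7.5 ns period */

        /* A 100 ns device tree timing rounds up to 14 HCLK cycles */
        printf("%lu cycles\n", ns_to_hclk_cycles(hclk_hz, 100));
        return 0;
}

The rounded-up cycle count returned here is then clamped into the relevant bit field by the set callbacks that follow.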
+static int stm32_fmc2_ebi_get_reg(int reg_type, int cs, u32 *reg)
+{
+       switch (reg_type) {
+       case FMC2_REG_BCR:
+               *reg = FMC2_BCR(cs);
+               break;
+       case FMC2_REG_BTR:
+               *reg = FMC2_BTR(cs);
+               break;
+       case FMC2_REG_BWTR:
+               *reg = FMC2_BWTR(cs);
+               break;
+       case FMC2_REG_PCSCNTR:
+               *reg = FMC2_PCSCNTR;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int stm32_fmc2_ebi_set_bit_field(struct stm32_fmc2_ebi *ebi,
+                                       const struct stm32_fmc2_prop *prop,
+                                       int cs, u32 setup)
+{
+       u32 reg;
+       int ret;
+
+       ret = stm32_fmc2_ebi_get_reg(prop->reg_type, cs, &reg);
+       if (ret)
+               return ret;
+
+       regmap_update_bits(ebi->regmap, reg, prop->reg_mask,
+                          setup ? prop->reg_mask : 0);
+
+       return 0;
+}
+
+static int stm32_fmc2_ebi_set_trans_type(struct stm32_fmc2_ebi *ebi,
+                                        const struct stm32_fmc2_prop *prop,
+                                        int cs, u32 setup)
+{
+       u32 bcr_mask, bcr = FMC2_BCR_WREN;
+       u32 btr_mask, btr = 0;
+       u32 bwtr_mask, bwtr = 0;
+
+       bwtr_mask = FMC2_BXTR_ACCMOD;
+       btr_mask = FMC2_BXTR_ACCMOD;
+       bcr_mask = FMC2_BCR_MUXEN | FMC2_BCR_MTYP | FMC2_BCR_FACCEN |
+                  FMC2_BCR_WREN | FMC2_BCR_WAITEN | FMC2_BCR_BURSTEN |
+                  FMC2_BCR_EXTMOD | FMC2_BCR_CBURSTRW;
+
+       switch (setup) {
+       case FMC2_ASYNC_MODE_1_SRAM:
+               bcr |= FIELD_PREP(FMC2_BCR_MTYP, FMC2_BCR_MTYP_SRAM);
+               /*
+                * MUXEN = 0, MTYP = 0, FACCEN = 0, BURSTEN = 0, WAITEN = 0,
+                * WREN = 1, EXTMOD = 0, CBURSTRW = 0, ACCMOD = 0
+                */
+               break;
+       case FMC2_ASYNC_MODE_1_PSRAM:
+               /*
+                * MUXEN = 0, MTYP = 1, FACCEN = 0, BURSTEN = 0, WAITEN = 0,
+                * WREN = 1, EXTMOD = 0, CBURSTRW = 0, ACCMOD = 0
+                */
+               bcr |= FIELD_PREP(FMC2_BCR_MTYP, FMC2_BCR_MTYP_PSRAM);
+               break;
+       case FMC2_ASYNC_MODE_A_SRAM:
+               /*
+                * MUXEN = 0, MTYP = 0, FACCEN = 0, BURSTEN = 0, WAITEN = 0,
+                * WREN = 1, EXTMOD = 1, CBURSTRW = 0, ACCMOD = 0
+                */
+               bcr |= FIELD_PREP(FMC2_BCR_MTYP, FMC2_BCR_MTYP_SRAM);
+               bcr |= FMC2_BCR_EXTMOD;
+               btr |= FIELD_PREP(FMC2_BXTR_ACCMOD, FMC2_BXTR_EXTMOD_A);
+               bwtr |= FIELD_PREP(FMC2_BXTR_ACCMOD, FMC2_BXTR_EXTMOD_A);
+               break;
+       case FMC2_ASYNC_MODE_A_PSRAM:
+               /*
+                * MUXEN = 0, MTYP = 1, FACCEN = 0, BURSTEN = 0, WAITEN = 0,
+                * WREN = 1, EXTMOD = 1, CBURSTRW = 0, ACCMOD = 0
+                */
+               bcr |= FIELD_PREP(FMC2_BCR_MTYP, FMC2_BCR_MTYP_PSRAM);
+               bcr |= FMC2_BCR_EXTMOD;
+               btr |= FIELD_PREP(FMC2_BXTR_ACCMOD, FMC2_BXTR_EXTMOD_A);
+               bwtr |= FIELD_PREP(FMC2_BXTR_ACCMOD, FMC2_BXTR_EXTMOD_A);
+               break;
+       case FMC2_ASYNC_MODE_2_NOR:
+               /*
+                * MUXEN = 0, MTYP = 2, FACCEN = 1, BURSTEN = 0, WAITEN = 0,
+                * WREN = 1, EXTMOD = 0, CBURSTRW = 0, ACCMOD = 0
+                */
+               bcr |= FIELD_PREP(FMC2_BCR_MTYP, FMC2_BCR_MTYP_NOR);
+               bcr |= FMC2_BCR_FACCEN;
+               break;
+       case FMC2_ASYNC_MODE_B_NOR:
+               /*
+                * MUXEN = 0, MTYP = 2, FACCEN = 1, BURSTEN = 0, WAITEN = 0,
+                * WREN = 1, EXTMOD = 1, CBURSTRW = 0, ACCMOD = 1
+                */
+               bcr |= FIELD_PREP(FMC2_BCR_MTYP, FMC2_BCR_MTYP_NOR);
+               bcr |= FMC2_BCR_FACCEN | FMC2_BCR_EXTMOD;
+               btr |= FIELD_PREP(FMC2_BXTR_ACCMOD, FMC2_BXTR_EXTMOD_B);
+               bwtr |= FIELD_PREP(FMC2_BXTR_ACCMOD, FMC2_BXTR_EXTMOD_B);
+               break;
+       case FMC2_ASYNC_MODE_C_NOR:
+               /*
+                * MUXEN = 0, MTYP = 2, FACCEN = 1, BURSTEN = 0, WAITEN = 0,
+                * WREN = 1, EXTMOD = 1, CBURSTRW = 0, ACCMOD = 2
+                */
+               bcr |= FIELD_PREP(FMC2_BCR_MTYP, FMC2_BCR_MTYP_NOR);
+               bcr |= FMC2_BCR_FACCEN | FMC2_BCR_EXTMOD;
+               btr |= FIELD_PREP(FMC2_BXTR_ACCMOD, FMC2_BXTR_EXTMOD_C);
+               bwtr |= FIELD_PREP(FMC2_BXTR_ACCMOD, FMC2_BXTR_EXTMOD_C);
+               break;
+       case FMC2_ASYNC_MODE_D_NOR:
+               /*
+                * MUXEN = 0, MTYP = 2, FACCEN = 1, BURSTEN = 0, WAITEN = 0,
+                * WREN = 1, EXTMOD = 1, CBURSTRW = 0, ACCMOD = 3
+                */
+               bcr |= FIELD_PREP(FMC2_BCR_MTYP, FMC2_BCR_MTYP_NOR);
+               bcr |= FMC2_BCR_FACCEN | FMC2_BCR_EXTMOD;
+               btr |= FIELD_PREP(FMC2_BXTR_ACCMOD, FMC2_BXTR_EXTMOD_D);
+               bwtr |= FIELD_PREP(FMC2_BXTR_ACCMOD, FMC2_BXTR_EXTMOD_D);
+               break;
+       case FMC2_SYNC_READ_SYNC_WRITE_PSRAM:
+               /*
+                * MUXEN = 0, MTYP = 1, FACCEN = 0, BURSTEN = 1, WAITEN = 0,
+                * WREN = 1, EXTMOD = 0, CBURSTRW = 1, ACCMOD = 0
+                */
+               bcr |= FIELD_PREP(FMC2_BCR_MTYP, FMC2_BCR_MTYP_PSRAM);
+               bcr |= FMC2_BCR_BURSTEN | FMC2_BCR_CBURSTRW;
+               break;
+       case FMC2_SYNC_READ_ASYNC_WRITE_PSRAM:
+               /*
+                * MUXEN = 0, MTYP = 1, FACCEN = 0, BURSTEN = 1, WAITEN = 0,
+                * WREN = 1, EXTMOD = 0, CBURSTRW = 0, ACCMOD = 0
+                */
+               bcr |= FIELD_PREP(FMC2_BCR_MTYP, FMC2_BCR_MTYP_PSRAM);
+               bcr |= FMC2_BCR_BURSTEN;
+               break;
+       case FMC2_SYNC_READ_SYNC_WRITE_NOR:
+               /*
+                * MUXEN = 0, MTYP = 2, FACCEN = 1, BURSTEN = 1, WAITEN = 0,
+                * WREN = 1, EXTMOD = 0, CBURSTRW = 1, ACCMOD = 0
+                */
+               bcr |= FIELD_PREP(FMC2_BCR_MTYP, FMC2_BCR_MTYP_NOR);
+               bcr |= FMC2_BCR_FACCEN | FMC2_BCR_BURSTEN | FMC2_BCR_CBURSTRW;
+               break;
+       case FMC2_SYNC_READ_ASYNC_WRITE_NOR:
+               /*
+                * MUXEN = 0, MTYP = 2, FACCEN = 1, BURSTEN = 1, WAITEN = 0,
+                * WREN = 1, EXTMOD = 0, CBURSTRW = 0, ACCMOD = 0
+                */
+               bcr |= FIELD_PREP(FMC2_BCR_MTYP, FMC2_BCR_MTYP_NOR);
+               bcr |= FMC2_BCR_FACCEN | FMC2_BCR_BURSTEN;
+               break;
+       default:
+               /* Type of transaction not supported */
+               return -EINVAL;
+       }
+
+       if (bcr & FMC2_BCR_EXTMOD)
+               regmap_update_bits(ebi->regmap, FMC2_BWTR(cs),
+                                  bwtr_mask, bwtr);
+       regmap_update_bits(ebi->regmap, FMC2_BTR(cs), btr_mask, btr);
+       regmap_update_bits(ebi->regmap, FMC2_BCR(cs), bcr_mask, bcr);
+
+       return 0;
+}
+
+static int stm32_fmc2_ebi_set_buswidth(struct stm32_fmc2_ebi *ebi,
+                                      const struct stm32_fmc2_prop *prop,
+                                      int cs, u32 setup)
+{
+       u32 val;
+
+       switch (setup) {
+       case FMC2_BUSWIDTH_8:
+               val = FIELD_PREP(FMC2_BCR_MWID, FMC2_BCR_MWID_8);
+               break;
+       case FMC2_BUSWIDTH_16:
+               val = FIELD_PREP(FMC2_BCR_MWID, FMC2_BCR_MWID_16);
+               break;
+       default:
+               /* Buswidth not supported */
+               return -EINVAL;
+       }
+
+       regmap_update_bits(ebi->regmap, FMC2_BCR(cs), FMC2_BCR_MWID, val);
+
+       return 0;
+}
+
+static int stm32_fmc2_ebi_set_cpsize(struct stm32_fmc2_ebi *ebi,
+                                    const struct stm32_fmc2_prop *prop,
+                                    int cs, u32 setup)
+{
+       u32 val;
+
+       switch (setup) {
+       case FMC2_CPSIZE_0:
+               val = FIELD_PREP(FMC2_BCR_CPSIZE, FMC2_BCR_CPSIZE_0);
+               break;
+       case FMC2_CPSIZE_128:
+               val = FIELD_PREP(FMC2_BCR_CPSIZE, FMC2_BCR_CPSIZE_128);
+               break;
+       case FMC2_CPSIZE_256:
+               val = FIELD_PREP(FMC2_BCR_CPSIZE, FMC2_BCR_CPSIZE_256);
+               break;
+       case FMC2_CPSIZE_512:
+               val = FIELD_PREP(FMC2_BCR_CPSIZE, FMC2_BCR_CPSIZE_512);
+               break;
+       case FMC2_CPSIZE_1024:
+               val = FIELD_PREP(FMC2_BCR_CPSIZE, FMC2_BCR_CPSIZE_1024);
+               break;
+       default:
+               /* Cpsize not supported */
+               return -EINVAL;
+       }
+
+       regmap_update_bits(ebi->regmap, FMC2_BCR(cs), FMC2_BCR_CPSIZE, val);
+
+       return 0;
+}
+
+static int stm32_fmc2_ebi_set_bl_setup(struct stm32_fmc2_ebi *ebi,
+                                      const struct stm32_fmc2_prop *prop,
+                                      int cs, u32 setup)
+{
+       u32 val;
+
+       val = min_t(u32, setup, FMC2_BCR_NBLSET_MAX);
+       val = FIELD_PREP(FMC2_BCR_NBLSET, val);
+       regmap_update_bits(ebi->regmap, FMC2_BCR(cs), FMC2_BCR_NBLSET, val);
+
+       return 0;
+}
+
+static int stm32_fmc2_ebi_set_address_setup(struct stm32_fmc2_ebi *ebi,
+                                           const struct stm32_fmc2_prop *prop,
+                                           int cs, u32 setup)
+{
+       u32 bcr, bxtr, reg;
+       u32 val = FIELD_PREP(FMC2_BXTR_ACCMOD, FMC2_BXTR_EXTMOD_D);
+       int ret;
+
+       ret = stm32_fmc2_ebi_get_reg(prop->reg_type, cs, &reg);
+       if (ret)
+               return ret;
+
+       regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
+       if (prop->reg_type == FMC2_REG_BWTR)
+               regmap_read(ebi->regmap, FMC2_BWTR(cs), &bxtr);
+       else
+               regmap_read(ebi->regmap, FMC2_BTR(cs), &bxtr);
+
+       if ((bxtr & FMC2_BXTR_ACCMOD) == val || bcr & FMC2_BCR_MUXEN)
+               val = clamp_val(setup, 1, FMC2_BXTR_ADDSET_MAX);
+       else
+               val = min_t(u32, setup, FMC2_BXTR_ADDSET_MAX);
+       val = FIELD_PREP(FMC2_BXTR_ADDSET, val);
+       regmap_update_bits(ebi->regmap, reg, FMC2_BXTR_ADDSET, val);
+
+       return 0;
+}
+
+static int stm32_fmc2_ebi_set_address_hold(struct stm32_fmc2_ebi *ebi,
+                                          const struct stm32_fmc2_prop *prop,
+                                          int cs, u32 setup)
+{
+       u32 val, reg;
+       int ret;
+
+       ret = stm32_fmc2_ebi_get_reg(prop->reg_type, cs, &reg);
+       if (ret)
+               return ret;
+
+       val = clamp_val(setup, 1, FMC2_BXTR_ADDHLD_MAX);
+       val = FIELD_PREP(FMC2_BXTR_ADDHLD, val);
+       regmap_update_bits(ebi->regmap, reg, FMC2_BXTR_ADDHLD, val);
+
+       return 0;
+}
+
+static int stm32_fmc2_ebi_set_data_setup(struct stm32_fmc2_ebi *ebi,
+                                        const struct stm32_fmc2_prop *prop,
+                                        int cs, u32 setup)
+{
+       u32 val, reg;
+       int ret;
+
+       ret = stm32_fmc2_ebi_get_reg(prop->reg_type, cs, &reg);
+       if (ret)
+               return ret;
+
+       val = clamp_val(setup, 1, FMC2_BXTR_DATAST_MAX);
+       val = FIELD_PREP(FMC2_BXTR_DATAST, val);
+       regmap_update_bits(ebi->regmap, reg, FMC2_BXTR_DATAST, val);
+
+       return 0;
+}
+
+static int stm32_fmc2_ebi_set_bus_turnaround(struct stm32_fmc2_ebi *ebi,
+                                            const struct stm32_fmc2_prop *prop,
+                                            int cs, u32 setup)
+{
+       u32 val, reg;
+       int ret;
+
+       ret = stm32_fmc2_ebi_get_reg(prop->reg_type, cs, &reg);
+       if (ret)
+               return ret;
+
+       val = setup ? min_t(u32, setup - 1, FMC2_BXTR_BUSTURN_MAX) : 0;
+       val = FIELD_PREP(FMC2_BXTR_BUSTURN, val);
+       regmap_update_bits(ebi->regmap, reg, FMC2_BXTR_BUSTURN, val);
+
+       return 0;
+}
+
+static int stm32_fmc2_ebi_set_data_hold(struct stm32_fmc2_ebi *ebi,
+                                       const struct stm32_fmc2_prop *prop,
+                                       int cs, u32 setup)
+{
+       u32 val, reg;
+       int ret;
+
+       ret = stm32_fmc2_ebi_get_reg(prop->reg_type, cs, &reg);
+       if (ret)
+               return ret;
+
+       if (prop->reg_type == FMC2_REG_BWTR)
+               val = setup ? min_t(u32, setup - 1, FMC2_BXTR_DATAHLD_MAX) : 0;
+       else
+               val = min_t(u32, setup, FMC2_BXTR_DATAHLD_MAX);
+       val = FIELD_PREP(FMC2_BXTR_DATAHLD, val);
+       regmap_update_bits(ebi->regmap, reg, FMC2_BXTR_DATAHLD, val);
+
+       return 0;
+}
+
+static int stm32_fmc2_ebi_set_clk_period(struct stm32_fmc2_ebi *ebi,
+                                        const struct stm32_fmc2_prop *prop,
+                                        int cs, u32 setup)
+{
+       u32 val;
+
+       val = setup ? clamp_val(setup - 1, 1, FMC2_BTR_CLKDIV_MAX) : 1;
+       val = FIELD_PREP(FMC2_BTR_CLKDIV, val);
+       regmap_update_bits(ebi->regmap, FMC2_BTR(cs), FMC2_BTR_CLKDIV, val);
+
+       return 0;
+}
+
+static int stm32_fmc2_ebi_set_data_latency(struct stm32_fmc2_ebi *ebi,
+                                          const struct stm32_fmc2_prop *prop,
+                                          int cs, u32 setup)
+{
+       u32 val;
+
+       val = setup > 1 ? min_t(u32, setup - 2, FMC2_BTR_DATLAT_MAX) : 0;
+       val = FIELD_PREP(FMC2_BTR_DATLAT, val);
+       regmap_update_bits(ebi->regmap, FMC2_BTR(cs), FMC2_BTR_DATLAT, val);
+
+       return 0;
+}
+
+static int stm32_fmc2_ebi_set_max_low_pulse(struct stm32_fmc2_ebi *ebi,
+                                           const struct stm32_fmc2_prop *prop,
+                                           int cs, u32 setup)
+{
+       u32 old_val, new_val, pcscntr;
+
+       if (setup < 1)
+               return 0;
+
+       regmap_read(ebi->regmap, FMC2_PCSCNTR, &pcscntr);
+
+       /* Enable counter for the bank */
+       regmap_update_bits(ebi->regmap, FMC2_PCSCNTR,
+                          FMC2_PCSCNTR_CNTBEN(cs),
+                          FMC2_PCSCNTR_CNTBEN(cs));
+
+       new_val = min_t(u32, setup - 1, FMC2_PCSCNTR_CSCOUNT_MAX);
+       old_val = FIELD_GET(FMC2_PCSCNTR_CSCOUNT, pcscntr);
+       if (old_val && new_val > old_val)
+               /* Keep current counter value */
+               return 0;
+
+       new_val = FIELD_PREP(FMC2_PCSCNTR_CSCOUNT, new_val);
+       regmap_update_bits(ebi->regmap, FMC2_PCSCNTR,
+                          FMC2_PCSCNTR_CSCOUNT, new_val);
+
+       return 0;
+}
+
+static const struct stm32_fmc2_prop stm32_fmc2_child_props[] = {
+       /* st,fmc2-ebi-cs-transaction-type must be the first property */
+       {
+               .name = "st,fmc2-ebi-cs-transaction-type",
+               .mprop = true,
+               .set = stm32_fmc2_ebi_set_trans_type,
+       },
+       {
+               .name = "st,fmc2-ebi-cs-cclk-enable",
+               .bprop = true,
+               .reg_type = FMC2_REG_BCR,
+               .reg_mask = FMC2_BCR1_CCLKEN,
+               .check = stm32_fmc2_ebi_check_cclk,
+               .set = stm32_fmc2_ebi_set_bit_field,
+       },
+       {
+               .name = "st,fmc2-ebi-cs-mux-enable",
+               .bprop = true,
+               .reg_type = FMC2_REG_BCR,
+               .reg_mask = FMC2_BCR_MUXEN,
+               .check = stm32_fmc2_ebi_check_mux,
+               .set = stm32_fmc2_ebi_set_bit_field,
+       },
+       {
+               .name = "st,fmc2-ebi-cs-buswidth",
+               .reset_val = FMC2_BUSWIDTH_16,
+               .set = stm32_fmc2_ebi_set_buswidth,
+       },
+       {
+               .name = "st,fmc2-ebi-cs-waitpol-high",
+               .bprop = true,
+               .reg_type = FMC2_REG_BCR,
+               .reg_mask = FMC2_BCR_WAITPOL,
+               .set = stm32_fmc2_ebi_set_bit_field,
+       },
+       {
+               .name = "st,fmc2-ebi-cs-waitcfg-enable",
+               .bprop = true,
+               .reg_type = FMC2_REG_BCR,
+               .reg_mask = FMC2_BCR_WAITCFG,
+               .check = stm32_fmc2_ebi_check_waitcfg,
+               .set = stm32_fmc2_ebi_set_bit_field,
+       },
+       {
+               .name = "st,fmc2-ebi-cs-wait-enable",
+               .bprop = true,
+               .reg_type = FMC2_REG_BCR,
+               .reg_mask = FMC2_BCR_WAITEN,
+               .check = stm32_fmc2_ebi_check_sync_trans,
+               .set = stm32_fmc2_ebi_set_bit_field,
+       },
+       {
+               .name = "st,fmc2-ebi-cs-asyncwait-enable",
+               .bprop = true,
+               .reg_type = FMC2_REG_BCR,
+               .reg_mask = FMC2_BCR_ASYNCWAIT,
+               .check = stm32_fmc2_ebi_check_async_trans,
+               .set = stm32_fmc2_ebi_set_bit_field,
+       },
+       {
+               .name = "st,fmc2-ebi-cs-cpsize",
+               .check = stm32_fmc2_ebi_check_cpsize,
+               .set = stm32_fmc2_ebi_set_cpsize,
+       },
+       {
+               .name = "st,fmc2-ebi-cs-byte-lane-setup-ns",
+               .calculate = stm32_fmc2_ebi_ns_to_clock_cycles,
+               .set = stm32_fmc2_ebi_set_bl_setup,
+       },
+       {
+               .name = "st,fmc2-ebi-cs-address-setup-ns",
+               .reg_type = FMC2_REG_BTR,
+               .reset_val = FMC2_BXTR_ADDSET_MAX,
+               .check = stm32_fmc2_ebi_check_async_trans,
+               .calculate = stm32_fmc2_ebi_ns_to_clock_cycles,
+               .set = stm32_fmc2_ebi_set_address_setup,
+       },
+       {
+               .name = "st,fmc2-ebi-cs-address-hold-ns",
+               .reg_type = FMC2_REG_BTR,
+               .reset_val = FMC2_BXTR_ADDHLD_MAX,
+               .check = stm32_fmc2_ebi_check_address_hold,
+               .calculate = stm32_fmc2_ebi_ns_to_clock_cycles,
+               .set = stm32_fmc2_ebi_set_address_hold,
+       },
+       {
+               .name = "st,fmc2-ebi-cs-data-setup-ns",
+               .reg_type = FMC2_REG_BTR,
+               .reset_val = FMC2_BXTR_DATAST_MAX,
+               .check = stm32_fmc2_ebi_check_async_trans,
+               .calculate = stm32_fmc2_ebi_ns_to_clock_cycles,
+               .set = stm32_fmc2_ebi_set_data_setup,
+       },
+       {
+               .name = "st,fmc2-ebi-cs-bus-turnaround-ns",
+               .reg_type = FMC2_REG_BTR,
+               .reset_val = FMC2_BXTR_BUSTURN_MAX + 1,
+               .calculate = stm32_fmc2_ebi_ns_to_clock_cycles,
+               .set = stm32_fmc2_ebi_set_bus_turnaround,
+       },
+       {
+               .name = "st,fmc2-ebi-cs-data-hold-ns",
+               .reg_type = FMC2_REG_BTR,
+               .check = stm32_fmc2_ebi_check_async_trans,
+               .calculate = stm32_fmc2_ebi_ns_to_clock_cycles,
+               .set = stm32_fmc2_ebi_set_data_hold,
+       },
+       {
+               .name = "st,fmc2-ebi-cs-clk-period-ns",
+               .reset_val = FMC2_BTR_CLKDIV_MAX + 1,
+               .check = stm32_fmc2_ebi_check_clk_period,
+               .calculate = stm32_fmc2_ebi_ns_to_clock_cycles,
+               .set = stm32_fmc2_ebi_set_clk_period,
+       },
+       {
+               .name = "st,fmc2-ebi-cs-data-latency-ns",
+               .check = stm32_fmc2_ebi_check_sync_trans,
+               .calculate = stm32_fmc2_ebi_ns_to_clk_period,
+               .set = stm32_fmc2_ebi_set_data_latency,
+       },
+       {
+               .name = "st,fmc2-ebi-cs-write-address-setup-ns",
+               .reg_type = FMC2_REG_BWTR,
+               .reset_val = FMC2_BXTR_ADDSET_MAX,
+               .check = stm32_fmc2_ebi_check_async_trans,
+               .calculate = stm32_fmc2_ebi_ns_to_clock_cycles,
+               .set = stm32_fmc2_ebi_set_address_setup,
+       },
+       {
+               .name = "st,fmc2-ebi-cs-write-address-hold-ns",
+               .reg_type = FMC2_REG_BWTR,
+               .reset_val = FMC2_BXTR_ADDHLD_MAX,
+               .check = stm32_fmc2_ebi_check_address_hold,
+               .calculate = stm32_fmc2_ebi_ns_to_clock_cycles,
+               .set = stm32_fmc2_ebi_set_address_hold,
+       },
+       {
+               .name = "st,fmc2-ebi-cs-write-data-setup-ns",
+               .reg_type = FMC2_REG_BWTR,
+               .reset_val = FMC2_BXTR_DATAST_MAX,
+               .check = stm32_fmc2_ebi_check_async_trans,
+               .calculate = stm32_fmc2_ebi_ns_to_clock_cycles,
+               .set = stm32_fmc2_ebi_set_data_setup,
+       },
+       {
+               .name = "st,fmc2-ebi-cs-write-bus-turnaround-ns",
+               .reg_type = FMC2_REG_BWTR,
+               .reset_val = FMC2_BXTR_BUSTURN_MAX + 1,
+               .calculate = stm32_fmc2_ebi_ns_to_clock_cycles,
+               .set = stm32_fmc2_ebi_set_bus_turnaround,
+       },
+       {
+               .name = "st,fmc2-ebi-cs-write-data-hold-ns",
+               .reg_type = FMC2_REG_BWTR,
+               .check = stm32_fmc2_ebi_check_async_trans,
+               .calculate = stm32_fmc2_ebi_ns_to_clock_cycles,
+               .set = stm32_fmc2_ebi_set_data_hold,
+       },
+       {
+               .name = "st,fmc2-ebi-cs-max-low-pulse-ns",
+               .calculate = stm32_fmc2_ebi_ns_to_clock_cycles,
+               .set = stm32_fmc2_ebi_set_max_low_pulse,
+       },
+};
+
+static int stm32_fmc2_ebi_parse_prop(struct stm32_fmc2_ebi *ebi,
+                                    struct device_node *dev_node,
+                                    const struct stm32_fmc2_prop *prop,
+                                    int cs)
+{
+       struct device *dev = ebi->dev;
+       u32 setup = 0;
+
+       if (!prop->set) {
+               dev_err(dev, "property %s is not well defined\n", prop->name);
+               return -EINVAL;
+       }
+
+       if (prop->check && prop->check(ebi, prop, cs))
+               /* Skip this property */
+               return 0;
+
+       if (prop->bprop) {
+               bool bprop;
+
+               bprop = of_property_read_bool(dev_node, prop->name);
+               if (prop->mprop && !bprop) {
+                       dev_err(dev, "mandatory property %s not defined in the device tree\n",
+                               prop->name);
+                       return -EINVAL;
+               }
+
+               if (bprop)
+                       setup = 1;
+       } else {
+               u32 val;
+               int ret;
+
+               ret = of_property_read_u32(dev_node, prop->name, &val);
+               if (prop->mprop && ret) {
+                       dev_err(dev, "mandatory property %s not defined in the device tree\n",
+                               prop->name);
+                       return ret;
+               }
+
+               if (ret)
+                       setup = prop->reset_val;
+               else if (prop->calculate)
+                       setup = prop->calculate(ebi, cs, val);
+               else
+                       setup = val;
+       }
+
+       return prop->set(ebi, prop, cs, setup);
+}
+
+static void stm32_fmc2_ebi_enable_bank(struct stm32_fmc2_ebi *ebi, int cs)
+{
+       regmap_update_bits(ebi->regmap, FMC2_BCR(cs),
+                          FMC2_BCR_MBKEN, FMC2_BCR_MBKEN);
+}
+
+static void stm32_fmc2_ebi_disable_bank(struct stm32_fmc2_ebi *ebi, int cs)
+{
+       regmap_update_bits(ebi->regmap, FMC2_BCR(cs), FMC2_BCR_MBKEN, 0);
+}
+
+static void stm32_fmc2_ebi_save_setup(struct stm32_fmc2_ebi *ebi)
+{
+       unsigned int cs;
+
+       for (cs = 0; cs < FMC2_MAX_EBI_CE; cs++) {
+               regmap_read(ebi->regmap, FMC2_BCR(cs), &ebi->bcr[cs]);
+               regmap_read(ebi->regmap, FMC2_BTR(cs), &ebi->btr[cs]);
+               regmap_read(ebi->regmap, FMC2_BWTR(cs), &ebi->bwtr[cs]);
+       }
+
+       regmap_read(ebi->regmap, FMC2_PCSCNTR, &ebi->pcscntr);
+}
+
+static void stm32_fmc2_ebi_set_setup(struct stm32_fmc2_ebi *ebi)
+{
+       unsigned int cs;
+
+       for (cs = 0; cs < FMC2_MAX_EBI_CE; cs++) {
+               regmap_write(ebi->regmap, FMC2_BCR(cs), ebi->bcr[cs]);
+               regmap_write(ebi->regmap, FMC2_BTR(cs), ebi->btr[cs]);
+               regmap_write(ebi->regmap, FMC2_BWTR(cs), ebi->bwtr[cs]);
+       }
+
+       regmap_write(ebi->regmap, FMC2_PCSCNTR, ebi->pcscntr);
+}
+
+static void stm32_fmc2_ebi_disable_banks(struct stm32_fmc2_ebi *ebi)
+{
+       unsigned int cs;
+
+       for (cs = 0; cs < FMC2_MAX_EBI_CE; cs++) {
+               if (!(ebi->bank_assigned & BIT(cs)))
+                       continue;
+
+               stm32_fmc2_ebi_disable_bank(ebi, cs);
+       }
+}
+
+/* The NWAIT signal must not be used by both the EBI and NAND controllers */
+static bool stm32_fmc2_ebi_nwait_used_by_ctrls(struct stm32_fmc2_ebi *ebi)
+{
+       unsigned int cs;
+       u32 bcr;
+
+       for (cs = 0; cs < FMC2_MAX_EBI_CE; cs++) {
+               if (!(ebi->bank_assigned & BIT(cs)))
+                       continue;
+
+               regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
+               if ((bcr & FMC2_BCR_WAITEN || bcr & FMC2_BCR_ASYNCWAIT) &&
+                   ebi->bank_assigned & BIT(FMC2_NAND))
+                       return true;
+       }
+
+       return false;
+}
+
+static void stm32_fmc2_ebi_enable(struct stm32_fmc2_ebi *ebi)
+{
+       regmap_update_bits(ebi->regmap, FMC2_BCR1,
+                          FMC2_BCR1_FMC2EN, FMC2_BCR1_FMC2EN);
+}
+
+static void stm32_fmc2_ebi_disable(struct stm32_fmc2_ebi *ebi)
+{
+       regmap_update_bits(ebi->regmap, FMC2_BCR1, FMC2_BCR1_FMC2EN, 0);
+}
+
+static int stm32_fmc2_ebi_setup_cs(struct stm32_fmc2_ebi *ebi,
+                                  struct device_node *dev_node,
+                                  u32 cs)
+{
+       unsigned int i;
+       int ret;
+
+       stm32_fmc2_ebi_disable_bank(ebi, cs);
+
+       for (i = 0; i < ARRAY_SIZE(stm32_fmc2_child_props); i++) {
+               const struct stm32_fmc2_prop *p = &stm32_fmc2_child_props[i];
+
+               ret = stm32_fmc2_ebi_parse_prop(ebi, dev_node, p, cs);
+               if (ret) {
+                       dev_err(ebi->dev, "property %s could not be set: %d\n",
+                               p->name, ret);
+                       return ret;
+               }
+       }
+
+       stm32_fmc2_ebi_enable_bank(ebi, cs);
+
+       return 0;
+}
+
+static int stm32_fmc2_ebi_parse_dt(struct stm32_fmc2_ebi *ebi)
+{
+       struct device *dev = ebi->dev;
+       struct device_node *child;
+       bool child_found = false;
+       u32 bank;
+       int ret;
+
+       for_each_available_child_of_node(dev->of_node, child) {
+               ret = of_property_read_u32(child, "reg", &bank);
+               if (ret) {
+                       dev_err(dev, "could not retrieve reg property: %d\n",
+                               ret);
+                       return ret;
+               }
+
+               if (bank >= FMC2_MAX_BANKS) {
+                       dev_err(dev, "invalid reg value: %d\n", bank);
+                       return -EINVAL;
+               }
+
+               if (ebi->bank_assigned & BIT(bank)) {
+                       dev_err(dev, "bank already assigned: %d\n", bank);
+                       return -EINVAL;
+               }
+
+               if (bank < FMC2_MAX_EBI_CE) {
+                       ret = stm32_fmc2_ebi_setup_cs(ebi, child, bank);
+                       if (ret) {
+                               dev_err(dev, "setup chip select %d failed: %d\n",
+                                       bank, ret);
+                               return ret;
+                       }
+               }
+
+               ebi->bank_assigned |= BIT(bank);
+               child_found = true;
+       }
+
+       if (!child_found) {
+               dev_warn(dev, "no subnodes found, disable the driver.\n");
+               return -ENODEV;
+       }
+
+       if (stm32_fmc2_ebi_nwait_used_by_ctrls(ebi)) {
+               dev_err(dev, "NWAIT signal connected to EBI and NAND controllers\n");
+               return -EINVAL;
+       }
+
+       stm32_fmc2_ebi_enable(ebi);
+
+       return of_platform_populate(dev->of_node, NULL, NULL, dev);
+}
+
+static int stm32_fmc2_ebi_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct stm32_fmc2_ebi *ebi;
+       struct reset_control *rstc;
+       int ret;
+
+       ebi = devm_kzalloc(dev, sizeof(*ebi), GFP_KERNEL);
+       if (!ebi)
+               return -ENOMEM;
+
+       ebi->dev = dev;
+
+       ebi->regmap = device_node_to_regmap(dev->of_node);
+       if (IS_ERR(ebi->regmap))
+               return PTR_ERR(ebi->regmap);
+
+       ebi->clk = devm_clk_get(dev, NULL);
+       if (IS_ERR(ebi->clk))
+               return PTR_ERR(ebi->clk);
+
+       rstc = devm_reset_control_get(dev, NULL);
+       if (PTR_ERR(rstc) == -EPROBE_DEFER)
+               return -EPROBE_DEFER;
+
+       ret = clk_prepare_enable(ebi->clk);
+       if (ret)
+               return ret;
+
+       if (!IS_ERR(rstc)) {
+               reset_control_assert(rstc);
+               reset_control_deassert(rstc);
+       }
+
+       ret = stm32_fmc2_ebi_parse_dt(ebi);
+       if (ret)
+               goto err_release;
+
+       stm32_fmc2_ebi_save_setup(ebi);
+       platform_set_drvdata(pdev, ebi);
+
+       return 0;
+
+err_release:
+       stm32_fmc2_ebi_disable_banks(ebi);
+       stm32_fmc2_ebi_disable(ebi);
+       clk_disable_unprepare(ebi->clk);
+
+       return ret;
+}
+
+static int stm32_fmc2_ebi_remove(struct platform_device *pdev)
+{
+       struct stm32_fmc2_ebi *ebi = platform_get_drvdata(pdev);
+
+       of_platform_depopulate(&pdev->dev);
+       stm32_fmc2_ebi_disable_banks(ebi);
+       stm32_fmc2_ebi_disable(ebi);
+       clk_disable_unprepare(ebi->clk);
+
+       return 0;
+}
+
+static int __maybe_unused stm32_fmc2_ebi_suspend(struct device *dev)
+{
+       struct stm32_fmc2_ebi *ebi = dev_get_drvdata(dev);
+
+       stm32_fmc2_ebi_disable(ebi);
+       clk_disable_unprepare(ebi->clk);
+       pinctrl_pm_select_sleep_state(dev);
+
+       return 0;
+}
+
+static int __maybe_unused stm32_fmc2_ebi_resume(struct device *dev)
+{
+       struct stm32_fmc2_ebi *ebi = dev_get_drvdata(dev);
+       int ret;
+
+       pinctrl_pm_select_default_state(dev);
+
+       ret = clk_prepare_enable(ebi->clk);
+       if (ret)
+               return ret;
+
+       stm32_fmc2_ebi_set_setup(ebi);
+       stm32_fmc2_ebi_enable(ebi);
+
+       return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(stm32_fmc2_ebi_pm_ops, stm32_fmc2_ebi_suspend,
+                        stm32_fmc2_ebi_resume);
+
+static const struct of_device_id stm32_fmc2_ebi_match[] = {
+       {.compatible = "st,stm32mp1-fmc2-ebi"},
+       {}
+};
+MODULE_DEVICE_TABLE(of, stm32_fmc2_ebi_match);
+
+static struct platform_driver stm32_fmc2_ebi_driver = {
+       .probe  = stm32_fmc2_ebi_probe,
+       .remove = stm32_fmc2_ebi_remove,
+       .driver = {
+               .name = "stm32_fmc2_ebi",
+               .of_match_table = stm32_fmc2_ebi_match,
+               .pm = &stm32_fmc2_ebi_pm_ops,
+       },
+};
+module_platform_driver(stm32_fmc2_ebi_driver);
+
+MODULE_ALIAS("platform:stm32_fmc2_ebi");
+MODULE_AUTHOR("Christophe Kerello <christophe.kerello@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics STM32 FMC2 ebi driver");
+MODULE_LICENSE("GPL v2");
index a37d7d1..33df083 100644 (file)
@@ -1193,18 +1193,6 @@ config MFD_SKY81452
          This driver can also be built as a module.  If so, the module
          will be called sky81452.
 
-config MFD_SMSC
-       bool "SMSC ECE1099 series chips"
-       depends on I2C=y
-       select MFD_CORE
-       select REGMAP_I2C
-       help
-         If you say yes here you get support for the
-         ece1099 chips from SMSC.
-
-         To compile this driver as a module, choose M here: the
-         module will be called smsc.
-
 config MFD_SC27XX_PMIC
        tristate "Spreadtrum SC27xx PMICs"
        depends on ARCH_SPRD || COMPILE_TEST
@@ -2053,6 +2041,27 @@ config MFD_WCD934X
          This driver provides common support WCD934x audio codec and its
          associated Pin Controller, Soundwire Controller and Audio codec.
 
+config MFD_KHADAS_MCU
+       tristate "Support for Khadas System control Microcontroller"
+       depends on I2C
+       depends on ARCH_MESON || ARCH_ROCKCHIP || COMPILE_TEST
+       select MFD_CORE
+       select REGMAP_I2C
+       help
+         Support for the Khadas System control Microcontroller interface
+         present on their VIM and Edge boards.
+
+         This Microcontroller is present on the Khadas VIM1, VIM2, VIM3 and
+         Edge boards.
+
+         It provides multiple boot control features like password check,
+         power-on options, power-off control and system FAN control on recent
+         boards.
+
+         This driver provides common support for accessing the device;
+         additional drivers must be enabled in order to use the
+         functionality of the device.
+
 menu "Multimedia Capabilities Port drivers"
        depends on ARCH_SA1100
 
index 9367a92..a60e5f8 100644 (file)
@@ -127,7 +127,6 @@ obj-$(CONFIG_MFD_CPCAP)             += motorola-cpcap.o
 obj-$(CONFIG_MCP)              += mcp-core.o
 obj-$(CONFIG_MCP_SA11X0)       += mcp-sa11x0.o
 obj-$(CONFIG_MCP_UCB1200)      += ucb1x00-core.o
-obj-$(CONFIG_MFD_SMSC)        += smsc-ece1099.o
 obj-$(CONFIG_MCP_UCB1200_TS)   += ucb1x00-ts.o
 
 ifeq ($(CONFIG_SA1100_ASSABET),y)
@@ -262,5 +261,6 @@ obj-$(CONFIG_MFD_ROHM_BD70528)      += rohm-bd70528.o
 obj-$(CONFIG_MFD_ROHM_BD71828) += rohm-bd71828.o
 obj-$(CONFIG_MFD_ROHM_BD718XX) += rohm-bd718x7.o
 obj-$(CONFIG_MFD_STMFX)        += stmfx.o
+obj-$(CONFIG_MFD_KHADAS_MCU)   += khadas-mcu.o
 
 obj-$(CONFIG_SGI_MFD_IOC3)     += ioc3.o
index 57723f1..ee71ae0 100644 (file)
@@ -498,7 +498,7 @@ static ssize_t ab3100_get_set_reg(struct file *file,
        int i = 0;
 
        /* Get userspace string and assure termination */
-       buf_size = min(count, (sizeof(buf)-1));
+       buf_size = min((ssize_t)count, (ssize_t)(sizeof(buf)-1));
        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;
        buf[buf_size] = 0;
index c4751fb..c393102 100644 (file)
 
 /**
  * struct ab3100_otp
- * @dev containing device
- * @locked whether the OTP is locked, after locking, no more bits
+ * @dev: containing device
+ * @locked: whether the OTP is locked, after locking, no more bits
  *       can be changed but before locking it is still possible
  *       to change bits from 1->0.
- * @freq clocking frequency for the OTP, this frequency is either
+ * @freq: clocking frequency for the OTP, this frequency is either
  *       32768Hz or 1MHz/30
- * @paf product activation flag, indicates whether this is a real
+ * @paf: product activation flag, indicates whether this is a real
  *       product (paf true) or a lab board etc (paf false)
- * @imeich if this is set it is possible to override the
+ * @imeich: if this is set it is possible to override the
  *       IMEI number found in the tac, fac and svn fields with
  *       (secured) software
- * @cid customer ID
- * @tac type allocation code of the IMEI
- * @fac final assembly code of the IMEI
- * @svn software version number of the IMEI
- * @debugfs a debugfs file used when dumping to file
+ * @cid: customer ID
+ * @tac: type allocation code of the IMEI
+ * @fac: final assembly code of the IMEI
+ * @svn: software version number of the IMEI
+ * @debugfs: a debugfs file used when dumping to file
  */
 struct ab3100_otp {
        struct device *dev;
index 1a9a341..6d1bf7c 100644 (file)
@@ -1801,7 +1801,7 @@ static ssize_t ab8500_hwreg_write(struct file *file,
        int buf_size, ret;
 
        /* Get userspace string and assure termination */
-       buf_size = min(count, (sizeof(buf)-1));
+       buf_size = min((int)count, (int)(sizeof(buf)-1));
        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;
        buf[buf_size] = 0;
index d2a13a5..41076d1 100644 (file)
 /**
  * struct altr_sysmgr - Altera SOCFPGA System Manager
  * @regmap: the regmap used for System Manager accesses.
- * @base  : the base address for the System Manager
  */
 struct altr_sysmgr {
        struct regmap   *regmap;
-       resource_size_t *base;
 };
 
 static struct platform_driver altr_sysmgr_driver;
@@ -91,6 +89,9 @@ static struct regmap_config altr_sysmgr_regmap_cfg = {
  * altr_sysmgr_regmap_lookup_by_phandle
  * Find the sysmgr previous configured in probe() and return regmap property.
  * Return: regmap if found or error if not found.
+ *
+ * @np: Pointer to device's Device Tree node
+ * @property: Device Tree property name which references the sysmgr
  */
 struct regmap *altr_sysmgr_regmap_lookup_by_phandle(struct device_node *np,
                                                    const char *property)
@@ -127,6 +128,7 @@ static int sysmgr_probe(struct platform_device *pdev)
        struct regmap_config sysmgr_config = altr_sysmgr_regmap_cfg;
        struct device *dev = &pdev->dev;
        struct device_node *np = dev->of_node;
+       void __iomem *base;
 
        sysmgr = devm_kzalloc(dev, sizeof(*sysmgr), GFP_KERNEL);
        if (!sysmgr)
@@ -139,22 +141,19 @@ static int sysmgr_probe(struct platform_device *pdev)
        sysmgr_config.max_register = resource_size(res) -
                                     sysmgr_config.reg_stride;
        if (of_device_is_compatible(np, "altr,sys-mgr-s10")) {
-               /* Need physical address for SMCC call */
-               sysmgr->base = (resource_size_t *)res->start;
                sysmgr_config.reg_read = s10_protected_reg_read;
                sysmgr_config.reg_write = s10_protected_reg_write;
 
-               regmap = devm_regmap_init(dev, NULL, sysmgr->base,
+               /* Need physical address for SMCC call */
+               regmap = devm_regmap_init(dev, NULL, (void *)res->start,
                                          &sysmgr_config);
        } else {
-               sysmgr->base = devm_ioremap(dev, res->start,
-                                           resource_size(res));
-               if (!sysmgr->base)
+               base = devm_ioremap(dev, res->start, resource_size(res));
+               if (!base)
                        return -ENOMEM;
 
                sysmgr_config.max_register = res->end - res->start - 3;
-               regmap = devm_regmap_init_mmio(dev, sysmgr->base,
-                                              &sysmgr_config);
+               regmap = devm_regmap_init_mmio(dev, base, &sysmgr_config);
        }
 
        if (IS_ERR(regmap)) {
index f73cf76..000cb82 100644 (file)
@@ -80,7 +80,7 @@ int arizona_clk32k_disable(struct arizona *arizona)
 {
        mutex_lock(&arizona->clk_lock);
 
-       BUG_ON(arizona->clk32k_ref <= 0);
+       WARN_ON(arizona->clk32k_ref <= 0);
 
        arizona->clk32k_ref--;
 
@@ -1426,6 +1426,15 @@ err_irq:
        arizona_irq_exit(arizona);
 err_pm:
        pm_runtime_disable(arizona->dev);
+
+       switch (arizona->pdata.clk32k_src) {
+       case ARIZONA_32KZ_MCLK1:
+       case ARIZONA_32KZ_MCLK2:
+               arizona_clk32k_disable(arizona);
+               break;
+       default:
+               break;
+       }
 err_reset:
        arizona_enable_reset(arizona);
        regulator_disable(arizona->dcvdd);
@@ -1448,6 +1457,15 @@ int arizona_dev_exit(struct arizona *arizona)
        regulator_disable(arizona->dcvdd);
        regulator_put(arizona->dcvdd);
 
+       switch (arizona->pdata.clk32k_src) {
+       case ARIZONA_32KZ_MCLK1:
+       case ARIZONA_32KZ_MCLK2:
+               arizona_clk32k_disable(arizona);
+               break;
+       default:
+               break;
+       }
+
        mfd_remove_devices(arizona->dev);
        arizona_free_irq(arizona, ARIZONA_IRQ_UNDERCLOCKED, arizona);
        arizona_free_irq(arizona, ARIZONA_IRQ_OVERCLOCKED, arizona);
index 1fa2ec9..d96f1d6 100644 (file)
@@ -237,7 +237,7 @@ EXPORT_SYMBOL_GPL(atmel_smc_cs_conf_set_cycle);
  * atmel_smc_cs_conf_apply - apply an SMC CS conf
  * @regmap: the SMC regmap
  * @cs: the CS id
- * @conf the SMC CS conf to apply
+ * @conf: the SMC CS conf to apply
  *
  * Applies an SMC CS configuration.
  * Only valid on at91sam9/avr32 SoCs.
@@ -257,7 +257,7 @@ EXPORT_SYMBOL_GPL(atmel_smc_cs_conf_apply);
  * @regmap: the HSMC regmap
  * @cs: the CS id
  * @layout: the layout of registers
- * @conf the SMC CS conf to apply
+ * @conf: the SMC CS conf to apply
  *
  * Applies an SMC CS configuration.
  * Only valid on post-sama5 SoCs.
index 14f9df7..068e9c0 100644 (file)
@@ -63,6 +63,7 @@ static const struct of_device_id axp20x_i2c_of_match[] = {
        { .compatible = "x-powers,axp209", .data = (void *)AXP209_ID },
        { .compatible = "x-powers,axp221", .data = (void *)AXP221_ID },
        { .compatible = "x-powers,axp223", .data = (void *)AXP223_ID },
+       { .compatible = "x-powers,axp803", .data = (void *)AXP803_ID },
        { .compatible = "x-powers,axp806", .data = (void *)AXP806_ID },
        { },
 };
@@ -74,11 +75,13 @@ static const struct i2c_device_id axp20x_i2c_id[] = {
        { "axp209", 0 },
        { "axp221", 0 },
        { "axp223", 0 },
+       { "axp803", 0 },
        { "axp806", 0 },
        { },
 };
 MODULE_DEVICE_TABLE(i2c, axp20x_i2c_id);
 
+#ifdef CONFIG_ACPI
 static const struct acpi_device_id axp20x_i2c_acpi_match[] = {
        {
                .id = "INT33F4",
@@ -87,6 +90,7 @@ static const struct acpi_device_id axp20x_i2c_acpi_match[] = {
        { },
 };
 MODULE_DEVICE_TABLE(acpi, axp20x_i2c_acpi_match);
+#endif
 
 static struct i2c_driver axp20x_i2c_driver = {
        .driver = {
index 32c2b91..d07b43d 100644 (file)
@@ -24,7 +24,7 @@ static struct class cros_class = {
 };
 
 /**
- * cros_feature_to_name - CrOS feature id to name/short description.
+ * struct cros_feature_to_name - CrOS feature id to name/short description.
  * @id: The feature identifier.
  * @name: Device name associated with the feature id.
  * @desc: Short name that will be displayed.
@@ -36,7 +36,7 @@ struct cros_feature_to_name {
 };
 
 /**
- * cros_feature_to_cells - CrOS feature id to mfd cells association.
+ * struct cros_feature_to_cells - CrOS feature id to mfd cells association.
  * @id: The feature identifier.
  * @mfd_cells: Pointer to the array of mfd cells that needs to be added.
  * @num_cells: Number of mfd cells into the array.
index b125f90..a353d52 100644 (file)
@@ -160,7 +160,6 @@ static int da9063_clear_fault_log(struct da9063 *da9063)
 
 int da9063_device_init(struct da9063 *da9063, unsigned int irq)
 {
-       int model, variant_id, variant_code;
        int ret;
 
        ret = da9063_clear_fault_log(da9063);
@@ -171,36 +170,6 @@ int da9063_device_init(struct da9063 *da9063, unsigned int irq)
        da9063->irq_base = -1;
        da9063->chip_irq = irq;
 
-       ret = regmap_read(da9063->regmap, DA9063_REG_CHIP_ID, &model);
-       if (ret < 0) {
-               dev_err(da9063->dev, "Cannot read chip model id.\n");
-               return -EIO;
-       }
-       if (model != PMIC_CHIP_ID_DA9063) {
-               dev_err(da9063->dev, "Invalid chip model id: 0x%02x\n", model);
-               return -ENODEV;
-       }
-
-       ret = regmap_read(da9063->regmap, DA9063_REG_CHIP_VARIANT, &variant_id);
-       if (ret < 0) {
-               dev_err(da9063->dev, "Cannot read chip variant id.\n");
-               return -EIO;
-       }
-
-       variant_code = variant_id >> DA9063_CHIP_VARIANT_SHIFT;
-
-       dev_info(da9063->dev,
-                "Device detected (chip-ID: 0x%02X, var-ID: 0x%02X)\n",
-                model, variant_id);
-
-       if (variant_code < PMIC_DA9063_BB && variant_code != PMIC_DA9063_AD) {
-               dev_err(da9063->dev,
-                       "Cannot support variant code: 0x%02X\n", variant_code);
-               return -ENODEV;
-       }
-
-       da9063->variant_code = variant_code;
-
        ret = da9063_irq_init(da9063);
        if (ret) {
                dev_err(da9063->dev, "Cannot initialize interrupts.\n");
index 455de74..b8217ad 100644 (file)
 #include <linux/of.h>
 #include <linux/regulator/of_regulator.h>
 
+/*
+ * Raw I2C access required for just accessing chip and variant info before we
+ * know which device is present. The info read from the device using this
+ * approach is then used to select the correct regmap tables.
+ */
+
+#define DA9063_REG_PAGE_SIZE           0x100
+#define DA9063_REG_PAGED_ADDR_MASK     0xFF
+
+enum da9063_page_sel_buf_fmt {
+       DA9063_PAGE_SEL_BUF_PAGE_REG = 0,
+       DA9063_PAGE_SEL_BUF_PAGE_VAL,
+       DA9063_PAGE_SEL_BUF_SIZE,
+};
+
+enum da9063_paged_read_msgs {
+       DA9063_PAGED_READ_MSG_PAGE_SEL = 0,
+       DA9063_PAGED_READ_MSG_REG_SEL,
+       DA9063_PAGED_READ_MSG_DATA,
+       DA9063_PAGED_READ_MSG_CNT,
+};
+
+static int da9063_i2c_blockreg_read(struct i2c_client *client, u16 addr,
+                                   u8 *buf, int count)
+{
+       struct i2c_msg xfer[DA9063_PAGED_READ_MSG_CNT];
+       u8 page_sel_buf[DA9063_PAGE_SEL_BUF_SIZE];
+       u8 page_num, paged_addr;
+       int ret;
+
+       /* Determine page info based on register address */
+       page_num = (addr / DA9063_REG_PAGE_SIZE);
+       if (page_num > 1) {
+               dev_err(&client->dev, "Invalid register address provided\n");
+               return -EINVAL;
+       }
+
+       paged_addr = (addr % DA9063_REG_PAGE_SIZE) & DA9063_REG_PAGED_ADDR_MASK;
+       page_sel_buf[DA9063_PAGE_SEL_BUF_PAGE_REG] = DA9063_REG_PAGE_CON;
+       page_sel_buf[DA9063_PAGE_SEL_BUF_PAGE_VAL] =
+               (page_num << DA9063_I2C_PAGE_SEL_SHIFT) & DA9063_REG_PAGE_MASK;
+
+       /* Write reg address, page selection */
+       xfer[DA9063_PAGED_READ_MSG_PAGE_SEL].addr = client->addr;
+       xfer[DA9063_PAGED_READ_MSG_PAGE_SEL].flags = 0;
+       xfer[DA9063_PAGED_READ_MSG_PAGE_SEL].len = DA9063_PAGE_SEL_BUF_SIZE;
+       xfer[DA9063_PAGED_READ_MSG_PAGE_SEL].buf = page_sel_buf;
+
+       /* Select register address */
+       xfer[DA9063_PAGED_READ_MSG_REG_SEL].addr = client->addr;
+       xfer[DA9063_PAGED_READ_MSG_REG_SEL].flags = 0;
+       xfer[DA9063_PAGED_READ_MSG_REG_SEL].len = sizeof(paged_addr);
+       xfer[DA9063_PAGED_READ_MSG_REG_SEL].buf = &paged_addr;
+
+       /* Read data */
+       xfer[DA9063_PAGED_READ_MSG_DATA].addr = client->addr;
+       xfer[DA9063_PAGED_READ_MSG_DATA].flags = I2C_M_RD;
+       xfer[DA9063_PAGED_READ_MSG_DATA].len = count;
+       xfer[DA9063_PAGED_READ_MSG_DATA].buf = buf;
+
+       ret = i2c_transfer(client->adapter, xfer, DA9063_PAGED_READ_MSG_CNT);
+       if (ret < 0) {
+               dev_err(&client->dev, "Paged block read failed: %d\n", ret);
+               return ret;
+       }
+
+       if (ret != DA9063_PAGED_READ_MSG_CNT) {
+               dev_err(&client->dev, "Paged block read failed to complete\n");
+               return -EIO;
+       }
+
+       return 0;
+}
+
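As a worked illustration of the paging scheme used above (a sketch, not part of the patch): a 16-bit register address is split into an I2C page number and an in-page offset, and the page is selected through DA9063_REG_PAGE_CON before the register itself is addressed. Assuming a hypothetical address of 0x181, the split gives page 1, offset 0x81:

#include <stdio.h>

#define DA9063_REG_PAGE_SIZE        0x100
#define DA9063_REG_PAGED_ADDR_MASK  0xFF

int main(void)
{
        unsigned int addr = 0x181;  /* hypothetical 16-bit register address */
        unsigned int page_num = addr / DA9063_REG_PAGE_SIZE;
        unsigned int paged_addr = (addr % DA9063_REG_PAGE_SIZE) &
                                  DA9063_REG_PAGED_ADDR_MASK;

        /* Prints: page 1, in-page address 0x81 */
        printf("page %u, in-page address 0x%02x\n", page_num, paged_addr);
        return 0;
}

Only pages 0 and 1 are valid here, which is what the page_num > 1 check in the block read enforces.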
+enum {
+       DA9063_DEV_ID_REG = 0,
+       DA9063_VAR_ID_REG,
+       DA9063_CHIP_ID_REGS,
+};
+
+static int da9063_get_device_type(struct i2c_client *i2c, struct da9063 *da9063)
+{
+       u8 buf[DA9063_CHIP_ID_REGS];
+       int ret;
+
+       ret = da9063_i2c_blockreg_read(i2c, DA9063_REG_DEVICE_ID, buf,
+                                      DA9063_CHIP_ID_REGS);
+       if (ret)
+               return ret;
+
+       if (buf[DA9063_DEV_ID_REG] != PMIC_CHIP_ID_DA9063) {
+               dev_err(da9063->dev,
+                       "Invalid chip device ID: 0x%02x\n",
+                       buf[DA9063_DEV_ID_REG]);
+               return -ENODEV;
+       }
+
+       dev_info(da9063->dev,
+                "Device detected (chip-ID: 0x%02X, var-ID: 0x%02X)\n",
+                buf[DA9063_DEV_ID_REG], buf[DA9063_VAR_ID_REG]);
+
+       da9063->variant_code =
+               (buf[DA9063_VAR_ID_REG] & DA9063_VARIANT_ID_MRC_MASK)
+               >> DA9063_VARIANT_ID_MRC_SHIFT;
+
+       return 0;
+}
+
+/*
+ * Variant specific regmap configs
+ */
+
 static const struct regmap_range da9063_ad_readable_ranges[] = {
        regmap_reg_range(DA9063_REG_PAGE_CON, DA9063_AD_REG_SECOND_D),
        regmap_reg_range(DA9063_REG_SEQ, DA9063_REG_ID_32_31),
        regmap_reg_range(DA9063_REG_SEQ_A, DA9063_REG_AUTO3_LOW),
        regmap_reg_range(DA9063_REG_T_OFFSET, DA9063_AD_REG_GP_ID_19),
-       regmap_reg_range(DA9063_REG_CHIP_ID, DA9063_REG_CHIP_VARIANT),
+       regmap_reg_range(DA9063_REG_DEVICE_ID, DA9063_REG_VARIANT_ID),
 };
 
 static const struct regmap_range da9063_ad_writeable_ranges[] = {
@@ -72,7 +184,7 @@ static const struct regmap_range da9063_bb_readable_ranges[] = {
        regmap_reg_range(DA9063_REG_SEQ, DA9063_REG_ID_32_31),
        regmap_reg_range(DA9063_REG_SEQ_A, DA9063_REG_AUTO3_LOW),
        regmap_reg_range(DA9063_REG_T_OFFSET, DA9063_BB_REG_GP_ID_19),
-       regmap_reg_range(DA9063_REG_CHIP_ID, DA9063_REG_CHIP_VARIANT),
+       regmap_reg_range(DA9063_REG_DEVICE_ID, DA9063_REG_VARIANT_ID),
 };
 
 static const struct regmap_range da9063_bb_writeable_ranges[] = {
@@ -85,7 +197,7 @@ static const struct regmap_range da9063_bb_writeable_ranges[] = {
        regmap_reg_range(DA9063_BB_REG_GP_ID_0, DA9063_BB_REG_GP_ID_19),
 };
 
-static const struct regmap_range da9063_bb_volatile_ranges[] = {
+static const struct regmap_range da9063_bb_da_volatile_ranges[] = {
        regmap_reg_range(DA9063_REG_PAGE_CON, DA9063_REG_EVENT_D),
        regmap_reg_range(DA9063_REG_CONTROL_A, DA9063_REG_CONTROL_B),
        regmap_reg_range(DA9063_REG_CONTROL_E, DA9063_REG_CONTROL_F),
@@ -107,9 +219,9 @@ static const struct regmap_access_table da9063_bb_writeable_table = {
        .n_yes_ranges = ARRAY_SIZE(da9063_bb_writeable_ranges),
 };
 
-static const struct regmap_access_table da9063_bb_volatile_table = {
-       .yes_ranges = da9063_bb_volatile_ranges,
-       .n_yes_ranges = ARRAY_SIZE(da9063_bb_volatile_ranges),
+static const struct regmap_access_table da9063_bb_da_volatile_table = {
+       .yes_ranges = da9063_bb_da_volatile_ranges,
+       .n_yes_ranges = ARRAY_SIZE(da9063_bb_da_volatile_ranges),
 };
 
 static const struct regmap_range da9063l_bb_readable_ranges[] = {
@@ -117,7 +229,7 @@ static const struct regmap_range da9063l_bb_readable_ranges[] = {
        regmap_reg_range(DA9063_REG_SEQ, DA9063_REG_ID_32_31),
        regmap_reg_range(DA9063_REG_SEQ_A, DA9063_REG_AUTO3_LOW),
        regmap_reg_range(DA9063_REG_T_OFFSET, DA9063_BB_REG_GP_ID_19),
-       regmap_reg_range(DA9063_REG_CHIP_ID, DA9063_REG_CHIP_VARIANT),
+       regmap_reg_range(DA9063_REG_DEVICE_ID, DA9063_REG_VARIANT_ID),
 };
 
 static const struct regmap_range da9063l_bb_writeable_ranges[] = {
@@ -129,7 +241,7 @@ static const struct regmap_range da9063l_bb_writeable_ranges[] = {
        regmap_reg_range(DA9063_BB_REG_GP_ID_0, DA9063_BB_REG_GP_ID_19),
 };
 
-static const struct regmap_range da9063l_bb_volatile_ranges[] = {
+static const struct regmap_range da9063l_bb_da_volatile_ranges[] = {
        regmap_reg_range(DA9063_REG_PAGE_CON, DA9063_REG_EVENT_D),
        regmap_reg_range(DA9063_REG_CONTROL_A, DA9063_REG_CONTROL_B),
        regmap_reg_range(DA9063_REG_CONTROL_E, DA9063_REG_CONTROL_F),
@@ -151,15 +263,70 @@ static const struct regmap_access_table da9063l_bb_writeable_table = {
        .n_yes_ranges = ARRAY_SIZE(da9063l_bb_writeable_ranges),
 };
 
-static const struct regmap_access_table da9063l_bb_volatile_table = {
-       .yes_ranges = da9063l_bb_volatile_ranges,
-       .n_yes_ranges = ARRAY_SIZE(da9063l_bb_volatile_ranges),
+static const struct regmap_access_table da9063l_bb_da_volatile_table = {
+       .yes_ranges = da9063l_bb_da_volatile_ranges,
+       .n_yes_ranges = ARRAY_SIZE(da9063l_bb_da_volatile_ranges),
+};
+
+static const struct regmap_range da9063_da_readable_ranges[] = {
+       regmap_reg_range(DA9063_REG_PAGE_CON, DA9063_BB_REG_SECOND_D),
+       regmap_reg_range(DA9063_REG_SEQ, DA9063_REG_ID_32_31),
+       regmap_reg_range(DA9063_REG_SEQ_A, DA9063_REG_AUTO3_LOW),
+       regmap_reg_range(DA9063_REG_T_OFFSET, DA9063_BB_REG_GP_ID_11),
+       regmap_reg_range(DA9063_REG_DEVICE_ID, DA9063_REG_VARIANT_ID),
+};
+
+static const struct regmap_range da9063_da_writeable_ranges[] = {
+       regmap_reg_range(DA9063_REG_PAGE_CON, DA9063_REG_PAGE_CON),
+       regmap_reg_range(DA9063_REG_FAULT_LOG, DA9063_REG_VSYS_MON),
+       regmap_reg_range(DA9063_REG_COUNT_S, DA9063_BB_REG_ALARM_Y),
+       regmap_reg_range(DA9063_REG_SEQ, DA9063_REG_ID_32_31),
+       regmap_reg_range(DA9063_REG_SEQ_A, DA9063_REG_AUTO3_LOW),
+       regmap_reg_range(DA9063_REG_CONFIG_I, DA9063_BB_REG_MON_REG_4),
+       regmap_reg_range(DA9063_BB_REG_GP_ID_0, DA9063_BB_REG_GP_ID_11),
+};
+
+static const struct regmap_access_table da9063_da_readable_table = {
+       .yes_ranges = da9063_da_readable_ranges,
+       .n_yes_ranges = ARRAY_SIZE(da9063_da_readable_ranges),
+};
+
+static const struct regmap_access_table da9063_da_writeable_table = {
+       .yes_ranges = da9063_da_writeable_ranges,
+       .n_yes_ranges = ARRAY_SIZE(da9063_da_writeable_ranges),
+};
+
+static const struct regmap_range da9063l_da_readable_ranges[] = {
+       regmap_reg_range(DA9063_REG_PAGE_CON, DA9063_REG_MON_A10_RES),
+       regmap_reg_range(DA9063_REG_SEQ, DA9063_REG_ID_32_31),
+       regmap_reg_range(DA9063_REG_SEQ_A, DA9063_REG_AUTO3_LOW),
+       regmap_reg_range(DA9063_REG_T_OFFSET, DA9063_BB_REG_GP_ID_11),
+       regmap_reg_range(DA9063_REG_DEVICE_ID, DA9063_REG_VARIANT_ID),
+};
+
+static const struct regmap_range da9063l_da_writeable_ranges[] = {
+       regmap_reg_range(DA9063_REG_PAGE_CON, DA9063_REG_PAGE_CON),
+       regmap_reg_range(DA9063_REG_FAULT_LOG, DA9063_REG_VSYS_MON),
+       regmap_reg_range(DA9063_REG_SEQ, DA9063_REG_ID_32_31),
+       regmap_reg_range(DA9063_REG_SEQ_A, DA9063_REG_AUTO3_LOW),
+       regmap_reg_range(DA9063_REG_CONFIG_I, DA9063_BB_REG_MON_REG_4),
+       regmap_reg_range(DA9063_BB_REG_GP_ID_0, DA9063_BB_REG_GP_ID_11),
+};
+
+static const struct regmap_access_table da9063l_da_readable_table = {
+       .yes_ranges = da9063l_da_readable_ranges,
+       .n_yes_ranges = ARRAY_SIZE(da9063l_da_readable_ranges),
+};
+
+static const struct regmap_access_table da9063l_da_writeable_table = {
+       .yes_ranges = da9063l_da_writeable_ranges,
+       .n_yes_ranges = ARRAY_SIZE(da9063l_da_writeable_ranges),
 };
 
 static const struct regmap_range_cfg da9063_range_cfg[] = {
        {
                .range_min = DA9063_REG_PAGE_CON,
-               .range_max = DA9063_REG_CHIP_VARIANT,
+               .range_max = DA9063_REG_CONFIG_ID,
                .selector_reg = DA9063_REG_PAGE_CON,
                .selector_mask = 1 << DA9063_I2C_PAGE_SEL_SHIFT,
                .selector_shift = DA9063_I2C_PAGE_SEL_SHIFT,
@@ -173,7 +340,7 @@ static struct regmap_config da9063_regmap_config = {
        .val_bits = 8,
        .ranges = da9063_range_cfg,
        .num_ranges = ARRAY_SIZE(da9063_range_cfg),
-       .max_register = DA9063_REG_CHIP_VARIANT,
+       .max_register = DA9063_REG_CONFIG_ID,
 
        .cache_type = REGCACHE_RBTREE,
 };
@@ -199,18 +366,72 @@ static int da9063_i2c_probe(struct i2c_client *i2c,
        da9063->chip_irq = i2c->irq;
        da9063->type = id->driver_data;
 
-       if (da9063->variant_code == PMIC_DA9063_AD) {
-               da9063_regmap_config.rd_table = &da9063_ad_readable_table;
-               da9063_regmap_config.wr_table = &da9063_ad_writeable_table;
-               da9063_regmap_config.volatile_table = &da9063_ad_volatile_table;
-       } else if (da9063->type == PMIC_TYPE_DA9063L) {
-               da9063_regmap_config.rd_table = &da9063l_bb_readable_table;
-               da9063_regmap_config.wr_table = &da9063l_bb_writeable_table;
-               da9063_regmap_config.volatile_table = &da9063l_bb_volatile_table;
-       } else {
-               da9063_regmap_config.rd_table = &da9063_bb_readable_table;
-               da9063_regmap_config.wr_table = &da9063_bb_writeable_table;
-               da9063_regmap_config.volatile_table = &da9063_bb_volatile_table;
+       ret = da9063_get_device_type(i2c, da9063);
+       if (ret)
+               return ret;
+
+       switch (da9063->type) {
+       case PMIC_TYPE_DA9063:
+               switch (da9063->variant_code) {
+               case PMIC_DA9063_AD:
+                       da9063_regmap_config.rd_table =
+                               &da9063_ad_readable_table;
+                       da9063_regmap_config.wr_table =
+                               &da9063_ad_writeable_table;
+                       da9063_regmap_config.volatile_table =
+                               &da9063_ad_volatile_table;
+                       break;
+               case PMIC_DA9063_BB:
+               case PMIC_DA9063_CA:
+                       da9063_regmap_config.rd_table =
+                               &da9063_bb_readable_table;
+                       da9063_regmap_config.wr_table =
+                               &da9063_bb_writeable_table;
+                       da9063_regmap_config.volatile_table =
+                               &da9063_bb_da_volatile_table;
+                       break;
+               case PMIC_DA9063_DA:
+                       da9063_regmap_config.rd_table =
+                               &da9063_da_readable_table;
+                       da9063_regmap_config.wr_table =
+                               &da9063_da_writeable_table;
+                       da9063_regmap_config.volatile_table =
+                               &da9063_bb_da_volatile_table;
+                       break;
+               default:
+                       dev_err(da9063->dev,
+                               "Chip variant not supported for DA9063\n");
+                       return -ENODEV;
+               }
+               break;
+       case PMIC_TYPE_DA9063L:
+               switch (da9063->variant_code) {
+               case PMIC_DA9063_BB:
+               case PMIC_DA9063_CA:
+                       da9063_regmap_config.rd_table =
+                               &da9063l_bb_readable_table;
+                       da9063_regmap_config.wr_table =
+                               &da9063l_bb_writeable_table;
+                       da9063_regmap_config.volatile_table =
+                               &da9063l_bb_da_volatile_table;
+                       break;
+               case PMIC_DA9063_DA:
+                       da9063_regmap_config.rd_table =
+                               &da9063l_da_readable_table;
+                       da9063_regmap_config.wr_table =
+                               &da9063l_da_writeable_table;
+                       da9063_regmap_config.volatile_table =
+                               &da9063l_bb_da_volatile_table;
+                       break;
+               default:
+                       dev_err(da9063->dev,
+                               "Chip variant not supported for DA9063L\n");
+                       return -ENODEV;
+               }
+               break;
+       default:
+               dev_err(da9063->dev, "Chip type not supported\n");
+               return -ENODEV;
        }
 
        da9063->regmap = devm_regmap_init_i2c(i2c, &da9063_regmap_config);
index 0452b43..a9d9c1c 100644 (file)
@@ -2276,6 +2276,8 @@ bool db8500_prcmu_is_ac_wake_requested(void)
  *
  * Saves the reset reason code and then sets the APE_SOFTRST register which
  * fires interrupt to fw
+ *
+ * @reset_code: The reason for system reset
  */
 void db8500_prcmu_system_reset(u16 reset_code)
 {
@@ -3004,10 +3006,6 @@ static int db8500_prcmu_register_ab8500(struct device *parent)
        return mfd_add_devices(parent, 0, ab850x_cell, 1, NULL, 0, NULL);
 }
 
-/**
- * prcmu_fw_init - arch init call for the Linux PRCMU fw init logic
- *
- */
 static int db8500_prcmu_probe(struct platform_device *pdev)
 {
        struct device_node *np = pdev->dev.of_node;
index 39276fa..83e676a 100644 (file)
@@ -287,7 +287,11 @@ static void dln2_rx(struct urb *urb)
        len = urb->actual_length - sizeof(struct dln2_header);
 
        if (handle == DLN2_HANDLE_EVENT) {
+               unsigned long flags;
+
+               spin_lock_irqsave(&dln2->event_cb_lock, flags);
                dln2_run_event_callbacks(dln2, id, echo, data, len);
+               spin_unlock_irqrestore(&dln2->event_cb_lock, flags);
        } else {
                /* URB will be re-submitted in _dln2_transfer (free_rx_slot) */
                if (dln2_transfer_complete(dln2, urb, handle, echo))
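For reference, a minimal sketch of the locking pattern the hunk above applies (not the driver's code; the demo_* names are invented): the event-callback list is walked under a spinlock taken with spin_lock_irqsave(), which is safe from the URB completion path even when it runs in hard-IRQ context.

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_cb {
	struct list_head list;
	void (*fn)(void *ctx);
	void *ctx;
};

static DEFINE_SPINLOCK(demo_lock);
static LIST_HEAD(demo_cbs);

/* Runs from a completion handler; IRQ state is saved and restored. */
static void demo_run_callbacks(void)
{
	struct demo_cb *cb;
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	list_for_each_entry(cb, &demo_cbs, list)
		cb->fn(cb->ctx);
	spin_unlock_irqrestore(&demo_lock, flags);
}
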
index edfc172..eba88b8 100644 (file)
@@ -5,7 +5,7 @@
  * Copyright (c) <2011-2014> HiSilicon Technologies Co., Ltd.
  *              http://www.hisilicon.com
  * Copyright (c) <2013-2017> Linaro Ltd.
- *              http://www.linaro.org
+ *              https://www.linaro.org
  *
  * Author: Guodong Xu <guodong.xu@linaro.org>
  */
index 0462226..9a58032 100644 (file)
@@ -201,6 +201,9 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
        { PCI_VDEVICE(INTEL, 0x1ac4), (kernel_ulong_t)&bxt_info },
        { PCI_VDEVICE(INTEL, 0x1ac6), (kernel_ulong_t)&bxt_info },
        { PCI_VDEVICE(INTEL, 0x1aee), (kernel_ulong_t)&bxt_uart_info },
+       /* EBG */
+       { PCI_VDEVICE(INTEL, 0x1bad), (kernel_ulong_t)&bxt_uart_info },
+       { PCI_VDEVICE(INTEL, 0x1bae), (kernel_ulong_t)&bxt_uart_info },
        /* GLK */
        { PCI_VDEVICE(INTEL, 0x31ac), (kernel_ulong_t)&glk_i2c_info },
        { PCI_VDEVICE(INTEL, 0x31ae), (kernel_ulong_t)&glk_i2c_info },
@@ -230,6 +233,22 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
        { PCI_VDEVICE(INTEL, 0x34ea), (kernel_ulong_t)&bxt_i2c_info },
        { PCI_VDEVICE(INTEL, 0x34eb), (kernel_ulong_t)&bxt_i2c_info },
        { PCI_VDEVICE(INTEL, 0x34fb), (kernel_ulong_t)&spt_info },
+       /* TGL-H */
+       { PCI_VDEVICE(INTEL, 0x43a7), (kernel_ulong_t)&bxt_uart_info },
+       { PCI_VDEVICE(INTEL, 0x43a8), (kernel_ulong_t)&bxt_uart_info },
+       { PCI_VDEVICE(INTEL, 0x43a9), (kernel_ulong_t)&bxt_uart_info },
+       { PCI_VDEVICE(INTEL, 0x43aa), (kernel_ulong_t)&bxt_info },
+       { PCI_VDEVICE(INTEL, 0x43ab), (kernel_ulong_t)&bxt_info },
+       { PCI_VDEVICE(INTEL, 0x43ad), (kernel_ulong_t)&bxt_i2c_info },
+       { PCI_VDEVICE(INTEL, 0x43ae), (kernel_ulong_t)&bxt_i2c_info },
+       { PCI_VDEVICE(INTEL, 0x43d8), (kernel_ulong_t)&bxt_i2c_info },
+       { PCI_VDEVICE(INTEL, 0x43da), (kernel_ulong_t)&bxt_uart_info },
+       { PCI_VDEVICE(INTEL, 0x43e8), (kernel_ulong_t)&bxt_i2c_info },
+       { PCI_VDEVICE(INTEL, 0x43e9), (kernel_ulong_t)&bxt_i2c_info },
+       { PCI_VDEVICE(INTEL, 0x43ea), (kernel_ulong_t)&bxt_i2c_info },
+       { PCI_VDEVICE(INTEL, 0x43eb), (kernel_ulong_t)&bxt_i2c_info },
+       { PCI_VDEVICE(INTEL, 0x43fb), (kernel_ulong_t)&bxt_info },
+       { PCI_VDEVICE(INTEL, 0x43fd), (kernel_ulong_t)&bxt_info },
        /* EHL */
        { PCI_VDEVICE(INTEL, 0x4b28), (kernel_ulong_t)&bxt_uart_info },
        { PCI_VDEVICE(INTEL, 0x4b29), (kernel_ulong_t)&bxt_uart_info },
index bd94c98..71da861 100644 (file)
@@ -91,13 +91,8 @@ static int bcove_ipc_byte_reg_write(void *context, unsigned int reg,
 {
        struct intel_soc_pmic *pmic = context;
        u8 ipc_in = val;
-       int ret;
 
-       ret = intel_scu_ipc_dev_iowrite8(pmic->scu, reg, ipc_in);
-       if (ret)
-               return ret;
-
-       return 0;
+       return intel_scu_ipc_dev_iowrite8(pmic->scu, reg, ipc_in);
 }
 
 static const struct regmap_config bcove_regmap_config = {
index f48e21d..52bec01 100644 (file)
@@ -79,39 +79,31 @@ enum kempld_cells {
        KEMPLD_UART,
 };
 
-static const struct mfd_cell kempld_devs[] = {
-       [KEMPLD_I2C] = {
-               .name = "kempld-i2c",
-       },
-       [KEMPLD_WDT] = {
-               .name = "kempld-wdt",
-       },
-       [KEMPLD_GPIO] = {
-               .name = "kempld-gpio",
-       },
-       [KEMPLD_UART] = {
-               .name = "kempld-uart",
-       },
+static const char *kempld_dev_names[] = {
+       [KEMPLD_I2C] = "kempld-i2c",
+       [KEMPLD_WDT] = "kempld-wdt",
+       [KEMPLD_GPIO] = "kempld-gpio",
+       [KEMPLD_UART] = "kempld-uart",
 };
 
-#define KEMPLD_MAX_DEVS        ARRAY_SIZE(kempld_devs)
+#define KEMPLD_MAX_DEVS        ARRAY_SIZE(kempld_dev_names)
 
 static int kempld_register_cells_generic(struct kempld_device_data *pld)
 {
-       struct mfd_cell devs[KEMPLD_MAX_DEVS];
+       struct mfd_cell devs[KEMPLD_MAX_DEVS] = {};
        int i = 0;
 
        if (pld->feature_mask & KEMPLD_FEATURE_BIT_I2C)
-               devs[i++] = kempld_devs[KEMPLD_I2C];
+               devs[i++].name = kempld_dev_names[KEMPLD_I2C];
 
        if (pld->feature_mask & KEMPLD_FEATURE_BIT_WATCHDOG)
-               devs[i++] = kempld_devs[KEMPLD_WDT];
+               devs[i++].name = kempld_dev_names[KEMPLD_WDT];
 
        if (pld->feature_mask & KEMPLD_FEATURE_BIT_GPIO)
-               devs[i++] = kempld_devs[KEMPLD_GPIO];
+               devs[i++].name = kempld_dev_names[KEMPLD_GPIO];
 
        if (pld->feature_mask & KEMPLD_FEATURE_MASK_UART)
-               devs[i++] = kempld_devs[KEMPLD_UART];
+               devs[i++].name = kempld_dev_names[KEMPLD_UART];
 
        return mfd_add_devices(pld->dev, -1, devs, i, NULL, 0, NULL);
 }
diff --git a/drivers/mfd/khadas-mcu.c b/drivers/mfd/khadas-mcu.c
new file mode 100644 (file)
index 0000000..44d5bb4
--- /dev/null
@@ -0,0 +1,142 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for Khadas System control Microcontroller
+ *
+ * Copyright (C) 2020 BayLibre SAS
+ *
+ * Author(s): Neil Armstrong <narmstrong@baylibre.com>
+ */
+#include <linux/bitfield.h>
+#include <linux/i2c.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/khadas-mcu.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+static bool khadas_mcu_reg_volatile(struct device *dev, unsigned int reg)
+{
+       if (reg >= KHADAS_MCU_USER_DATA_0_REG &&
+           reg < KHADAS_MCU_PWR_OFF_CMD_REG)
+               return true;
+
+       switch (reg) {
+       case KHADAS_MCU_PWR_OFF_CMD_REG:
+       case KHADAS_MCU_PASSWD_START_REG:
+       case KHADAS_MCU_CHECK_VEN_PASSWD_REG:
+       case KHADAS_MCU_CHECK_USER_PASSWD_REG:
+       case KHADAS_MCU_WOL_INIT_START_REG:
+       case KHADAS_MCU_CMD_FAN_STATUS_CTRL_REG:
+               return true;
+       default:
+               return false;
+       }
+}
+
+static bool khadas_mcu_reg_writeable(struct device *dev, unsigned int reg)
+{
+       switch (reg) {
+       case KHADAS_MCU_PASSWD_VEN_0_REG:
+       case KHADAS_MCU_PASSWD_VEN_1_REG:
+       case KHADAS_MCU_PASSWD_VEN_2_REG:
+       case KHADAS_MCU_PASSWD_VEN_3_REG:
+       case KHADAS_MCU_PASSWD_VEN_4_REG:
+       case KHADAS_MCU_PASSWD_VEN_5_REG:
+       case KHADAS_MCU_MAC_0_REG:
+       case KHADAS_MCU_MAC_1_REG:
+       case KHADAS_MCU_MAC_2_REG:
+       case KHADAS_MCU_MAC_3_REG:
+       case KHADAS_MCU_MAC_4_REG:
+       case KHADAS_MCU_MAC_5_REG:
+       case KHADAS_MCU_USID_0_REG:
+       case KHADAS_MCU_USID_1_REG:
+       case KHADAS_MCU_USID_2_REG:
+       case KHADAS_MCU_USID_3_REG:
+       case KHADAS_MCU_USID_4_REG:
+       case KHADAS_MCU_USID_5_REG:
+       case KHADAS_MCU_VERSION_0_REG:
+       case KHADAS_MCU_VERSION_1_REG:
+       case KHADAS_MCU_DEVICE_NO_0_REG:
+       case KHADAS_MCU_DEVICE_NO_1_REG:
+       case KHADAS_MCU_FACTORY_TEST_REG:
+       case KHADAS_MCU_SHUTDOWN_NORMAL_STATUS_REG:
+               return false;
+       default:
+               return true;
+       }
+}
+
+static const struct regmap_config khadas_mcu_regmap_config = {
+       .reg_bits       = 8,
+       .reg_stride     = 1,
+       .val_bits       = 8,
+       .max_register   = KHADAS_MCU_CMD_FAN_STATUS_CTRL_REG,
+       .volatile_reg   = khadas_mcu_reg_volatile,
+       .writeable_reg  = khadas_mcu_reg_writeable,
+       .cache_type     = REGCACHE_RBTREE,
+};
+
+static struct mfd_cell khadas_mcu_fan_cells[] = {
+       /* VIM1/2 Rev13+ and VIM3 only */
+       { .name = "khadas-mcu-fan-ctrl", },
+};
+
+static struct mfd_cell khadas_mcu_cells[] = {
+       { .name = "khadas-mcu-user-mem", },
+};
+
+static int khadas_mcu_probe(struct i2c_client *client,
+                      const struct i2c_device_id *id)
+{
+       struct device *dev = &client->dev;
+       struct khadas_mcu *ddata;
+       int ret;
+
+       ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL);
+       if (!ddata)
+               return -ENOMEM;
+
+       i2c_set_clientdata(client, ddata);
+
+       ddata->dev = dev;
+
+       ddata->regmap = devm_regmap_init_i2c(client, &khadas_mcu_regmap_config);
+       if (IS_ERR(ddata->regmap)) {
+               ret = PTR_ERR(ddata->regmap);
+               dev_err(dev, "Failed to allocate register map: %d\n", ret);
+               return ret;
+       }
+
+       ret = devm_mfd_add_devices(dev, PLATFORM_DEVID_NONE,
+                                  khadas_mcu_cells,
+                                  ARRAY_SIZE(khadas_mcu_cells),
+                                  NULL, 0, NULL);
+       if (ret)
+               return ret;
+
+       if (of_find_property(dev->of_node, "#cooling-cells", NULL))
+               return devm_mfd_add_devices(dev, PLATFORM_DEVID_NONE,
+                                           khadas_mcu_fan_cells,
+                                           ARRAY_SIZE(khadas_mcu_fan_cells),
+                                           NULL, 0, NULL);
+
+       return 0;
+}
+
+static const struct of_device_id khadas_mcu_of_match[] = {
+       { .compatible = "khadas,mcu", },
+       {},
+};
+MODULE_DEVICE_TABLE(of, khadas_mcu_of_match);
+
+static struct i2c_driver khadas_mcu_driver = {
+       .driver = {
+               .name = "khadas-mcu-core",
+               .of_match_table = of_match_ptr(khadas_mcu_of_match),
+       },
+       .probe = khadas_mcu_probe,
+};
+module_i2c_driver(khadas_mcu_driver);
+
+MODULE_DESCRIPTION("Khadas MCU core driver");
+MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
+MODULE_LICENSE("GPL v2");
index 34fba06..2537dfa 100644 (file)
@@ -17,7 +17,6 @@
 #define LM3533_MAX_CURRENT_MAX         29800
 #define LM3533_MAX_CURRENT_STEP                800
 
-#define LM3533_BRIGHTNESS_MAX          255
 #define LM3533_PWM_MAX                 0x3f
 
 #define LM3533_REG_PWM_BASE            0x14
@@ -89,41 +88,33 @@ int lm3533_ctrlbank_set_max_current(struct lm3533_ctrlbank *cb, u16 imax)
 }
 EXPORT_SYMBOL_GPL(lm3533_ctrlbank_set_max_current);
 
-#define lm3533_ctrlbank_set(_name, _NAME)                              \
-int lm3533_ctrlbank_set_##_name(struct lm3533_ctrlbank *cb, u8 val)    \
-{                                                                      \
-       u8 reg;                                                         \
-       int ret;                                                        \
-                                                                       \
-       if (val > LM3533_##_NAME##_MAX)                                 \
-               return -EINVAL;                                         \
-                                                                       \
-       reg = lm3533_ctrlbank_get_reg(cb, LM3533_REG_##_NAME##_BASE);   \
-       ret = lm3533_write(cb->lm3533, reg, val);                       \
-       if (ret)                                                        \
-               dev_err(cb->dev, "failed to set " #_name "\n");         \
-                                                                       \
-       return ret;                                                     \
-}                                                                      \
-EXPORT_SYMBOL_GPL(lm3533_ctrlbank_set_##_name);
-
-#define lm3533_ctrlbank_get(_name, _NAME)                              \
-int lm3533_ctrlbank_get_##_name(struct lm3533_ctrlbank *cb, u8 *val)   \
-{                                                                      \
-       u8 reg;                                                         \
-       int ret;                                                        \
-                                                                       \
-       reg = lm3533_ctrlbank_get_reg(cb, LM3533_REG_##_NAME##_BASE);   \
-       ret = lm3533_read(cb->lm3533, reg, val);                        \
-       if (ret)                                                        \
-               dev_err(cb->dev, "failed to get " #_name "\n");         \
-                                                                       \
-       return ret;                                                     \
-}                                                                      \
-EXPORT_SYMBOL_GPL(lm3533_ctrlbank_get_##_name);
-
-lm3533_ctrlbank_set(brightness, BRIGHTNESS);
-lm3533_ctrlbank_get(brightness, BRIGHTNESS);
+int lm3533_ctrlbank_set_brightness(struct lm3533_ctrlbank *cb, u8 val)
+{
+       u8 reg;
+       int ret;
+
+       reg = lm3533_ctrlbank_get_reg(cb, LM3533_REG_BRIGHTNESS_BASE);
+       ret = lm3533_write(cb->lm3533, reg, val);
+       if (ret)
+               dev_err(cb->dev, "failed to set brightness\n");
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(lm3533_ctrlbank_set_brightness);
+
+int lm3533_ctrlbank_get_brightness(struct lm3533_ctrlbank *cb, u8 *val)
+{
+       u8 reg;
+       int ret;
+
+       reg = lm3533_ctrlbank_get_reg(cb, LM3533_REG_BRIGHTNESS_BASE);
+       ret = lm3533_read(cb->lm3533, reg, val);
+       if (ret)
+               dev_err(cb->dev, "failed to get brightness\n");
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(lm3533_ctrlbank_get_brightness);
 
 /*
  * PWM-input control mask:
@@ -135,9 +126,36 @@ lm3533_ctrlbank_get(brightness, BRIGHTNESS);
  *   bit 1 - PWM-input enabled in Zone 0
  *   bit 0 - PWM-input enabled
  */
-lm3533_ctrlbank_set(pwm, PWM);
-lm3533_ctrlbank_get(pwm, PWM);
+int lm3533_ctrlbank_set_pwm(struct lm3533_ctrlbank *cb, u8 val)
+{
+       u8 reg;
+       int ret;
+
+       if (val > LM3533_PWM_MAX)
+               return -EINVAL;
+
+       reg = lm3533_ctrlbank_get_reg(cb, LM3533_REG_PWM_BASE);
+       ret = lm3533_write(cb->lm3533, reg, val);
+       if (ret)
+               dev_err(cb->dev, "failed to set PWM mask\n");
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(lm3533_ctrlbank_set_pwm);
+
+int lm3533_ctrlbank_get_pwm(struct lm3533_ctrlbank *cb, u8 *val)
+{
+       u8 reg;
+       int ret;
 
+       reg = lm3533_ctrlbank_get_reg(cb, LM3533_REG_PWM_BASE);
+       ret = lm3533_read(cb->lm3533, reg, val);
+       if (ret)
+               dev_err(cb->dev, "failed to get PWM mask\n");
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(lm3533_ctrlbank_get_pwm);
 
 MODULE_AUTHOR("Johan Hovold <jhovold@gmail.com>");
 MODULE_DESCRIPTION("LM3533 Control Bank interface");
index 873c608..858c9e0 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2016 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2016 Texas Instruments Incorporated - https://www.ti.com/
  *
  * Author: Keerthy <j-keerthy@ti.com>
  *
index 4a5c8ad..2268be9 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (C) 2017 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2017 Texas Instruments Incorporated - https://www.ti.com/
  *
  * Author: Keerthy <j-keerthy@ti.com>
  */
index 7e0835c..8a8d733 100644 (file)
@@ -44,7 +44,10 @@ static const char * const madera_core_supplies[] = {
 };
 
 static const struct mfd_cell madera_ldo1_devs[] = {
-       { .name = "madera-ldo1" },
+       {
+               .name = "madera-ldo1",
+               .level = MFD_DEP_LEVEL_HIGH,
+       },
 };
 
 static const char * const cs47l15_supplies[] = {
@@ -55,8 +58,8 @@ static const char * const cs47l15_supplies[] = {
 
 static const struct mfd_cell cs47l15_devs[] = {
        { .name = "madera-pinctrl", },
-       { .name = "madera-irq" },
-       { .name = "madera-gpio" },
+       { .name = "madera-irq", },
+       { .name = "madera-gpio", },
        {
                .name = "madera-extcon",
                .parent_supplies = cs47l15_supplies,
@@ -108,7 +111,7 @@ static const char * const cs47l85_supplies[] = {
 static const struct mfd_cell cs47l85_devs[] = {
        { .name = "madera-pinctrl", },
        { .name = "madera-irq", },
-       { .name = "madera-micsupp" },
+       { .name = "madera-micsupp", },
        { .name = "madera-gpio", },
        {
                .name = "madera-extcon",
@@ -155,10 +158,10 @@ static const char * const cs47l92_supplies[] = {
 };
 
 static const struct mfd_cell cs47l92_devs[] = {
-       { .name = "madera-pinctrl" },
+       { .name = "madera-pinctrl", },
        { .name = "madera-irq", },
        { .name = "madera-micsupp", },
-       { .name = "madera-gpio" },
+       { .name = "madera-gpio", },
        {
                .name = "madera-extcon",
                .parent_supplies = cs47l92_supplies,
@@ -743,18 +746,22 @@ int madera_dev_exit(struct madera *madera)
        /* Prevent any IRQs being serviced while we clean up */
        disable_irq(madera->irq);
 
-       /*
-        * DCVDD could be supplied by a child node, we must disable it before
-        * removing the children, and prevent PM runtime from turning it back on
-        */
-       pm_runtime_disable(madera->dev);
+       pm_runtime_get_sync(madera->dev);
 
-       clk_disable_unprepare(madera->mclk[MADERA_MCLK2].clk);
+       mfd_remove_devices(madera->dev);
+
+       pm_runtime_disable(madera->dev);
 
        regulator_disable(madera->dcvdd);
        regulator_put(madera->dcvdd);
 
-       mfd_remove_devices(madera->dev);
+       mfd_remove_devices_late(madera->dev);
+
+       pm_runtime_set_suspended(madera->dev);
+       pm_runtime_put_noidle(madera->dev);
+
+       clk_disable_unprepare(madera->mclk[MADERA_MCLK2].clk);
+
        madera_enable_hard_reset(madera);
 
        regulator_bulk_disable(madera->num_core_supplies,
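For context, an illustrative cell list (demo names only) showing the two-stage teardown this exit path relies on: a cell tagged MFD_DEP_LEVEL_HIGH, like madera-ldo1 above, is skipped by mfd_remove_devices() and only removed by the later mfd_remove_devices_late() call, so a child regulator that supplies the parent can outlive its siblings.

#include <linux/mfd/core.h>

static const struct mfd_cell demo_cells[] = {
	{
		.name = "demo-ldo",
		.level = MFD_DEP_LEVEL_HIGH,	/* removed only by mfd_remove_devices_late() */
	},
	{
		.name = "demo-codec",		/* default level, removed first */
	},
};
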
index 6b965eb..7df5b9b 100644 (file)
@@ -88,7 +88,6 @@ static int madera_i2c_probe(struct i2c_client *i2c,
        if (!madera)
                return -ENOMEM;
 
-
        madera->regmap = devm_regmap_init_i2c(i2c, regmap_16bit_config);
        if (IS_ERR(madera->regmap)) {
                ret = PTR_ERR(madera->regmap);
index fd8864c..be185e9 100644 (file)
@@ -61,7 +61,7 @@ EXPORT_SYMBOL_GPL(maxim_charger_currents);
 int maxim_charger_calc_reg_current(const struct maxim_charger_current *limits,
                unsigned int min_ua, unsigned int max_ua, u8 *dst)
 {
-       unsigned int current_bits = 0xf;
+       unsigned int current_bits;
 
        if (min_ua > max_ua)
                return -EINVAL;
index f5a73af..c3651f0 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/kernel.h>
 #include <linux/platform_device.h>
 #include <linux/acpi.h>
+#include <linux/list.h>
 #include <linux/property.h>
 #include <linux/mfd/core.h>
 #include <linux/pm_runtime.h>
 #include <linux/module.h>
 #include <linux/irqdomain.h>
 #include <linux/of.h>
+#include <linux/of_address.h>
 #include <linux/regulator/consumer.h>
 
+static LIST_HEAD(mfd_of_node_list);
+
+struct mfd_of_node_entry {
+       struct list_head list;
+       struct device *dev;
+       struct device_node *np;
+};
+
 static struct device_type mfd_dev_type = {
        .name   = "mfd_device",
 };
@@ -107,6 +117,55 @@ static inline void mfd_acpi_add_device(const struct mfd_cell *cell,
 }
 #endif
 
+static int mfd_match_of_node_to_dev(struct platform_device *pdev,
+                                   struct device_node *np,
+                                   const struct mfd_cell *cell)
+{
+#if IS_ENABLED(CONFIG_OF)
+       struct mfd_of_node_entry *of_entry;
+       const __be32 *reg;
+       u64 of_node_addr;
+
+       /* Skip devices 'disabled' by Device Tree */
+       if (!of_device_is_available(np))
+               return -ENODEV;
+
+       /* Skip if OF node has previously been allocated to a device */
+       list_for_each_entry(of_entry, &mfd_of_node_list, list)
+               if (of_entry->np == np)
+                       return -EAGAIN;
+
+       if (!cell->use_of_reg)
+               /* No of_reg defined - allocate first free compatible match */
+               goto allocate_of_node;
+
+       /* We only care about each node's first defined address */
+       reg = of_get_address(np, 0, NULL, NULL);
+       if (!reg)
+               /* OF node does not contain a 'reg' property to match to */
+               return -EAGAIN;
+
+       of_node_addr = of_read_number(reg, of_n_addr_cells(np));
+
+       if (cell->of_reg != of_node_addr)
+               /* No match */
+               return -EAGAIN;
+
+allocate_of_node:
+       of_entry = kzalloc(sizeof(*of_entry), GFP_KERNEL);
+       if (!of_entry)
+               return -ENOMEM;
+
+       of_entry->dev = &pdev->dev;
+       of_entry->np = np;
+       list_add_tail(&of_entry->list, &mfd_of_node_list);
+
+       pdev->dev.of_node = np;
+       pdev->dev.fwnode = &np->fwnode;
+#endif
+       return 0;
+}
+
 static int mfd_add_device(struct device *parent, int id,
                          const struct mfd_cell *cell,
                          struct resource *mem_base,
@@ -115,6 +174,7 @@ static int mfd_add_device(struct device *parent, int id,
        struct resource *res;
        struct platform_device *pdev;
        struct device_node *np = NULL;
+       struct mfd_of_node_entry *of_entry, *tmp;
        int ret = -ENOMEM;
        int platform_id;
        int r;
@@ -149,19 +209,22 @@ static int mfd_add_device(struct device *parent, int id,
        if (ret < 0)
                goto fail_res;
 
-       if (parent->of_node && cell->of_compatible) {
+       if (IS_ENABLED(CONFIG_OF) && parent->of_node && cell->of_compatible) {
                for_each_child_of_node(parent->of_node, np) {
                        if (of_device_is_compatible(np, cell->of_compatible)) {
-                               if (!of_device_is_available(np)) {
-                                       /* Ignore disabled devices error free */
-                                       ret = 0;
+                               ret = mfd_match_of_node_to_dev(pdev, np, cell);
+                               if (ret == -EAGAIN)
+                                       continue;
+                               if (ret)
                                        goto fail_alias;
-                               }
-                               pdev->dev.of_node = np;
-                               pdev->dev.fwnode = &np->fwnode;
+
                                break;
                        }
                }
+
+               if (!pdev->dev.of_node)
+                       pr_warn("%s: Failed to locate of_node [id: %d]\n",
+                               cell->name, platform_id);
        }
 
        mfd_acpi_add_device(cell, pdev);
@@ -170,13 +233,13 @@ static int mfd_add_device(struct device *parent, int id,
                ret = platform_device_add_data(pdev,
                                        cell->platform_data, cell->pdata_size);
                if (ret)
-                       goto fail_alias;
+                       goto fail_of_entry;
        }
 
        if (cell->properties) {
                ret = platform_device_add_properties(pdev, cell->properties);
                if (ret)
-                       goto fail_alias;
+                       goto fail_of_entry;
        }
 
        for (r = 0; r < cell->num_resources; r++) {
@@ -213,18 +276,18 @@ static int mfd_add_device(struct device *parent, int id,
                        if (has_acpi_companion(&pdev->dev)) {
                                ret = acpi_check_resource_conflict(&res[r]);
                                if (ret)
-                                       goto fail_alias;
+                                       goto fail_of_entry;
                        }
                }
        }
 
        ret = platform_device_add_resources(pdev, res, cell->num_resources);
        if (ret)
-               goto fail_alias;
+               goto fail_of_entry;
 
        ret = platform_device_add(pdev);
        if (ret)
-               goto fail_alias;
+               goto fail_of_entry;
 
        if (cell->pm_runtime_no_callbacks)
                pm_runtime_no_callbacks(&pdev->dev);
@@ -233,6 +296,12 @@ static int mfd_add_device(struct device *parent, int id,
 
        return 0;
 
+fail_of_entry:
+       list_for_each_entry_safe(of_entry, tmp, &mfd_of_node_list, list)
+               if (of_entry->dev == &pdev->dev) {
+                       list_del(&of_entry->list);
+                       kfree(of_entry);
+               }
 fail_alias:
        regulator_bulk_unregister_supply_alias(&pdev->dev,
                                               cell->parent_supplies,
@@ -287,6 +356,7 @@ static int mfd_remove_devices_fn(struct device *dev, void *data)
 {
        struct platform_device *pdev;
        const struct mfd_cell *cell;
+       int *level = data;
 
        if (dev->type != &mfd_dev_type)
                return 0;
@@ -294,16 +364,31 @@ static int mfd_remove_devices_fn(struct device *dev, void *data)
        pdev = to_platform_device(dev);
        cell = mfd_get_cell(pdev);
 
+       if (level && cell->level > *level)
+               return 0;
+
        regulator_bulk_unregister_supply_alias(dev, cell->parent_supplies,
                                               cell->num_parent_supplies);
 
+       kfree(cell);
+
        platform_device_unregister(pdev);
        return 0;
 }
 
+void mfd_remove_devices_late(struct device *parent)
+{
+       int level = MFD_DEP_LEVEL_HIGH;
+
+       device_for_each_child_reverse(parent, &level, mfd_remove_devices_fn);
+}
+EXPORT_SYMBOL(mfd_remove_devices_late);
+
 void mfd_remove_devices(struct device *parent)
 {
-       device_for_each_child_reverse(parent, NULL, mfd_remove_devices_fn);
+       int level = MFD_DEP_LEVEL_NORMAL;
+
+       device_for_each_child_reverse(parent, &level, mfd_remove_devices_fn);
 }
 EXPORT_SYMBOL(mfd_remove_devices);
 
@@ -318,6 +403,16 @@ static void devm_mfd_dev_release(struct device *dev, void *res)
  * Returns 0 on success or an appropriate negative error number on failure.
  * All child-devices of the MFD will automatically be removed when it gets
  * unbinded.
+ *
+ * @dev:       Pointer to parent device.
+ * @id:                Can be PLATFORM_DEVID_AUTO to let the Platform API take care
+ *             of device numbering, or will be added to a device's cell_id.
+ * @cells:     Array of (struct mfd_cell)s describing child devices.
+ * @n_devs:    Number of child devices to register.
+ * @mem_base:  Parent register range resource for child devices.
+ * @irq_base:  Base of the range of virtual interrupt numbers allocated for
+ *             this MFD device. Unused if @domain is specified.
+ * @domain:    Interrupt domain to create mappings for hardware interrupts.
  */
 int devm_mfd_add_devices(struct device *dev, int id,
                         const struct mfd_cell *cells, int n_devs,
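As a usage sketch for the matching added above (hypothetical cell names and addresses; the use_of_reg/of_reg field names are taken from the code in this hunk): two cells sharing one compatible can each be pinned to the DT child node whose first "reg" address equals of_reg.

#include <linux/mfd/core.h>

static const struct mfd_cell demo_uart_cells[] = {
	{
		.name = "demo-uart0",
		.of_compatible = "vendor,demo-uart",
		.use_of_reg = true,
		.of_reg = 0x1000,	/* binds to the child node at unit address 0x1000 */
	},
	{
		.name = "demo-uart1",
		.of_compatible = "vendor,demo-uart",
		.use_of_reg = true,
		.of_reg = 0x2000,	/* binds to the child node at unit address 0x2000 */
	},
};
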
index 52f38e5..2283d88 100644 (file)
@@ -214,6 +214,28 @@ static const struct regmap_config cpcap_regmap_config = {
        .val_format_endian = REGMAP_ENDIAN_LITTLE,
 };
 
+#ifdef CONFIG_PM_SLEEP
+static int cpcap_suspend(struct device *dev)
+{
+       struct spi_device *spi = to_spi_device(dev);
+
+       disable_irq(spi->irq);
+
+       return 0;
+}
+
+static int cpcap_resume(struct device *dev)
+{
+       struct spi_device *spi = to_spi_device(dev);
+
+       enable_irq(spi->irq);
+
+       return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(cpcap_pm, cpcap_suspend, cpcap_resume);
+
 static const struct mfd_cell cpcap_mfd_devices[] = {
        {
                .name          = "cpcap_adc",
@@ -313,6 +335,7 @@ static struct spi_driver cpcap_driver = {
        .driver = {
                .name = "cpcap-core",
                .of_match_table = cpcap_of_match,
+               .pm = &cpcap_pm,
        },
        .probe = cpcap_probe,
 };
index 1f4f01b..1e6431c 100644 (file)
@@ -2,7 +2,7 @@
 /**
  * omap-usb-host.c - The USBHS core driver for OMAP EHCI & OHCI
  *
- * Copyright (C) 2011-2013 Texas Instruments Incorporated - http://www.ti.com
+ * Copyright (C) 2011-2013 Texas Instruments Incorporated - https://www.ti.com
  * Author: Keshava Munegowda <keshava_mgowda@ti.com>
  * Author: Roger Quadros <rogerq@ti.com>
  */
@@ -120,7 +120,7 @@ static inline u32 usbhs_read(void __iomem *base, u32 reg)
 
 /*-------------------------------------------------------------------------*/
 
-/**
+/*
  * Map 'enum usbhs_omap_port_mode' found in <linux/platform_data/usb-omap.h>
  * to the device tree binding portN-mode found in
  * 'Documentation/devicetree/bindings/mfd/omap-usb-host.txt'
@@ -526,6 +526,8 @@ static const struct of_device_id usbhs_child_match_table[] = {
  * usbhs_omap_probe - initialize TI-based HCDs
  *
  * Allocates basic resources for this USB host controller.
+ *
+ * @pdev: Pointer to this device's platform device structure
  */
 static int usbhs_omap_probe(struct platform_device *pdev)
 {
index 4b7f73c..16fad79 100644 (file)
@@ -2,7 +2,7 @@
 /**
  * omap-usb-tll.c - The USB TLL driver for OMAP EHCI & OHCI
  *
- * Copyright (C) 2012-2013 Texas Instruments Incorporated - http://www.ti.com
+ * Copyright (C) 2012-2013 Texas Instruments Incorporated - https://www.ti.com
  * Author: Keshava Munegowda <keshava_mgowda@ti.com>
  * Author: Roger Quadros <rogerq@ti.com>
  */
@@ -199,6 +199,8 @@ static unsigned ohci_omap3_fslsmode(enum usbhs_omap_port_mode mode)
  * usbtll_omap_probe - initialize TI-based HCDs
  *
  * Allocates basic resources for this USB host controller.
+ *
+ * @pdev: Pointer to this device's platform device structure
  */
 static int usbtll_omap_probe(struct platform_device *pdev)
 {
index 26c7b63..abaab54 100644 (file)
@@ -96,7 +96,7 @@ struct rave_sp_deframer {
  * @data:      Buffer to store reply payload in
  * @code:      Expected reply code
  * @ackid:     Expected reply ACK ID
- * @completion: Successful reply reception completion
+ * @received:   Successful reply reception completion
  */
 struct rave_sp_reply {
        size_t length;
index 232de50..e25407e 100644 (file)
@@ -44,6 +44,9 @@ static bool rn5t618_volatile_reg(struct device *dev, unsigned int reg)
        case RN5T618_INTMON:
        case RN5T618_RTC_CTRL1 ... RN5T618_RTC_CTRL2:
        case RN5T618_RTC_SECONDS ... RN5T618_RTC_YEAR:
+       case RN5T618_CHGSTATE:
+       case RN5T618_CHGCTRL_IRR ... RN5T618_CHGERR_MONI:
+       case RN5T618_CONTROL ... RN5T618_CC_AVEREG0:
                return true;
        default:
                return false;
@@ -77,7 +80,7 @@ static const struct regmap_irq_chip rc5t619_irq_chip = {
        .mask_invert = true,
 };
 
-static struct rn5t618 *rn5t618_pm_power_off;
+static struct i2c_client *rn5t618_pm_power_off;
 static struct notifier_block rn5t618_restart_handler;
 
 static int rn5t618_irq_init(struct rn5t618 *rn5t618)
@@ -110,13 +113,38 @@ static int rn5t618_irq_init(struct rn5t618 *rn5t618)
 
 static void rn5t618_trigger_poweroff_sequence(bool repower)
 {
+       int ret;
+
        /* disable automatic repower-on */
-       regmap_update_bits(rn5t618_pm_power_off->regmap, RN5T618_REPCNT,
-                          RN5T618_REPCNT_REPWRON,
-                          repower ? RN5T618_REPCNT_REPWRON : 0);
+       ret = i2c_smbus_read_byte_data(rn5t618_pm_power_off, RN5T618_REPCNT);
+       if (ret < 0)
+               goto err;
+
+       ret &= ~RN5T618_REPCNT_REPWRON;
+       if (repower)
+               ret |= RN5T618_REPCNT_REPWRON;
+
+       ret = i2c_smbus_write_byte_data(rn5t618_pm_power_off,
+                                       RN5T618_REPCNT, (u8)ret);
+       if (ret < 0)
+               goto err;
+
        /* start power-off sequence */
-       regmap_update_bits(rn5t618_pm_power_off->regmap, RN5T618_SLPCNT,
-                          RN5T618_SLPCNT_SWPWROFF, RN5T618_SLPCNT_SWPWROFF);
+       ret = i2c_smbus_read_byte_data(rn5t618_pm_power_off, RN5T618_SLPCNT);
+       if (ret < 0)
+               goto err;
+
+       ret |= RN5T618_SLPCNT_SWPWROFF;
+
+       ret = i2c_smbus_write_byte_data(rn5t618_pm_power_off,
+                                       RN5T618_SLPCNT, (u8)ret);
+       if (ret < 0)
+               goto err;
+
+       return;
+
+err:
+       dev_alert(&rn5t618_pm_power_off->dev, "Failed to shutdown (err = %d)\n", ret);
 }
 
 static void rn5t618_power_off(void)
@@ -189,7 +217,7 @@ static int rn5t618_i2c_probe(struct i2c_client *i2c)
                return ret;
        }
 
-       rn5t618_pm_power_off = priv;
+       rn5t618_pm_power_off = i2c;
        if (of_device_is_system_power_controller(i2c->dev.of_node)) {
                if (!pm_power_off)
                        pm_power_off = rn5t618_power_off;
@@ -211,9 +239,7 @@ static int rn5t618_i2c_probe(struct i2c_client *i2c)
 
 static int rn5t618_i2c_remove(struct i2c_client *i2c)
 {
-       struct rn5t618 *priv = i2c_get_clientdata(i2c);
-
-       if (priv == rn5t618_pm_power_off) {
+       if (i2c == rn5t618_pm_power_off) {
                rn5t618_pm_power_off = NULL;
                pm_power_off = NULL;
        }
index 4a09ce9..d15b3e7 100644 (file)
@@ -241,13 +241,13 @@ static int si476x_core_parse_and_nag_about_error(struct si476x_core *core)
 /**
  * si476x_core_send_command() - sends a command to si476x and waits its
  * response
- * @core:    si476x_device structure for the device we are
+ * @core:     si476x_device structure for the device we are
  *            communicating with
  * @command:  command id
  * @args:     command arguments we are sending
  * @argn:     actual size of @args
- * @response: buffer to place the expected response from the device
- * @respn:    actual size of @response
+ * @resp:     buffer to place the expected response from the device
+ * @respn:    actual size of @resp
  * @usecs:    amount of time to wait before reading the response (in
  *            usecs)
  *
@@ -496,7 +496,7 @@ EXPORT_SYMBOL_GPL(si476x_core_cmd_get_property);
  *                             enable 1MOhm pulldown
  *      SI476X_DFS_DAUDIO    - set the pin to be a part of digital
  *                             audio interface
- * @dout - DOUT pin function configuration:
+ * @dout: - DOUT pin function configuration:
  *      SI476X_DOUT_NOOP       - do not modify the behaviour
  *      SI476X_DOUT_TRISTATE   - put the pin in tristate condition,
  *                               enable 1MOhm pulldown
@@ -504,7 +504,7 @@ EXPORT_SYMBOL_GPL(si476x_core_cmd_get_property);
  *                               port 1
  *      SI476X_DOUT_I2S_INPUT  - set this pin to be digital in on I2S
  *                               port 1
- * @xout - XOUT pin function configuration:
+ * @xout: - XOUT pin function configuration:
  *     SI476X_XOUT_NOOP        - do not modify the behaviour
  *      SI476X_XOUT_TRISTATE    - put the pin in tristate condition,
  *                                enable 1MOhm pulldown
@@ -540,25 +540,25 @@ EXPORT_SYMBOL_GPL(si476x_core_cmd_dig_audio_pin_cfg);
 
 /**
  * si476x_cmd_zif_pin_cfg - send 'ZIF_PIN_CFG_COMMAND'
- * @core - device to send the command to
- * @iqclk - IQCL pin function configuration:
+ * @core: - device to send the command to
+ * @iqclk: - IQCL pin function configuration:
  *       SI476X_IQCLK_NOOP     - do not modify the behaviour
  *       SI476X_IQCLK_TRISTATE - put the pin in tristate condition,
  *                               enable 1MOhm pulldown
  *       SI476X_IQCLK_IQ       - set pin to be a part of I/Q interace
  *                               in master mode
- * @iqfs - IQFS pin function configuration:
+ * @iqfs: - IQFS pin function configuration:
  *       SI476X_IQFS_NOOP     - do not modify the behaviour
  *       SI476X_IQFS_TRISTATE - put the pin in tristate condition,
  *                              enable 1MOhm pulldown
  *       SI476X_IQFS_IQ       - set pin to be a part of I/Q interace
  *                              in master mode
- * @iout - IOUT pin function configuration:
+ * @iout: - IOUT pin function configuration:
  *       SI476X_IOUT_NOOP     - do not modify the behaviour
  *       SI476X_IOUT_TRISTATE - put the pin in tristate condition,
  *                              enable 1MOhm pulldown
  *       SI476X_IOUT_OUTPUT   - set pin to be I out
- * @qout - QOUT pin function configuration:
+ * @qout: - QOUT pin function configuration:
  *       SI476X_QOUT_NOOP     - do not modify the behaviour
  *       SI476X_QOUT_TRISTATE - put the pin in tristate condition,
  *                              enable 1MOhm pulldown
@@ -590,29 +590,29 @@ EXPORT_SYMBOL_GPL(si476x_core_cmd_zif_pin_cfg);
 /**
  * si476x_cmd_ic_link_gpo_ctl_pin_cfg - send
  * 'IC_LINK_GPIO_CTL_PIN_CFG' comand to the device
- * @core - device to send the command to
- * @icin - ICIN pin function configuration:
+ * @core: - device to send the command to
+ * @icin: - ICIN pin function configuration:
  *      SI476X_ICIN_NOOP      - do not modify the behaviour
  *      SI476X_ICIN_TRISTATE  - put the pin in tristate condition,
  *                              enable 1MOhm pulldown
  *      SI476X_ICIN_GPO1_HIGH - set pin to be an output, drive it high
  *      SI476X_ICIN_GPO1_LOW  - set pin to be an output, drive it low
  *      SI476X_ICIN_IC_LINK   - set the pin to be a part of Inter-Chip link
- * @icip - ICIP pin function configuration:
+ * @icip: - ICIP pin function configuration:
  *      SI476X_ICIP_NOOP      - do not modify the behaviour
  *      SI476X_ICIP_TRISTATE  - put the pin in tristate condition,
  *                              enable 1MOhm pulldown
  *      SI476X_ICIP_GPO1_HIGH - set pin to be an output, drive it high
  *      SI476X_ICIP_GPO1_LOW  - set pin to be an output, drive it low
  *      SI476X_ICIP_IC_LINK   - set the pin to be a part of Inter-Chip link
- * @icon - ICON pin function configuration:
+ * @icon: - ICON pin function configuration:
  *      SI476X_ICON_NOOP     - do not modify the behaviour
  *      SI476X_ICON_TRISTATE - put the pin in tristate condition,
  *                             enable 1MOhm pulldown
  *      SI476X_ICON_I2S      - set the pin to be a part of audio
  *                             interface in slave mode (DCLK)
  *      SI476X_ICON_IC_LINK  - set the pin to be a part of Inter-Chip link
- * @icop - ICOP pin function configuration:
+ * @icop: - ICOP pin function configuration:
  *      SI476X_ICOP_NOOP     - do not modify the behaviour
  *      SI476X_ICOP_TRISTATE - put the pin in tristate condition,
  *                             enable 1MOhm pulldown
@@ -647,8 +647,8 @@ EXPORT_SYMBOL_GPL(si476x_core_cmd_ic_link_gpo_ctl_pin_cfg);
 /**
  * si476x_cmd_ana_audio_pin_cfg - send 'ANA_AUDIO_PIN_CFG' to the
  * device
- * @core - device to send the command to
- * @lrout - LROUT pin function configuration:
+ * @core: - device to send the command to
+ * @lrout: - LROUT pin function configuration:
  *       SI476X_LROUT_NOOP     - do not modify the behaviour
  *       SI476X_LROUT_TRISTATE - put the pin in tristate condition,
  *                               enable 1MOhm pulldown
@@ -675,15 +675,15 @@ EXPORT_SYMBOL_GPL(si476x_core_cmd_ana_audio_pin_cfg);
 
 /**
  * si476x_cmd_intb_pin_cfg - send 'INTB_PIN_CFG' command to the device
- * @core - device to send the command to
- * @intb - INTB pin function configuration:
+ * @core: - device to send the command to
+ * @intb: - INTB pin function configuration:
  *      SI476X_INTB_NOOP     - do not modify the behaviour
  *      SI476X_INTB_TRISTATE - put the pin in tristate condition,
  *                             enable 1MOhm pulldown
  *      SI476X_INTB_DAUDIO   - set pin to be a part of digital
  *                             audio interface in slave mode
  *      SI476X_INTB_IRQ      - set pin to be an interrupt request line
- * @a1 - A1 pin function configuration:
+ * @a1: - A1 pin function configuration:
  *      SI476X_A1_NOOP     - do not modify the behaviour
  *      SI476X_A1_TRISTATE - put the pin in tristate condition,
  *                           enable 1MOhm pulldown
@@ -728,14 +728,10 @@ static int si476x_core_cmd_intb_pin_cfg_a20(struct si476x_core *core,
 /**
  * si476x_cmd_am_rsq_status - send 'AM_RSQ_STATUS' command to the
  * device
- * @core  - device to send the command to
- * @rsqack - if set command clears RSQINT, SNRINT, SNRLINT, RSSIHINT,
- *           RSSSILINT, BLENDINT, MULTHINT and MULTLINT
- * @attune - when set the values in the status report are the values
- *           that were calculated at tune
- * @cancel - abort ongoing seek/tune opertation
- * @stcack - clear the STCINT bin in status register
- * @report - all signal quality information retured by the command
+ * @core:  - device to send the command to
+ * @rsqargs: - pointer to a structure containing a group of sub-args
+ *             relevant to sending the RSQ status command
+ * @report: - all signal quality information returned by the command
  *           (if NULL then the output of the command is ignored)
  *
  * Function returns 0 on success and negative error code on failure
@@ -862,9 +858,9 @@ EXPORT_SYMBOL_GPL(si476x_core_cmd_am_acf_status);
 /**
  * si476x_cmd_fm_seek_start - send 'FM_SEEK_START' command to the
  * device
- * @core  - device to send the command to
- * @seekup - if set the direction of the search is 'up'
- * @wrap   - if set seek wraps when hitting band limit
+ * @core:  - device to send the command to
+ * @seekup: - if set the direction of the search is 'up'
+ * @wrap:   - if set seek wraps when hitting band limit
  *
  * This function begins search for a valid station. The station is
  * considered valid when 'FM_VALID_SNR_THRESHOLD' and
@@ -890,12 +886,14 @@ EXPORT_SYMBOL_GPL(si476x_core_cmd_fm_seek_start);
 /**
  * si476x_cmd_fm_rds_status - send 'FM_RDS_STATUS' command to the
  * device
- * @core - device to send the command to
- * @status_only - if set the data is not removed from RDSFIFO,
+ * @core: - device to send the command to
+ * @status_only: - if set the data is not removed from RDSFIFO,
  *                RDSFIFOUSED is not decremented and data in all the
  *                rest RDS data contains the last valid info received
- * @mtfifo if set the command clears RDS receive FIFO
- * @intack if set the command clards the RDSINT bit.
+ * @mtfifo: if set the command clears RDS receive FIFO
+ * @intack: if set the command clears the RDSINT bit.
+ * @report: - all signal quality information returned by the command
+ *           (if NULL then the output of the command is ignored)
  *
  * Function returns 0 on success and negative error code on failure
  */
@@ -1036,9 +1034,9 @@ EXPORT_SYMBOL_GPL(si476x_core_cmd_fm_phase_div_status);
 /**
  * si476x_cmd_am_seek_start - send 'FM_SEEK_START' command to the
  * device
- * @core  - device to send the command to
- * @seekup - if set the direction of the search is 'up'
- * @wrap   - if set seek wraps when hitting band limit
+ * @core:  - device to send the command to
+ * @seekup: - if set the direction of the search is 'up'
+ * @wrap:   - if set seek wraps when hitting band limit
  *
  * This function begins search for a valid station. The station is
  * considered valid when 'FM_VALID_SNR_THRESHOLD' and
index c8d28b8..c1d7b84 100644 (file)
@@ -534,6 +534,11 @@ static irqreturn_t si476x_core_interrupt(int irq, void *dev)
 /**
  * si476x_firmware_version_to_revision()
  * @core: Core device structure
+ * @func: Selects the boot function of the device:
+ *         *_BOOTLOADER  - Boot loader
+ *         *_FM_RECEIVER - FM receiver
+ *         *_AM_RECEIVER - AM receiver
+ *         *_WB_RECEIVER - Weatherband receiver
  * @major:  Firmware major number
  * @minor1: Firmware first minor number
  * @minor2: Firmware second minor number
@@ -583,7 +588,7 @@ static int si476x_core_fwver_to_revision(struct si476x_core *core,
                        goto unknown_revision;
                }
        case SI476X_FUNC_BOOTLOADER:
-       default:                /* FALLTHROUG */
+       default:                /* FALLTHROUGH */
                BUG();
                return -1;
        }
index 76eedfa..3ad35bf 100644 (file)
@@ -47,8 +47,6 @@ static int sky81452_probe(struct i2c_client *client,
        memset(cells, 0, sizeof(cells));
        cells[0].name = "sky81452-backlight";
        cells[0].of_compatible = "skyworks,sky81452-backlight";
-       cells[0].platform_data = pdata->bl_pdata;
-       cells[0].pdata_size = sizeof(*pdata->bl_pdata);
        cells[1].name = "sky81452-regulator";
        cells[1].platform_data = pdata->regulator_init_data;
        cells[1].pdata_size = sizeof(*pdata->regulator_init_data);
diff --git a/drivers/mfd/smsc-ece1099.c b/drivers/mfd/smsc-ece1099.c
deleted file mode 100644 (file)
index 57b792e..0000000
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * TI SMSC MFD Driver
- *
- * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com
- *
- * Author: Sourav Poddar <sourav.poddar@ti.com>
- *
- *  This program is free software; you can redistribute it and/or modify it
- *  under  the terms of the GNU General  Public License as published by the
- *  Free Software Foundation;  GPL v2.
- *
- */
-
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/i2c.h>
-#include <linux/gpio.h>
-#include <linux/workqueue.h>
-#include <linux/irq.h>
-#include <linux/regmap.h>
-#include <linux/err.h>
-#include <linux/mfd/core.h>
-#include <linux/mfd/smsc.h>
-#include <linux/of_platform.h>
-
-static const struct regmap_config smsc_regmap_config = {
-               .reg_bits = 8,
-               .val_bits = 8,
-               .max_register = SMSC_VEN_ID_H,
-               .cache_type = REGCACHE_RBTREE,
-};
-
-static int smsc_i2c_probe(struct i2c_client *i2c,
-                       const struct i2c_device_id *id)
-{
-       struct smsc *smsc;
-       int devid, rev, venid_l, venid_h;
-       int ret;
-
-       smsc = devm_kzalloc(&i2c->dev, sizeof(*smsc), GFP_KERNEL);
-       if (!smsc)
-               return -ENOMEM;
-
-       smsc->regmap = devm_regmap_init_i2c(i2c, &smsc_regmap_config);
-       if (IS_ERR(smsc->regmap))
-               return PTR_ERR(smsc->regmap);
-
-       i2c_set_clientdata(i2c, smsc);
-       smsc->dev = &i2c->dev;
-
-#ifdef CONFIG_OF
-       of_property_read_u32(i2c->dev.of_node, "clock", &smsc->clk);
-#endif
-
-       regmap_read(smsc->regmap, SMSC_DEV_ID, &devid);
-       regmap_read(smsc->regmap, SMSC_DEV_REV, &rev);
-       regmap_read(smsc->regmap, SMSC_VEN_ID_L, &venid_l);
-       regmap_read(smsc->regmap, SMSC_VEN_ID_H, &venid_h);
-
-       dev_info(&i2c->dev, "SMSCxxx devid: %02x rev: %02x venid: %02x\n",
-               devid, rev, (venid_h << 8) | venid_l);
-
-       ret = regmap_write(smsc->regmap, SMSC_CLK_CTRL, smsc->clk);
-       if (ret)
-               return ret;
-
-#ifdef CONFIG_OF
-       if (i2c->dev.of_node)
-               ret = devm_of_platform_populate(&i2c->dev);
-#endif
-
-       return ret;
-}
-
-static const struct i2c_device_id smsc_i2c_id[] = {
-       { "smscece1099", 0},
-       {},
-};
-
-static struct i2c_driver smsc_i2c_driver = {
-       .driver = {
-                  .name = "smsc",
-       },
-       .probe = smsc_i2c_probe,
-       .id_table = smsc_i2c_id,
-};
-builtin_i2c_driver(smsc_i2c_driver);
index 33336cd..f8a8b91 100644 (file)
@@ -7,7 +7,9 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/mfd/core.h>
+#include <linux/mfd/sc27xx-pmic.h>
 #include <linux/of_device.h>
+#include <linux/of_platform.h>
 #include <linux/regmap.h>
 #include <linux/spi/spi.h>
 #include <uapi/linux/usb/charger.h>
@@ -93,73 +95,6 @@ enum usb_charger_type sprd_pmic_detect_charger_type(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(sprd_pmic_detect_charger_type);
 
-static const struct mfd_cell sprd_pmic_devs[] = {
-       {
-               .name = "sc27xx-wdt",
-               .of_compatible = "sprd,sc2731-wdt",
-       }, {
-               .name = "sc27xx-rtc",
-               .of_compatible = "sprd,sc2731-rtc",
-       }, {
-               .name = "sc27xx-charger",
-               .of_compatible = "sprd,sc2731-charger",
-       }, {
-               .name = "sc27xx-chg-timer",
-               .of_compatible = "sprd,sc2731-chg-timer",
-       }, {
-               .name = "sc27xx-fast-chg",
-               .of_compatible = "sprd,sc2731-fast-chg",
-       }, {
-               .name = "sc27xx-chg-wdt",
-               .of_compatible = "sprd,sc2731-chg-wdt",
-       }, {
-               .name = "sc27xx-typec",
-               .of_compatible = "sprd,sc2731-typec",
-       }, {
-               .name = "sc27xx-flash",
-               .of_compatible = "sprd,sc2731-flash",
-       }, {
-               .name = "sc27xx-eic",
-               .of_compatible = "sprd,sc2731-eic",
-       }, {
-               .name = "sc27xx-efuse",
-               .of_compatible = "sprd,sc2731-efuse",
-       }, {
-               .name = "sc27xx-thermal",
-               .of_compatible = "sprd,sc2731-thermal",
-       }, {
-               .name = "sc27xx-adc",
-               .of_compatible = "sprd,sc2731-adc",
-       }, {
-               .name = "sc27xx-audio-codec",
-               .of_compatible = "sprd,sc2731-audio-codec",
-       }, {
-               .name = "sc27xx-regulator",
-               .of_compatible = "sprd,sc2731-regulator",
-       }, {
-               .name = "sc27xx-vibrator",
-               .of_compatible = "sprd,sc2731-vibrator",
-       }, {
-               .name = "sc27xx-keypad-led",
-               .of_compatible = "sprd,sc2731-keypad-led",
-       }, {
-               .name = "sc27xx-bltc",
-               .of_compatible = "sprd,sc2731-bltc",
-       }, {
-               .name = "sc27xx-fgu",
-               .of_compatible = "sprd,sc2731-fgu",
-       }, {
-               .name = "sc27xx-7sreset",
-               .of_compatible = "sprd,sc2731-7sreset",
-       }, {
-               .name = "sc27xx-poweroff",
-               .of_compatible = "sprd,sc2731-poweroff",
-       }, {
-               .name = "sc27xx-syscon",
-               .of_compatible = "sprd,sc2731-syscon",
-       },
-};
-
 static int sprd_pmic_spi_write(void *context, const void *data, size_t count)
 {
        struct device *dev = context;
@@ -250,10 +185,8 @@ static int sprd_pmic_probe(struct spi_device *spi)
                return -ENOMEM;
 
        ddata->irq_chip.irqs = ddata->irqs;
-       for (i = 0; i < pdata->num_irqs; i++) {
-               ddata->irqs[i].reg_offset = i / pdata->num_irqs;
-               ddata->irqs[i].mask = BIT(i % pdata->num_irqs);
-       }
+       for (i = 0; i < pdata->num_irqs; i++)
+               ddata->irqs[i].mask = BIT(i);
 
        ret = devm_regmap_add_irq_chip(&spi->dev, ddata->regmap, ddata->irq,
                                       IRQF_ONESHOT | IRQF_NO_SUSPEND, 0,
@@ -263,12 +196,9 @@ static int sprd_pmic_probe(struct spi_device *spi)
                return ret;
        }
 
-       ret = devm_mfd_add_devices(&spi->dev, PLATFORM_DEVID_AUTO,
-                                  sprd_pmic_devs, ARRAY_SIZE(sprd_pmic_devs),
-                                  NULL, 0,
-                                  regmap_irq_get_domain(ddata->irq_data));
+       ret = devm_of_platform_populate(&spi->dev);
        if (ret) {
-               dev_err(&spi->dev, "Failed to register device %d\n", ret);
+               dev_err(&spi->dev, "Failed to populate sub-devices %d\n", ret);
                return ret;
        }
 
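The sc27xx hunks above drop the static mfd_cell table and let the OF core create the PMIC sub-devices directly from the children of the SPI device's DT node via devm_of_platform_populate(). A rough sketch of the resulting pattern follows; example_pmic_probe() and example_wdt_of_match are hypothetical names, only devm_of_platform_populate() and the compatible-string matching are taken from the change:

#include <linux/mod_devicetable.h>
#include <linux/of_platform.h>
#include <linux/spi/spi.h>

/* Parent side: one platform device is created per child DT node. */
static int example_pmic_probe(struct spi_device *spi)
{
	/* ... regmap and IRQ chip setup as in the real driver ... */
	return devm_of_platform_populate(&spi->dev);
}

/* Child side: a sub-driver now binds on the node's compatible string
 * instead of an mfd_cell name. */
static const struct of_device_id example_wdt_of_match[] = {
	{ .compatible = "sprd,sc2731-wdt" },
	{ /* sentinel */ }
};
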
index a00f99f..746e51a 100644 (file)
@@ -17,6 +17,7 @@ static const struct regmap_config stm32_lptimer_regmap_cfg = {
        .val_bits = 32,
        .reg_stride = sizeof(u32),
        .max_register = STM32_LPTIM_MAX_REGISTER,
+       .fast_io = true,
 };
 
 static int stm32_lptimer_detect_encoder(struct stm32_lptimer *ddata)
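Setting .fast_io in a regmap_config makes the regmap core protect accesses with a spinlock instead of a mutex, so register I/O can be issued from atomic context. A minimal MMIO config sketch with hypothetical register widths:

#include <linux/regmap.h>

/* Sketch only: .fast_io = true switches the regmap lock from a mutex to
 * a spinlock, allowing reads/writes with interrupts disabled. */
static const struct regmap_config example_mmio_cfg = {
	.reg_bits   = 32,		/* hypothetical widths */
	.val_bits   = 32,
	.reg_stride = sizeof(u32),
	.fast_io    = true,
};
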
index 3a97816..75859e4 100644 (file)
@@ -101,12 +101,14 @@ static struct syscon *of_syscon_register(struct device_node *np, bool check_clk)
                }
        }
 
-       syscon_config.name = of_node_full_name(np);
+       syscon_config.name = kasprintf(GFP_KERNEL, "%pOFn@%llx", np,
+                                      (u64)res.start);
        syscon_config.reg_stride = reg_io_width;
        syscon_config.val_bits = reg_io_width * 8;
        syscon_config.max_register = resource_size(&res) - reg_io_width;
 
        regmap = regmap_init_mmio(NULL, base, &syscon_config);
+       kfree(syscon_config.name);
        if (IS_ERR(regmap)) {
                pr_err("regmap init failed\n");
                ret = PTR_ERR(regmap);
index 67c9995..7882a37 100644 (file)
@@ -18,7 +18,7 @@
 #include <linux/mfd/tc3589x.h>
 #include <linux/err.h>
 
-/**
+/*
  * enum tc3589x_version - indicates the TC3589x version
  */
 enum tc3589x_version {
index 926c289..0e6e253 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * TI Touch Screen / ADC MFD driver
  *
- * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com/
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License as
index 65fcc58..7e7dbee 100644 (file)
@@ -404,7 +404,6 @@ static void tps65010_work(struct work_struct *work)
        tps65010_interrupt(tps);
 
        if (test_and_clear_bit(FLAG_VBUS_CHANGED, &tps->flags)) {
-               int     status;
                u8      chgconfig, tmp;
 
                chgconfig = i2c_smbus_read_byte_data(tps->client,
@@ -415,8 +414,8 @@ static void tps65010_work(struct work_struct *work)
                else if (tps->vbus >= 100)
                        chgconfig |= TPS_VBUS_CHARGING;
 
-               status = i2c_smbus_write_byte_data(tps->client,
-                               TPS_CHGCONFIG, chgconfig);
+               i2c_smbus_write_byte_data(tps->client,
+                                         TPS_CHGCONFIG, chgconfig);
 
                /* vbus update fails unless VBUS is connected! */
                tmp = i2c_smbus_read_byte_data(tps->client, TPS_CHGCONFIG);
index 43119a6..341466e 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2015 Texas Instruments Incorporated - https://www.ti.com/
  *     Andrew F. Davis <afd@ti.com>
  *
  * This program is free software; you can redistribute it and/or
index 7566ce4..2d9c282 100644 (file)
@@ -3,7 +3,7 @@
  *
  * TPS65217 chip family multi-function driver
  *
- * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License as
@@ -205,7 +205,7 @@ EXPORT_SYMBOL_GPL(tps65217_reg_read);
 /**
  * tps65217_reg_write: Write a single tps65217 register.
  *
- * @tps65217: Device to write to.
+ * @tps: Device to write to.
  * @reg: Register to write to.
  * @val: Value to write.
  * @level: Password protected level
@@ -250,7 +250,7 @@ EXPORT_SYMBOL_GPL(tps65217_reg_write);
 /**
  * tps65217_update_bits: Modify bits w.r.t mask, val and level.
  *
- * @tps65217: Device to write to.
+ * @tps: Device to write to.
  * @reg: Register to read-write to.
  * @mask: Mask.
  * @val: Value to write.
index a62ea4c..167e9fc 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Driver for TPS65218 Integrated power management chipsets
  *
- * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2014 Texas Instruments Incorporated - https://www.ti.com/
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License version 2 as
@@ -48,7 +48,7 @@ static const struct mfd_cell tps65218_cells[] = {
 /**
  * tps65218_reg_write: Write a single tps65218 register.
  *
- * @tps65218: Device to write to.
+ * @tps: Device to write to.
  * @reg: Register to write to.
  * @val: Value to write.
  * @level: Password protected level
@@ -79,7 +79,7 @@ EXPORT_SYMBOL_GPL(tps65218_reg_write);
 /**
  * tps65218_update_bits: Modify bits w.r.t mask, val and level.
  *
- * @tps65218: Device to write to.
+ * @tps: Device to write to.
  * @reg: Register to read-write to.
  * @mask: Mask.
  * @val: Value to write.
index c8aadd3..c365977 100644 (file)
@@ -309,18 +309,19 @@ static const struct irq_domain_ops tps6586x_domain_ops = {
 static irqreturn_t tps6586x_irq(int irq, void *data)
 {
        struct tps6586x *tps6586x = data;
-       u32 acks;
+       uint32_t acks;
+       __le32 val;
        int ret = 0;
 
        ret = tps6586x_reads(tps6586x->dev, TPS6586X_INT_ACK1,
-                            sizeof(acks), (uint8_t *)&acks);
+                            sizeof(acks), (uint8_t *)&val);
 
        if (ret < 0) {
                dev_err(tps6586x->dev, "failed to read interrupt status\n");
                return IRQ_NONE;
        }
 
-       acks = le32_to_cpu(acks);
+       acks = le32_to_cpu(val);
 
        while (acks) {
                int i = __ffs(acks);
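The tps6586x hunk keeps the raw register value in a separate __le32 so the single le32_to_cpu() conversion is explicit (and sparse-clean) instead of byte-swapping a variable in place. A sketch of the pattern; example_block_read() is a hypothetical stand-in for the chip's bulk-read helper:

#include <linux/device.h>
#include <linux/types.h>
#include <asm/byteorder.h>

static int example_block_read(struct device *dev, size_t len, u8 *buf);

static int example_read_acks(struct device *dev, u32 *acks)
{
	__le32 raw;
	int ret;

	/* hypothetical bulk read of four little-endian status bytes */
	ret = example_block_read(dev, sizeof(raw), (u8 *)&raw);
	if (ret < 0)
		return ret;

	*acks = le32_to_cpu(raw);	/* one explicit, typed conversion */
	return 0;
}
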
index f33567b..b55b1d5 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Core functions for TI TPS65912x PMICs
  *
- * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2015 Texas Instruments Incorporated - https://www.ti.com/
  *     Andrew F. Davis <afd@ti.com>
  *
  * This program is free software; you can redistribute it and/or
index 785d19f..f7c22ea 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * I2C access driver for TI TPS65912x PMICs
  *
- * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2015 Texas Instruments Incorporated - https://www.ti.com/
  *     Andrew F. Davis <afd@ti.com>
  *
  * This program is free software; you can redistribute it and/or
index f78be03..21a8d6a 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * SPI access driver for TI TPS65912x PMICs
  *
- * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2015 Texas Instruments Incorporated - https://www.ti.com/
  *     Andrew F. Davis <afd@ti.com>
  *
  * This program is free software; you can redistribute it and/or
index 910a304..ab41743 100644 (file)
@@ -477,7 +477,7 @@ static void twl4030_sih_bus_sync_unlock(struct irq_data *data)
 
        if (agent->imr_change_pending) {
                union {
-                       u32     word;
+                       __le32  word;
                        u8      bytes[4];
                } imr;
 
@@ -561,7 +561,7 @@ static inline int sih_read_isr(const struct sih *sih)
        int status;
        union {
                u8 bytes[4];
-               u32 word;
+               __le32 word;
        } isr;
 
        /* FIXME need retry-on-error ... */
index 02f879b..b0344e5 100644 (file)
@@ -114,6 +114,8 @@ static int wm831x_reg_locked(struct wm831x *wm831x, unsigned short reg)
  * The WM831x has a user key preventing writes to particularly
  * critical registers.  This function locks those registers,
  * allowing writes to them.
+ *
+ * @wm831x: pointer to local driver data structure
  */
 void wm831x_reg_lock(struct wm831x *wm831x)
 {
@@ -140,6 +142,8 @@ EXPORT_SYMBOL_GPL(wm831x_reg_lock);
  * The WM831x has a user key preventing writes to particularly
  * critical registers.  This function locks those registers,
  * preventing spurious writes.
+ *
+ * @wm831x: pointer to local driver data structure
  */
 int wm831x_reg_unlock(struct wm831x *wm831x)
 {
index 42b1650..fbc77b2 100644 (file)
@@ -131,6 +131,8 @@ EXPORT_SYMBOL_GPL(wm8350_block_write);
  * The WM8350 has a hardware lock which can be used to prevent writes to
  * some registers (generally those which can cause particularly serious
  * problems if misused).  This function enables that lock.
+ *
+ * @wm8350: pointer to local driver data structure
  */
 int wm8350_reg_lock(struct wm8350 *wm8350)
 {
@@ -160,6 +162,8 @@ EXPORT_SYMBOL_GPL(wm8350_reg_lock);
  * problems if misused).  This function disables that lock so updates
  * can be performed.  For maximum safety this should be done only when
  * required.
+ *
+ * @wm8350: pointer to local driver data structure
  */
 int wm8350_reg_unlock(struct wm8350 *wm8350)
 {
index 3055d6f..0fe32a0 100644 (file)
@@ -108,6 +108,8 @@ static const struct regmap_config wm8400_regmap_config = {
 /**
  * wm8400_reset_codec_reg_cache - Reset cached codec registers to
  * their default values.
+ *
+ * @wm8400: pointer to local driver data structure
  */
 void wm8400_reset_codec_reg_cache(struct wm8400 *wm8400)
 {
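The tps65217/tps65218 and wm831x/wm8350/wm8400 hunks are pure kernel-doc fixes: every argument needs a matching @name line or the kernel-doc checker warns. The expected comment shape, shown with a hypothetical function and type:

struct example;

/**
 * example_reg_lock - Lock the user-keyed registers
 * @example: pointer to the driver's local data structure
 *
 * Longer description of why and when the registers are locked.
 */
void example_reg_lock(struct example *example)
{
	/* body elided */
}
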
index a7e47e0..aef1499 100644 (file)
@@ -11,7 +11,7 @@ config MTD_CFI
          AMD and other flash manufactures that provides a universal method
          for probing the capabilities of flash devices. If you wish to
          support any device that is CFI-compliant, you need to enable this
-         option. Visit <http://www.amd.com/products/nvd/overview/cfi.html>
+         option. Visit <https://www.amd.com/products/nvd/overview/cfi.html>
          for more information on CFI.
 
 config MTD_JEDECPROBE
index f350a08..e0e33f6 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 //
-// Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
+// Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com/
 // Author: Vignesh Raghavendra <vigneshr@ti.com>
 
 #include <linux/err.h>
index 32685e8..2f9fc4e 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 //
-// Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
+// Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com/
 // Author: Vignesh Raghavendra <vigneshr@ti.com>
 
 #include <linux/err.h>
index b28225a..fd37553 100644 (file)
@@ -310,7 +310,7 @@ config MTD_DC21285
        help
          This provides a driver for the flash accessed using Intel's
          21285 bridge used with Intel's StrongARM processors. More info at
-         <http://www.intel.com/design/bridge/docs/21285_documentation.htm>.
+         <https://www.intel.com/design/bridge/docs/21285_documentation.htm>.
 
 config MTD_IXP4XX
        tristate "CFI Flash device mapped on Intel IXP4xx based systems"
index 9902b37..8ef7aec 100644 (file)
@@ -6,7 +6,7 @@
  * The SC520CDP is an evaluation board for the Elan SC520 processor available
  * from AMD. It has two banks of 32-bit Flash ROM, each 8 Megabytes in size,
  * and up to 512 KiB of 8-bit DIL Flash ROM.
- * For details see http://www.amd.com/products/epd/desiging/evalboards/18.elansc520/520_cdp_brief/index.html
+ * For details see https://www.amd.com/products/epd/desiging/evalboards/18.elansc520/520_cdp_brief/index.html
  */
 
 #include <linux/module.h>
index a5d8a21..c1a45b0 100644 (file)
@@ -1,7 +1,12 @@
 # SPDX-License-Identifier: GPL-2.0-only
+
+menu "NAND"
+
 config MTD_NAND_CORE
        tristate
 
 source "drivers/mtd/nand/onenand/Kconfig"
 source "drivers/mtd/nand/raw/Kconfig"
 source "drivers/mtd/nand/spi/Kconfig"
+
+endmenu
index 572b8fe..1a0e65b 100644 (file)
@@ -1,7 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0-only
 menuconfig MTD_ONENAND
        tristate "OneNAND Device Support"
-       depends on MTD
        depends on HAS_IOMEM
        help
          This enables support for accessing all type of OneNAND flash
index 113f610..1203775 100644 (file)
@@ -12,7 +12,6 @@ config MTD_NAND_ECC_SW_HAMMING_SMC
 
 menuconfig MTD_RAW_NAND
        tristate "Raw/Parallel NAND Device Support"
-       depends on MTD
        select MTD_NAND_CORE
        select MTD_NAND_ECC_SW_HAMMING
        help
@@ -415,6 +414,7 @@ config MTD_NAND_TEGRA
 config MTD_NAND_STM32_FMC2
        tristate "Support for NAND controller on STM32MP SoCs"
        depends on MACH_STM32MP157 || COMPILE_TEST
+       select MFD_SYSCON
        help
          Enables support for NAND Flash chips on SoCs containing the FMC2
          NAND controller. This controller is found on STM32MP SoCs.
index 3711e7a..fdba155 100644 (file)
@@ -191,8 +191,8 @@ static int gpio_nand_exec_op(struct nand_chip *this,
        return ret;
 }
 
-static int gpio_nand_setup_data_interface(struct nand_chip *this, int csline,
-                                         const struct nand_data_interface *cf)
+static int gpio_nand_setup_interface(struct nand_chip *this, int csline,
+                                    const struct nand_interface_config *cf)
 {
        struct gpio_nand *priv = nand_get_controller_data(this);
        const struct nand_sdr_timings *sdr = nand_get_sdr_timings(cf);
@@ -217,7 +217,7 @@ static int gpio_nand_setup_data_interface(struct nand_chip *this, int csline,
 
 static const struct nand_controller_ops gpio_nand_ops = {
        .exec_op = gpio_nand_exec_op,
-       .setup_data_interface = gpio_nand_setup_data_interface,
+       .setup_interface = gpio_nand_setup_interface,
 };
 
 /*
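From here on, much of this pull is a mechanical rename in the raw NAND framework: the controller hook ->setup_data_interface() becomes ->setup_interface() and struct nand_data_interface becomes struct nand_interface_config, with unchanged semantics. A hedged sketch of a controller-side implementation after the rename; example_exec_op() and example_program_timings() are hypothetical helpers, the rest is framework API:

#include <linux/err.h>
#include <linux/mtd/rawnand.h>

static int example_exec_op(struct nand_chip *chip,
			   const struct nand_operation *op, bool check_only);
static void example_program_timings(struct nand_chip *chip,
				    const struct nand_sdr_timings *sdr);

static int example_setup_interface(struct nand_chip *chip, int csline,
				   const struct nand_interface_config *conf)
{
	const struct nand_sdr_timings *sdr = nand_get_sdr_timings(conf);

	if (IS_ERR(sdr))
		return PTR_ERR(sdr);

	/* First pass only checks whether the timings are achievable. */
	if (csline == NAND_DATA_IFACE_CHECK_ONLY)
		return 0;

	example_program_timings(chip, sdr);	/* hypothetical helper */

	return 0;
}

static const struct nand_controller_ops example_controller_ops = {
	.exec_op	 = example_exec_op,
	.setup_interface = example_setup_interface,
};
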
index 7141dcc..12c643e 100644 (file)
@@ -854,8 +854,8 @@ static int anfc_exec_op(struct nand_chip *chip,
        return nand_op_parser_exec_op(chip, &anfc_op_parser, op, check_only);
 }
 
-static int anfc_setup_data_interface(struct nand_chip *chip, int target,
-                                    const struct nand_data_interface *conf)
+static int anfc_setup_interface(struct nand_chip *chip, int target,
+                               const struct nand_interface_config *conf)
 {
        struct anand *anand = to_anand(chip);
        struct arasan_nfc *nfc = to_anfc(chip->controller);
@@ -1083,7 +1083,7 @@ static void anfc_detach_chip(struct nand_chip *chip)
 
 static const struct nand_controller_ops anfc_ops = {
        .exec_op = anfc_exec_op,
-       .setup_data_interface = anfc_setup_data_interface,
+       .setup_interface = anfc_setup_interface,
        .attach_chip = anfc_attach_chip,
        .detach_chip = anfc_detach_chip,
 };
index 46a3724..c9818f5 100644 (file)
@@ -200,8 +200,8 @@ struct atmel_nand_controller_ops {
        void (*nand_init)(struct atmel_nand_controller *nc,
                          struct atmel_nand *nand);
        int (*ecc_init)(struct nand_chip *chip);
-       int (*setup_data_interface)(struct atmel_nand *nand, int csline,
-                                   const struct nand_data_interface *conf);
+       int (*setup_interface)(struct atmel_nand *nand, int csline,
+                              const struct nand_interface_config *conf);
 };
 
 struct atmel_nand_controller_caps {
@@ -1168,7 +1168,7 @@ static int atmel_hsmc_nand_ecc_init(struct nand_chip *chip)
 }
 
 static int atmel_smc_nand_prepare_smcconf(struct atmel_nand *nand,
-                                       const struct nand_data_interface *conf,
+                                       const struct nand_interface_config *conf,
                                        struct atmel_smc_cs_conf *smcconf)
 {
        u32 ncycles, totalcycles, timeps, mckperiodps;
@@ -1397,9 +1397,9 @@ static int atmel_smc_nand_prepare_smcconf(struct atmel_nand *nand,
        return 0;
 }
 
-static int atmel_smc_nand_setup_data_interface(struct atmel_nand *nand,
+static int atmel_smc_nand_setup_interface(struct atmel_nand *nand,
                                        int csline,
-                                       const struct nand_data_interface *conf)
+                                       const struct nand_interface_config *conf)
 {
        struct atmel_nand_controller *nc;
        struct atmel_smc_cs_conf smcconf;
@@ -1422,9 +1422,9 @@ static int atmel_smc_nand_setup_data_interface(struct atmel_nand *nand,
        return 0;
 }
 
-static int atmel_hsmc_nand_setup_data_interface(struct atmel_nand *nand,
+static int atmel_hsmc_nand_setup_interface(struct atmel_nand *nand,
                                        int csline,
-                                       const struct nand_data_interface *conf)
+                                       const struct nand_interface_config *conf)
 {
        struct atmel_hsmc_nand_controller *nc;
        struct atmel_smc_cs_conf smcconf;
@@ -1452,8 +1452,8 @@ static int atmel_hsmc_nand_setup_data_interface(struct atmel_nand *nand,
        return 0;
 }
 
-static int atmel_nand_setup_data_interface(struct nand_chip *chip, int csline,
-                                       const struct nand_data_interface *conf)
+static int atmel_nand_setup_interface(struct nand_chip *chip, int csline,
+                                     const struct nand_interface_config *conf)
 {
        struct atmel_nand *nand = to_atmel_nand(chip);
        struct atmel_nand_controller *nc;
@@ -1464,7 +1464,7 @@ static int atmel_nand_setup_data_interface(struct nand_chip *chip, int csline,
            (csline < 0 && csline != NAND_DATA_IFACE_CHECK_ONLY))
                return -EINVAL;
 
-       return nc->caps->ops->setup_data_interface(nand, csline, conf);
+       return nc->caps->ops->setup_interface(nand, csline, conf);
 }
 
 static void atmel_nand_init(struct atmel_nand_controller *nc,
@@ -1483,7 +1483,7 @@ static void atmel_nand_init(struct atmel_nand_controller *nc,
        chip->legacy.write_buf = atmel_nand_write_buf;
        chip->legacy.select_chip = atmel_nand_select_chip;
 
-       if (!nc->mck || !nc->caps->ops->setup_data_interface)
+       if (!nc->mck || !nc->caps->ops->setup_interface)
                chip->options |= NAND_KEEP_TIMINGS;
 
        /* Some NANDs require a longer delay than the default one (20us). */
@@ -1956,7 +1956,7 @@ static int atmel_nand_attach_chip(struct nand_chip *chip)
 
 static const struct nand_controller_ops atmel_nand_controller_ops = {
        .attach_chip = atmel_nand_attach_chip,
-       .setup_data_interface = atmel_nand_setup_data_interface,
+       .setup_interface = atmel_nand_setup_interface,
 };
 
 static int atmel_nand_controller_init(struct atmel_nand_controller *nc,
@@ -2318,7 +2318,7 @@ static const struct atmel_nand_controller_ops atmel_hsmc_nc_ops = {
        .remove = atmel_hsmc_nand_controller_remove,
        .ecc_init = atmel_hsmc_nand_ecc_init,
        .nand_init = atmel_hsmc_nand_init,
-       .setup_data_interface = atmel_hsmc_nand_setup_data_interface,
+       .setup_interface = atmel_hsmc_nand_setup_interface,
 };
 
 static const struct atmel_nand_controller_caps atmel_sama5_nc_caps = {
@@ -2375,10 +2375,10 @@ atmel_smc_nand_controller_remove(struct atmel_nand_controller *nc)
 
 /*
  * The SMC reg layout of at91rm9200 is completely different which prevents us
- * from re-using atmel_smc_nand_setup_data_interface() for the
- * ->setup_data_interface() hook.
+ * from re-using atmel_smc_nand_setup_interface() for the
+ * ->setup_interface() hook.
  * At this point, there's no support for the at91rm9200 SMC IP, so we leave
- * ->setup_data_interface() unassigned.
+ * ->setup_interface() unassigned.
  */
 static const struct atmel_nand_controller_ops at91rm9200_nc_ops = {
        .probe = atmel_smc_nand_controller_probe,
@@ -2399,7 +2399,7 @@ static const struct atmel_nand_controller_ops atmel_smc_nc_ops = {
        .remove = atmel_smc_nand_controller_remove,
        .ecc_init = atmel_nand_ecc_init,
        .nand_init = atmel_smc_nand_init,
-       .setup_data_interface = atmel_smc_nand_setup_data_interface,
+       .setup_interface = atmel_smc_nand_setup_interface,
 };
 
 static const struct atmel_nand_controller_caps atmel_sam9260_nc_caps = {
index 44068e9..a4033d3 100644 (file)
@@ -1918,6 +1918,22 @@ static int brcmnand_edu_trans(struct brcmnand_host *host, u64 addr, u32 *buf,
        edu_writel(ctrl, EDU_STOP, 0); /* force stop */
        edu_readl(ctrl, EDU_STOP);
 
+       if (!ret && edu_cmd == EDU_CMD_READ) {
+               u64 err_addr = 0;
+
+               /*
+                * check for ECC errors here, subpage ECC errors are
+                * retained in ECC error address register
+                */
+               err_addr = brcmnand_get_uncorrecc_addr(ctrl);
+               if (!err_addr) {
+                       err_addr = brcmnand_get_correcc_addr(ctrl);
+                       if (err_addr)
+                               ret = -EUCLEAN;
+               } else
+                       ret = -EBADMSG;
+       }
+
        return ret;
 }
 
@@ -2124,6 +2140,7 @@ static int brcmnand_read(struct mtd_info *mtd, struct nand_chip *chip,
        u64 err_addr = 0;
        int err;
        bool retry = true;
+       bool edu_err = false;
 
        dev_dbg(ctrl->dev, "read %llx -> %p\n", (unsigned long long)addr, buf);
 
@@ -2141,6 +2158,10 @@ try_dmaread:
                        else
                                return -EIO;
                }
+
+               if (has_edu(ctrl) && err_addr)
+                       edu_err = true;
+
        } else {
                if (oob)
                        memset(oob, 0x99, mtd->oobsize);
@@ -2188,6 +2209,11 @@ try_dmaread:
        if (mtd_is_bitflip(err)) {
                unsigned int corrected = brcmnand_count_corrected(ctrl);
 
+               /* in case of EDU correctable error we read again using PIO */
+               if (edu_err)
+                       err = brcmnand_read_by_pio(mtd, chip, addr, trans, buf,
+                                                  oob, &err_addr);
+
                dev_dbg(ctrl->dev, "corrected error at 0x%llx\n",
                        (unsigned long long)err_addr);
                mtd->ecc_stats.corrected += corrected;
@@ -3023,8 +3049,9 @@ int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc)
                if (ret < 0)
                        goto err;
 
-               /* set edu transfer function to call */
-               ctrl->dma_trans = brcmnand_edu_trans;
+               if (has_edu(ctrl))
+                       /* set edu transfer function to call */
+                       ctrl->dma_trans = brcmnand_edu_trans;
        }
 
        /* Disable automatic device ID config, direct addressing */
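The brcmnand change makes the EDU read path report errors with the usual MTD convention: -EUCLEAN for corrected bitflips (re-read via PIO when needed) and -EBADMSG for uncorrectable data. For reference, callers discriminate the two with the mtd_is_bitflip()/mtd_is_eccerr() helpers from <linux/mtd/mtd.h>; example_handle_read_status() is a hypothetical illustration:

#include <linux/mtd/mtd.h>

static int example_handle_read_status(int err)
{
	if (mtd_is_bitflip(err))	/* -EUCLEAN: data corrected by ECC */
		return 0;		/* usable, but consider scrubbing  */

	if (mtd_is_eccerr(err))		/* -EBADMSG: uncorrectable         */
		return err;

	return err;			/* 0 on success, other -errno      */
}
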
index c4f273e..71516af 100644 (file)
@@ -2304,8 +2304,8 @@ static inline u32 calc_tdvw(u32 trp_cnt, u32 clk_period, u32 trhoh_min,
 }
 
 static int
-cadence_nand_setup_data_interface(struct nand_chip *chip, int chipnr,
-                                 const struct nand_data_interface *conf)
+cadence_nand_setup_interface(struct nand_chip *chip, int chipnr,
+                            const struct nand_interface_config *conf)
 {
        const struct nand_sdr_timings *sdr;
        struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
@@ -2691,7 +2691,7 @@ static int cadence_nand_attach_chip(struct nand_chip *chip)
 static const struct nand_controller_ops cadence_nand_controller_ops = {
        .attach_chip = cadence_nand_attach_chip,
        .exec_op = cadence_nand_exec_op,
-       .setup_data_interface = cadence_nand_setup_data_interface,
+       .setup_interface = cadence_nand_setup_interface,
 };
 
 static int cadence_nand_chip_init(struct cdns_nand_ctrl *cdns_ctrl,
index 4e6e157..9d99dad 100644 (file)
@@ -761,8 +761,8 @@ static int denali_write_page(struct nand_chip *chip, const u8 *buf,
        return denali_page_xfer(chip, (void *)buf, mtd->writesize, page, true);
 }
 
-static int denali_setup_data_interface(struct nand_chip *chip, int chipnr,
-                                      const struct nand_data_interface *conf)
+static int denali_setup_interface(struct nand_chip *chip, int chipnr,
+                                 const struct nand_interface_config *conf)
 {
        static const unsigned int data_setup_on_host = 10000;
        struct denali_controller *denali = to_denali_controller(chip);
@@ -1173,7 +1173,7 @@ static int denali_exec_op(struct nand_chip *chip,
 static const struct nand_controller_ops denali_controller_ops = {
        .attach_chip = denali_attach_chip,
        .exec_op = denali_exec_op,
-       .setup_data_interface = denali_setup_data_interface,
+       .setup_interface = denali_setup_interface,
 };
 
 int denali_chip_init(struct denali_controller *denali,
@@ -1230,7 +1230,7 @@ int denali_chip_init(struct denali_controller *denali,
                chip->buf_align = 16;
        }
 
-       /* clk rate info is needed for setup_data_interface */
+       /* clk rate info is needed for setup_interface */
        if (!denali->clk_rate || !denali->clk_x_rate)
                chip->options |= NAND_KEEP_TIMINGS;
 
index 627deb2..197850a 100644 (file)
 #include <linux/mtd/nand_ecc.h>
 #include <linux/mtd/partitions.h>
 #include <linux/mtd/mtd.h>
-#include <linux/of_address.h>
 #include <linux/of_platform.h>
-#include <linux/of_gpio.h>
 #include <linux/io.h>
 #include <linux/slab.h>
 #include <asm/fsl_lbc.h>
 
-#define FSL_UPM_WAIT_RUN_PATTERN  0x1
-#define FSL_UPM_WAIT_WRITE_BYTE   0x2
-#define FSL_UPM_WAIT_WRITE_BUFFER 0x4
-
 struct fsl_upm_nand {
+       struct nand_controller base;
        struct device *dev;
        struct nand_chip chip;
-       int last_ctrl;
-       struct mtd_partition *parts;
        struct fsl_upm upm;
        uint8_t upm_addr_offset;
        uint8_t upm_cmd_offset;
        void __iomem *io_base;
-       int rnb_gpio[NAND_MAX_CHIPS];
+       struct gpio_desc *rnb_gpio[NAND_MAX_CHIPS];
        uint32_t mchip_offsets[NAND_MAX_CHIPS];
        uint32_t mchip_count;
        uint32_t mchip_number;
-       int chip_delay;
-       uint32_t wait_flags;
 };
 
 static inline struct fsl_upm_nand *to_fsl_upm_nand(struct mtd_info *mtdinfo)
@@ -48,106 +39,6 @@ static inline struct fsl_upm_nand *to_fsl_upm_nand(struct mtd_info *mtdinfo)
                            chip);
 }
 
-static int fun_chip_ready(struct nand_chip *chip)
-{
-       struct fsl_upm_nand *fun = to_fsl_upm_nand(nand_to_mtd(chip));
-
-       if (gpio_get_value(fun->rnb_gpio[fun->mchip_number]))
-               return 1;
-
-       dev_vdbg(fun->dev, "busy\n");
-       return 0;
-}
-
-static void fun_wait_rnb(struct fsl_upm_nand *fun)
-{
-       if (fun->rnb_gpio[fun->mchip_number] >= 0) {
-               struct mtd_info *mtd = nand_to_mtd(&fun->chip);
-               int cnt = 1000000;
-
-               while (--cnt && !fun_chip_ready(&fun->chip))
-                       cpu_relax();
-               if (!cnt)
-                       dev_err(fun->dev, "tired waiting for RNB\n");
-       } else {
-               ndelay(100);
-       }
-}
-
-static void fun_cmd_ctrl(struct nand_chip *chip, int cmd, unsigned int ctrl)
-{
-       struct fsl_upm_nand *fun = to_fsl_upm_nand(nand_to_mtd(chip));
-       u32 mar;
-
-       if (!(ctrl & fun->last_ctrl)) {
-               fsl_upm_end_pattern(&fun->upm);
-
-               if (cmd == NAND_CMD_NONE)
-                       return;
-
-               fun->last_ctrl = ctrl & (NAND_ALE | NAND_CLE);
-       }
-
-       if (ctrl & NAND_CTRL_CHANGE) {
-               if (ctrl & NAND_ALE)
-                       fsl_upm_start_pattern(&fun->upm, fun->upm_addr_offset);
-               else if (ctrl & NAND_CLE)
-                       fsl_upm_start_pattern(&fun->upm, fun->upm_cmd_offset);
-       }
-
-       mar = (cmd << (32 - fun->upm.width)) |
-               fun->mchip_offsets[fun->mchip_number];
-       fsl_upm_run_pattern(&fun->upm, chip->legacy.IO_ADDR_R, mar);
-
-       if (fun->wait_flags & FSL_UPM_WAIT_RUN_PATTERN)
-               fun_wait_rnb(fun);
-}
-
-static void fun_select_chip(struct nand_chip *chip, int mchip_nr)
-{
-       struct fsl_upm_nand *fun = to_fsl_upm_nand(nand_to_mtd(chip));
-
-       if (mchip_nr == -1) {
-               chip->legacy.cmd_ctrl(chip, NAND_CMD_NONE, 0 | NAND_CTRL_CHANGE);
-       } else if (mchip_nr >= 0 && mchip_nr < NAND_MAX_CHIPS) {
-               fun->mchip_number = mchip_nr;
-               chip->legacy.IO_ADDR_R = fun->io_base + fun->mchip_offsets[mchip_nr];
-               chip->legacy.IO_ADDR_W = chip->legacy.IO_ADDR_R;
-       } else {
-               BUG();
-       }
-}
-
-static uint8_t fun_read_byte(struct nand_chip *chip)
-{
-       struct fsl_upm_nand *fun = to_fsl_upm_nand(nand_to_mtd(chip));
-
-       return in_8(fun->chip.legacy.IO_ADDR_R);
-}
-
-static void fun_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
-{
-       struct fsl_upm_nand *fun = to_fsl_upm_nand(nand_to_mtd(chip));
-       int i;
-
-       for (i = 0; i < len; i++)
-               buf[i] = in_8(fun->chip.legacy.IO_ADDR_R);
-}
-
-static void fun_write_buf(struct nand_chip *chip, const uint8_t *buf, int len)
-{
-       struct fsl_upm_nand *fun = to_fsl_upm_nand(nand_to_mtd(chip));
-       int i;
-
-       for (i = 0; i < len; i++) {
-               out_8(fun->chip.legacy.IO_ADDR_W, buf[i]);
-               if (fun->wait_flags & FSL_UPM_WAIT_WRITE_BYTE)
-                       fun_wait_rnb(fun);
-       }
-       if (fun->wait_flags & FSL_UPM_WAIT_WRITE_BUFFER)
-               fun_wait_rnb(fun);
-}
-
 static int fun_chip_init(struct fsl_upm_nand *fun,
                         const struct device_node *upm_np,
                         const struct resource *io_res)
@@ -156,21 +47,9 @@ static int fun_chip_init(struct fsl_upm_nand *fun,
        int ret;
        struct device_node *flash_np;
 
-       fun->chip.legacy.IO_ADDR_R = fun->io_base;
-       fun->chip.legacy.IO_ADDR_W = fun->io_base;
-       fun->chip.legacy.cmd_ctrl = fun_cmd_ctrl;
-       fun->chip.legacy.chip_delay = fun->chip_delay;
-       fun->chip.legacy.read_byte = fun_read_byte;
-       fun->chip.legacy.read_buf = fun_read_buf;
-       fun->chip.legacy.write_buf = fun_write_buf;
        fun->chip.ecc.mode = NAND_ECC_SOFT;
        fun->chip.ecc.algo = NAND_ECC_HAMMING;
-       if (fun->mchip_count > 1)
-               fun->chip.legacy.select_chip = fun_select_chip;
-
-       if (fun->rnb_gpio[0] >= 0)
-               fun->chip.legacy.dev_ready = fun_chip_ready;
-
+       fun->chip.controller = &fun->base;
        mtd->dev.parent = fun->dev;
 
        flash_np = of_get_next_child(upm_np, NULL);
@@ -178,8 +57,9 @@ static int fun_chip_init(struct fsl_upm_nand *fun,
                return -ENODEV;
 
        nand_set_flash_node(&fun->chip, flash_np);
-       mtd->name = kasprintf(GFP_KERNEL, "0x%llx.%pOFn", (u64)io_res->start,
-                             flash_np);
+       mtd->name = devm_kasprintf(fun->dev, GFP_KERNEL, "0x%llx.%pOFn",
+                                  (u64)io_res->start,
+                                  flash_np);
        if (!mtd->name) {
                ret = -ENOMEM;
                goto err;
@@ -192,51 +72,130 @@ static int fun_chip_init(struct fsl_upm_nand *fun,
        ret = mtd_device_register(mtd, NULL, 0);
 err:
        of_node_put(flash_np);
-       if (ret)
-               kfree(mtd->name);
        return ret;
 }
 
+static int func_exec_instr(struct nand_chip *chip,
+                          const struct nand_op_instr *instr)
+{
+       struct fsl_upm_nand *fun = to_fsl_upm_nand(nand_to_mtd(chip));
+       u32 mar, reg_offs = fun->mchip_offsets[fun->mchip_number];
+       unsigned int i;
+       const u8 *out;
+       u8 *in;
+
+       switch (instr->type) {
+       case NAND_OP_CMD_INSTR:
+               fsl_upm_start_pattern(&fun->upm, fun->upm_cmd_offset);
+               mar = (instr->ctx.cmd.opcode << (32 - fun->upm.width)) |
+                     reg_offs;
+               fsl_upm_run_pattern(&fun->upm, fun->io_base + reg_offs, mar);
+               fsl_upm_end_pattern(&fun->upm);
+               return 0;
+
+       case NAND_OP_ADDR_INSTR:
+               fsl_upm_start_pattern(&fun->upm, fun->upm_addr_offset);
+               for (i = 0; i < instr->ctx.addr.naddrs; i++) {
+                       mar = (instr->ctx.addr.addrs[i] << (32 - fun->upm.width)) |
+                             reg_offs;
+                       fsl_upm_run_pattern(&fun->upm, fun->io_base + reg_offs, mar);
+               }
+               fsl_upm_end_pattern(&fun->upm);
+               return 0;
+
+       case NAND_OP_DATA_IN_INSTR:
+               in = instr->ctx.data.buf.in;
+               for (i = 0; i < instr->ctx.data.len; i++)
+                       in[i] = in_8(fun->io_base + reg_offs);
+               return 0;
+
+       case NAND_OP_DATA_OUT_INSTR:
+               out = instr->ctx.data.buf.out;
+               for (i = 0; i < instr->ctx.data.len; i++)
+                       out_8(fun->io_base + reg_offs, out[i]);
+               return 0;
+
+       case NAND_OP_WAITRDY_INSTR:
+               if (!fun->rnb_gpio[fun->mchip_number])
+                       return nand_soft_waitrdy(chip, instr->ctx.waitrdy.timeout_ms);
+
+               return nand_gpio_waitrdy(chip, fun->rnb_gpio[fun->mchip_number],
+                                        instr->ctx.waitrdy.timeout_ms);
+
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int fun_exec_op(struct nand_chip *chip, const struct nand_operation *op,
+                      bool check_only)
+{
+       struct fsl_upm_nand *fun = to_fsl_upm_nand(nand_to_mtd(chip));
+       unsigned int i;
+       int ret;
+
+       if (op->cs > NAND_MAX_CHIPS)
+               return -EINVAL;
+
+       if (check_only)
+               return 0;
+
+       fun->mchip_number = op->cs;
+
+       for (i = 0; i < op->ninstrs; i++) {
+               ret = func_exec_instr(chip, &op->instrs[i]);
+               if (ret)
+                       return ret;
+
+               if (op->instrs[i].delay_ns)
+                       ndelay(op->instrs[i].delay_ns);
+       }
+
+       return 0;
+}
+
+static const struct nand_controller_ops fun_ops = {
+       .exec_op = fun_exec_op,
+};
+
 static int fun_probe(struct platform_device *ofdev)
 {
        struct fsl_upm_nand *fun;
-       struct resource io_res;
+       struct resource *io_res;
        const __be32 *prop;
-       int rnb_gpio;
        int ret;
        int size;
        int i;
 
-       fun = kzalloc(sizeof(*fun), GFP_KERNEL);
+       fun = devm_kzalloc(&ofdev->dev, sizeof(*fun), GFP_KERNEL);
        if (!fun)
                return -ENOMEM;
 
-       ret = of_address_to_resource(ofdev->dev.of_node, 0, &io_res);
-       if (ret) {
-               dev_err(&ofdev->dev, "can't get IO base\n");
-               goto err1;
-       }
+       io_res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
+       fun->io_base = devm_ioremap_resource(&ofdev->dev, io_res);
+       if (IS_ERR(fun->io_base))
+               return PTR_ERR(fun->io_base);
 
-       ret = fsl_upm_find(io_res.start, &fun->upm);
+       ret = fsl_upm_find(io_res->start, &fun->upm);
        if (ret) {
                dev_err(&ofdev->dev, "can't find UPM\n");
-               goto err1;
+               return ret;
        }
 
        prop = of_get_property(ofdev->dev.of_node, "fsl,upm-addr-offset",
                               &size);
        if (!prop || size != sizeof(uint32_t)) {
                dev_err(&ofdev->dev, "can't get UPM address offset\n");
-               ret = -EINVAL;
-               goto err1;
+               return -EINVAL;
        }
        fun->upm_addr_offset = *prop;
 
        prop = of_get_property(ofdev->dev.of_node, "fsl,upm-cmd-offset", &size);
        if (!prop || size != sizeof(uint32_t)) {
                dev_err(&ofdev->dev, "can't get UPM command offset\n");
-               ret = -EINVAL;
-               goto err1;
+               return -EINVAL;
        }
        fun->upm_cmd_offset = *prop;
 
@@ -246,7 +205,7 @@ static int fun_probe(struct platform_device *ofdev)
                fun->mchip_count = size / sizeof(uint32_t);
                if (fun->mchip_count >= NAND_MAX_CHIPS) {
                        dev_err(&ofdev->dev, "too much multiple chips\n");
-                       goto err1;
+                       return -EINVAL;
                }
                for (i = 0; i < fun->mchip_count; i++)
                        fun->mchip_offsets[i] = be32_to_cpu(prop[i]);
@@ -255,63 +214,26 @@ static int fun_probe(struct platform_device *ofdev)
        }
 
        for (i = 0; i < fun->mchip_count; i++) {
-               fun->rnb_gpio[i] = -1;
-               rnb_gpio = of_get_gpio(ofdev->dev.of_node, i);
-               if (rnb_gpio >= 0) {
-                       ret = gpio_request(rnb_gpio, dev_name(&ofdev->dev));
-                       if (ret) {
-                               dev_err(&ofdev->dev,
-                                       "can't request RNB gpio #%d\n", i);
-                               goto err2;
-                       }
-                       gpio_direction_input(rnb_gpio);
-                       fun->rnb_gpio[i] = rnb_gpio;
-               } else if (rnb_gpio == -EINVAL) {
+               fun->rnb_gpio[i] = devm_gpiod_get_index_optional(&ofdev->dev,
+                                                                NULL, i,
+                                                                GPIOD_IN);
+               if (IS_ERR(fun->rnb_gpio[i])) {
                        dev_err(&ofdev->dev, "RNB gpio #%d is invalid\n", i);
-                       goto err2;
+                       return PTR_ERR(fun->rnb_gpio[i]);
                }
        }
 
-       prop = of_get_property(ofdev->dev.of_node, "chip-delay", NULL);
-       if (prop)
-               fun->chip_delay = be32_to_cpup(prop);
-       else
-               fun->chip_delay = 50;
-
-       prop = of_get_property(ofdev->dev.of_node, "fsl,upm-wait-flags", &size);
-       if (prop && size == sizeof(uint32_t))
-               fun->wait_flags = be32_to_cpup(prop);
-       else
-               fun->wait_flags = FSL_UPM_WAIT_RUN_PATTERN |
-                                 FSL_UPM_WAIT_WRITE_BYTE;
-
-       fun->io_base = devm_ioremap(&ofdev->dev, io_res.start,
-                                           resource_size(&io_res));
-       if (!fun->io_base) {
-               ret = -ENOMEM;
-               goto err2;
-       }
-
+       nand_controller_init(&fun->base);
+       fun->base.ops = &fun_ops;
        fun->dev = &ofdev->dev;
-       fun->last_ctrl = NAND_CLE;
 
-       ret = fun_chip_init(fun, ofdev->dev.of_node, &io_res);
+       ret = fun_chip_init(fun, ofdev->dev.of_node, io_res);
        if (ret)
-               goto err2;
+               return ret;
 
        dev_set_drvdata(&ofdev->dev, fun);
 
        return 0;
-err2:
-       for (i = 0; i < fun->mchip_count; i++) {
-               if (fun->rnb_gpio[i] < 0)
-                       break;
-               gpio_free(fun->rnb_gpio[i]);
-       }
-err1:
-       kfree(fun);
-
-       return ret;
 }
 
 static int fun_remove(struct platform_device *ofdev)
@@ -319,20 +241,11 @@ static int fun_remove(struct platform_device *ofdev)
        struct fsl_upm_nand *fun = dev_get_drvdata(&ofdev->dev);
        struct nand_chip *chip = &fun->chip;
        struct mtd_info *mtd = nand_to_mtd(chip);
-       int ret, i;
+       int ret;
 
        ret = mtd_device_unregister(mtd);
        WARN_ON(ret);
        nand_cleanup(chip);
-       kfree(mtd->name);
-
-       for (i = 0; i < fun->mchip_count; i++) {
-               if (fun->rnb_gpio[i] < 0)
-                       break;
-               gpio_free(fun->rnb_gpio[i]);
-       }
-
-       kfree(fun);
 
        return 0;
 }
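The fsl_upm rewrite above drops the legacy cmd_ctrl()/dev_ready() callbacks in favour of ->exec_op(). Ready/busy handling now relies on devm_gpiod_get_index_optional(), which returns NULL rather than an error when no GPIO is described, so the WAITRDY instruction can fall back to nand_soft_waitrdy(). A minimal sketch of that selection; example_waitrdy() is hypothetical, the two nand_*_waitrdy() helpers are real framework calls:

#include <linux/gpio/consumer.h>
#include <linux/mtd/rawnand.h>

static int example_waitrdy(struct nand_chip *chip, struct gpio_desc *rb,
			   unsigned long timeout_ms)
{
	if (!rb)	/* no R/B# GPIO described: poll via status reads */
		return nand_soft_waitrdy(chip, timeout_ms);

	return nand_gpio_waitrdy(chip, rb, timeout_ms);
}
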
index 3909752..92ddc41 100644 (file)
@@ -327,8 +327,8 @@ static int fsmc_calc_timings(struct fsmc_nand_data *host,
        return 0;
 }
 
-static int fsmc_setup_data_interface(struct nand_chip *nand, int csline,
-                                    const struct nand_data_interface *conf)
+static int fsmc_setup_interface(struct nand_chip *nand, int csline,
+                               const struct nand_interface_config *conf)
 {
        struct fsmc_nand_data *host = nand_to_fsmc(nand);
        struct fsmc_nand_timings tims;
@@ -951,7 +951,7 @@ static int fsmc_nand_attach_chip(struct nand_chip *nand)
 static const struct nand_controller_ops fsmc_nand_controller_ops = {
        .attach_chip = fsmc_nand_attach_chip,
        .exec_op = fsmc_exec_op,
-       .setup_data_interface = fsmc_setup_data_interface,
+       .setup_interface = fsmc_setup_interface,
 };
 
 /**
index 938077e..3bd847c 100644 (file)
 #include <linux/mtd/nand-gpio.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
+#include <linux/delay.h>
 
 struct gpiomtd {
+       struct nand_controller  base;
+       void __iomem            *io;
        void __iomem            *io_sync;
        struct nand_chip        nand_chip;
        struct gpio_nand_platdata plat;
@@ -69,34 +72,99 @@ static void gpio_nand_dosync(struct gpiomtd *gpiomtd)
 static inline void gpio_nand_dosync(struct gpiomtd *gpiomtd) {}
 #endif
 
-static void gpio_nand_cmd_ctrl(struct nand_chip *chip, int cmd,
-                              unsigned int ctrl)
+static int gpio_nand_exec_instr(struct nand_chip *chip,
+                               const struct nand_op_instr *instr)
 {
        struct gpiomtd *gpiomtd = gpio_nand_getpriv(nand_to_mtd(chip));
+       unsigned int i;
 
-       gpio_nand_dosync(gpiomtd);
+       switch (instr->type) {
+       case NAND_OP_CMD_INSTR:
+               gpio_nand_dosync(gpiomtd);
+               gpiod_set_value(gpiomtd->cle, 1);
+               gpio_nand_dosync(gpiomtd);
+               writeb(instr->ctx.cmd.opcode, gpiomtd->io);
+               gpio_nand_dosync(gpiomtd);
+               gpiod_set_value(gpiomtd->cle, 0);
+               return 0;
+
+       case NAND_OP_ADDR_INSTR:
+               gpio_nand_dosync(gpiomtd);
+               gpiod_set_value(gpiomtd->ale, 1);
+               gpio_nand_dosync(gpiomtd);
+               for (i = 0; i < instr->ctx.addr.naddrs; i++)
+                       writeb(instr->ctx.addr.addrs[i], gpiomtd->io);
+               gpio_nand_dosync(gpiomtd);
+               gpiod_set_value(gpiomtd->ale, 0);
+               return 0;
+
+       case NAND_OP_DATA_IN_INSTR:
+               gpio_nand_dosync(gpiomtd);
+               if ((chip->options & NAND_BUSWIDTH_16) &&
+                   !instr->ctx.data.force_8bit)
+                       ioread16_rep(gpiomtd->io, instr->ctx.data.buf.in,
+                                    instr->ctx.data.len / 2);
+               else
+                       ioread8_rep(gpiomtd->io, instr->ctx.data.buf.in,
+                                   instr->ctx.data.len);
+               return 0;
 
-       if (ctrl & NAND_CTRL_CHANGE) {
-               if (gpiomtd->nce)
-                       gpiod_set_value(gpiomtd->nce, !(ctrl & NAND_NCE));
-               gpiod_set_value(gpiomtd->cle, !!(ctrl & NAND_CLE));
-               gpiod_set_value(gpiomtd->ale, !!(ctrl & NAND_ALE));
+       case NAND_OP_DATA_OUT_INSTR:
                gpio_nand_dosync(gpiomtd);
+               if ((chip->options & NAND_BUSWIDTH_16) &&
+                   !instr->ctx.data.force_8bit)
+                       iowrite16_rep(gpiomtd->io, instr->ctx.data.buf.out,
+                                     instr->ctx.data.len / 2);
+               else
+                       iowrite8_rep(gpiomtd->io, instr->ctx.data.buf.out,
+                                    instr->ctx.data.len);
+               return 0;
+
+       case NAND_OP_WAITRDY_INSTR:
+               if (!gpiomtd->rdy)
+                       return nand_soft_waitrdy(chip, instr->ctx.waitrdy.timeout_ms);
+
+               return nand_gpio_waitrdy(chip, gpiomtd->rdy,
+                                        instr->ctx.waitrdy.timeout_ms);
+
+       default:
+               return -EINVAL;
        }
-       if (cmd == NAND_CMD_NONE)
-               return;
 
-       writeb(cmd, gpiomtd->nand_chip.legacy.IO_ADDR_W);
-       gpio_nand_dosync(gpiomtd);
+       return 0;
 }
 
-static int gpio_nand_devready(struct nand_chip *chip)
+static int gpio_nand_exec_op(struct nand_chip *chip,
+                            const struct nand_operation *op,
+                            bool check_only)
 {
        struct gpiomtd *gpiomtd = gpio_nand_getpriv(nand_to_mtd(chip));
+       unsigned int i;
+       int ret = 0;
+
+       if (check_only)
+               return 0;
 
-       return gpiod_get_value(gpiomtd->rdy);
+       gpio_nand_dosync(gpiomtd);
+       gpiod_set_value(gpiomtd->nce, 0);
+       for (i = 0; i < op->ninstrs; i++) {
+               ret = gpio_nand_exec_instr(chip, &op->instrs[i]);
+               if (ret)
+                       break;
+
+               if (op->instrs[i].delay_ns)
+                       ndelay(op->instrs[i].delay_ns);
+       }
+       gpio_nand_dosync(gpiomtd);
+       gpiod_set_value(gpiomtd->nce, 1);
+
+       return ret;
 }
 
+static const struct nand_controller_ops gpio_nand_ops = {
+       .exec_op = gpio_nand_exec_op,
+};
+
 #ifdef CONFIG_OF
 static const struct of_device_id gpio_nand_id_table[] = {
        { .compatible = "gpio-control-nand" },
@@ -225,9 +293,9 @@ static int gpio_nand_probe(struct platform_device *pdev)
        chip = &gpiomtd->nand_chip;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       chip->legacy.IO_ADDR_R = devm_ioremap_resource(dev, res);
-       if (IS_ERR(chip->legacy.IO_ADDR_R))
-               return PTR_ERR(chip->legacy.IO_ADDR_R);
+       gpiomtd->io = devm_ioremap_resource(dev, res);
+       if (IS_ERR(gpiomtd->io))
+               return PTR_ERR(gpiomtd->io);
 
        res = gpio_nand_get_io_sync(pdev);
        if (res) {
@@ -269,17 +337,15 @@ static int gpio_nand_probe(struct platform_device *pdev)
                ret = PTR_ERR(gpiomtd->rdy);
                goto out_ce;
        }
-       /* Using RDY pin */
-       if (gpiomtd->rdy)
-               chip->legacy.dev_ready = gpio_nand_devready;
+
+       nand_controller_init(&gpiomtd->base);
+       gpiomtd->base.ops = &gpio_nand_ops;
 
        nand_set_flash_node(chip, pdev->dev.of_node);
-       chip->legacy.IO_ADDR_W  = chip->legacy.IO_ADDR_R;
        chip->ecc.mode          = NAND_ECC_SOFT;
        chip->ecc.algo          = NAND_ECC_HAMMING;
        chip->options           = gpiomtd->plat.options;
-       chip->legacy.chip_delay = gpiomtd->plat.chip_delay;
-       chip->legacy.cmd_ctrl   = gpio_nand_cmd_ctrl;
+       chip->controller        = &gpiomtd->base;
 
        mtd                     = nand_to_mtd(chip);
        mtd->dev.parent         = dev;
index 061a8dd..5d4aee4 100644 (file)
@@ -736,8 +736,8 @@ static void gpmi_nfc_apply_timings(struct gpmi_nand_data *this)
        udelay(dll_wait_time_us);
 }
 
-static int gpmi_setup_data_interface(struct nand_chip *chip, int chipnr,
-                                    const struct nand_data_interface *conf)
+static int gpmi_setup_interface(struct nand_chip *chip, int chipnr,
+                               const struct nand_interface_config *conf)
 {
        struct gpmi_nand_data *this = nand_get_controller_data(chip);
        const struct nand_sdr_timings *sdr;
@@ -2400,7 +2400,7 @@ unmap:
 
 static const struct nand_controller_ops gpmi_nand_controller_ops = {
        .attach_chip = gpmi_nand_attach_chip,
-       .setup_data_interface = gpmi_setup_data_interface,
+       .setup_interface = gpmi_setup_interface,
        .exec_op = gpmi_nfc_exec_op,
 };
 
index 13fea64..54e3777 100644 (file)
@@ -90,8 +90,8 @@ static int jz4740_ecc_calculate(struct ingenic_ecc *ecc,
         * If the written data is completely 0xff, we also want to write 0xff as
         * ECC, otherwise we will get in trouble when doing subpage writes.
         */
-       if (memcmp(ecc_code, empty_block_ecc, ARRAY_SIZE(empty_block_ecc)) == 0)
-               memset(ecc_code, 0xff, ARRAY_SIZE(empty_block_ecc));
+       if (memcmp(ecc_code, empty_block_ecc, sizeof(empty_block_ecc)) == 0)
+               memset(ecc_code, 0xff, sizeof(empty_block_ecc));
 
        return 0;
 }
index 03866b0..012876e 100644 (file)
@@ -53,12 +53,12 @@ struct nand_manufacturer_ops {
 };
 
 /**
- * struct nand_manufacturer - NAND Flash Manufacturer structure
+ * struct nand_manufacturer_desc - NAND Flash Manufacturer descriptor
  * @name: Manufacturer name
  * @id: manufacturer ID code of device.
  * @ops: manufacturer operations
  */
-struct nand_manufacturer {
+struct nand_manufacturer_desc {
        int id;
        char *name;
        const struct nand_manufacturer_ops *ops;
@@ -79,14 +79,21 @@ extern const struct nand_manufacturer_ops toshiba_nand_manuf_ops;
 extern const struct mtd_pairing_scheme dist3_pairing_scheme;
 
 /* Core functions */
-const struct nand_manufacturer *nand_get_manufacturer(u8 id);
+const struct nand_manufacturer_desc *nand_get_manufacturer_desc(u8 id);
 int nand_bbm_get_next_page(struct nand_chip *chip, int page);
 int nand_markbad_bbm(struct nand_chip *chip, loff_t ofs);
 int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr,
                    int allowbbt);
-int onfi_fill_data_interface(struct nand_chip *chip,
-                            enum nand_data_interface_type type,
-                            int timing_mode);
+void onfi_fill_interface_config(struct nand_chip *chip,
+                               struct nand_interface_config *iface,
+                               enum nand_interface_type type,
+                               unsigned int timing_mode);
+unsigned int
+onfi_find_closest_sdr_mode(const struct nand_sdr_timings *spec_timings);
+int nand_choose_best_sdr_timings(struct nand_chip *chip,
+                                struct nand_interface_config *iface,
+                                struct nand_sdr_timings *spec_timings);
+const struct nand_interface_config *nand_get_reset_interface_config(void);
 int nand_get_features(struct nand_chip *chip, int addr, u8 *subfeature_param);
 int nand_set_features(struct nand_chip *chip, int addr, u8 *subfeature_param);
 int nand_read_page_raw_notsupp(struct nand_chip *chip, u8 *buf,
@@ -130,10 +137,10 @@ static inline int nand_exec_op(struct nand_chip *chip,
        return chip->controller->ops->exec_op(chip, op, false);
 }
 
-static inline bool nand_has_setup_data_iface(struct nand_chip *chip)
+static inline bool nand_controller_can_setup_interface(struct nand_chip *chip)
 {
        if (!chip->controller || !chip->controller->ops ||
-           !chip->controller->ops->setup_data_interface)
+           !chip->controller->ops->setup_interface)
                return false;
 
        if (chip->options & NAND_KEEP_TIMINGS)
index 260a043..8482d3b 100644 (file)
@@ -1096,6 +1096,8 @@ static int marvell_nfc_hw_ecc_hmg_do_write_page(struct nand_chip *chip,
                                                const u8 *oob_buf, bool raw,
                                                int page)
 {
+       const struct nand_sdr_timings *sdr =
+               nand_get_sdr_timings(nand_get_interface_config(chip));
        struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
        struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
        const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
@@ -1141,7 +1143,7 @@ static int marvell_nfc_hw_ecc_hmg_do_write_page(struct nand_chip *chip,
                return ret;
 
        ret = marvell_nfc_wait_op(chip,
-                                 PSEC_TO_MSEC(chip->data_interface.timings.sdr.tPROG_max));
+                                 PSEC_TO_MSEC(sdr->tPROG_max));
        return ret;
 }
 
@@ -1562,6 +1564,8 @@ static int marvell_nfc_hw_ecc_bch_write_page(struct nand_chip *chip,
                                             const u8 *buf,
                                             int oob_required, int page)
 {
+       const struct nand_sdr_timings *sdr =
+               nand_get_sdr_timings(nand_get_interface_config(chip));
        struct mtd_info *mtd = nand_to_mtd(chip);
        const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
        const u8 *data = buf;
@@ -1598,8 +1602,7 @@ static int marvell_nfc_hw_ecc_bch_write_page(struct nand_chip *chip,
                marvell_nfc_wait_ndrun(chip);
        }
 
-       ret = marvell_nfc_wait_op(chip,
-                                 PSEC_TO_MSEC(chip->data_interface.timings.sdr.tPROG_max));
+       ret = marvell_nfc_wait_op(chip, PSEC_TO_MSEC(sdr->tPROG_max));
 
        marvell_nfc_disable_hw_ecc(chip);
 
@@ -2305,9 +2308,8 @@ static struct nand_bbt_descr bbt_mirror_descr = {
        .pattern = bbt_mirror_pattern
 };
 
-static int marvell_nfc_setup_data_interface(struct nand_chip *chip, int chipnr,
-                                           const struct nand_data_interface
-                                           *conf)
+static int marvell_nfc_setup_interface(struct nand_chip *chip, int chipnr,
+                                      const struct nand_interface_config *conf)
 {
        struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
        struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
@@ -2508,7 +2510,7 @@ static int marvell_nand_attach_chip(struct nand_chip *chip)
 static const struct nand_controller_ops marvell_nand_controller_ops = {
        .attach_chip = marvell_nand_attach_chip,
        .exec_op = marvell_nfc_exec_op,
-       .setup_data_interface = marvell_nfc_setup_data_interface,
+       .setup_interface = marvell_nfc_setup_interface,
 };
 
 static int marvell_nand_chip_init(struct device *dev, struct marvell_nfc *nfc,
@@ -2644,7 +2646,7 @@ static int marvell_nand_chip_init(struct device *dev, struct marvell_nfc *nfc,
 
        /*
         * Save a reference value for timing registers before
-        * ->setup_data_interface() is called.
+        * ->setup_interface() is called.
         */
        marvell_nand->ndtr0 = readl_relaxed(nfc->regs + NDTR0);
        marvell_nand->ndtr1 = readl_relaxed(nfc->regs + NDTR1);
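Alongside the rename, drivers stop dereferencing chip->data_interface directly and go through the nand_get_interface_config() accessor, as the marvell and meson hunks show. A small sketch of the accessor pattern; it assumes an SDR interface has already been applied and omits error checking:

#include <linux/mtd/rawnand.h>

static u64 example_tprog_max_ms(struct nand_chip *chip)
{
	const struct nand_sdr_timings *sdr =
		nand_get_sdr_timings(nand_get_interface_config(chip));

	return PSEC_TO_MSEC(sdr->tPROG_max);
}
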
index 3f37647..0e5829a 100644 (file)
@@ -573,10 +573,10 @@ static int meson_nfc_write_buf(struct nand_chip *nand, u8 *buf, int len)
 static int meson_nfc_rw_cmd_prepare_and_execute(struct nand_chip *nand,
                                                int page, bool in)
 {
+       const struct nand_sdr_timings *sdr =
+               nand_get_sdr_timings(nand_get_interface_config(nand));
        struct mtd_info *mtd = nand_to_mtd(nand);
        struct meson_nfc *nfc = nand_get_controller_data(nand);
-       const struct nand_sdr_timings *sdr =
-               nand_get_sdr_timings(&nand->data_interface);
        u32 *addrs = nfc->cmdfifo.rw.addrs;
        u32 cs = nfc->param.chip_select;
        u32 cmd0, cmd_num, row_start;
@@ -626,9 +626,9 @@ static int meson_nfc_rw_cmd_prepare_and_execute(struct nand_chip *nand,
 static int meson_nfc_write_page_sub(struct nand_chip *nand,
                                    int page, int raw)
 {
-       struct mtd_info *mtd = nand_to_mtd(nand);
        const struct nand_sdr_timings *sdr =
-               nand_get_sdr_timings(&nand->data_interface);
+               nand_get_sdr_timings(nand_get_interface_config(nand));
+       struct mtd_info *mtd = nand_to_mtd(nand);
        struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
        struct meson_nfc *nfc = nand_get_controller_data(nand);
        int data_len, info_len;
@@ -1097,8 +1097,8 @@ static int meson_chip_buffer_init(struct nand_chip *nand)
 }
 
 static
-int meson_nfc_setup_data_interface(struct nand_chip *nand, int csline,
-                                  const struct nand_data_interface *conf)
+int meson_nfc_setup_interface(struct nand_chip *nand, int csline,
+                             const struct nand_interface_config *conf)
 {
        struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
        const struct nand_sdr_timings *timings;
@@ -1222,7 +1222,7 @@ static int meson_nand_attach_chip(struct nand_chip *nand)
 static const struct nand_controller_ops meson_nand_controller_ops = {
        .attach_chip = meson_nand_attach_chip,
        .detach_chip = meson_nand_detach_chip,
-       .setup_data_interface = meson_nfc_setup_data_interface,
+       .setup_interface = meson_nfc_setup_interface,
        .exec_op = meson_nfc_exec_op,
 };
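
The &nand->data_interface accesses in the hunks above are replaced by the nand_get_interface_config() accessor. Judging from its uses throughout this series, it is presumably a thin inline returning the configuration currently applied to the chip, along these lines (a sketch, not the authoritative definition):

static inline const struct nand_interface_config *
nand_get_interface_config(struct nand_chip *chip)
{
        return chip->current_interface_config;
}
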
 
index c1a6e31..ad1b55d 100644 (file)
@@ -387,44 +387,6 @@ static int mtk_nfc_hw_runtime_config(struct mtd_info *mtd)
        return 0;
 }
 
-static void mtk_nfc_select_chip(struct nand_chip *nand, int chip)
-{
-       struct mtk_nfc *nfc = nand_get_controller_data(nand);
-       struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(nand);
-
-       if (chip < 0)
-               return;
-
-       mtk_nfc_hw_runtime_config(nand_to_mtd(nand));
-
-       nfi_writel(nfc, mtk_nand->sels[chip], NFI_CSEL);
-}
-
-static int mtk_nfc_dev_ready(struct nand_chip *nand)
-{
-       struct mtk_nfc *nfc = nand_get_controller_data(nand);
-
-       if (nfi_readl(nfc, NFI_STA) & STA_BUSY)
-               return 0;
-
-       return 1;
-}
-
-static void mtk_nfc_cmd_ctrl(struct nand_chip *chip, int dat,
-                            unsigned int ctrl)
-{
-       struct mtk_nfc *nfc = nand_get_controller_data(chip);
-
-       if (ctrl & NAND_ALE) {
-               mtk_nfc_send_address(nfc, dat);
-       } else if (ctrl & NAND_CLE) {
-               mtk_nfc_hw_reset(nfc);
-
-               nfi_writew(nfc, CNFG_OP_CUST, NFI_CNFG);
-               mtk_nfc_send_command(nfc, dat);
-       }
-}
-
 static inline void mtk_nfc_wait_ioready(struct mtk_nfc *nfc)
 {
        int rc;
@@ -501,8 +463,76 @@ static void mtk_nfc_write_buf(struct nand_chip *chip, const u8 *buf, int len)
                mtk_nfc_write_byte(chip, buf[i]);
 }
 
-static int mtk_nfc_setup_data_interface(struct nand_chip *chip, int csline,
-                                       const struct nand_data_interface *conf)
+static int mtk_nfc_exec_instr(struct nand_chip *chip,
+                             const struct nand_op_instr *instr)
+{
+       struct mtk_nfc *nfc = nand_get_controller_data(chip);
+       unsigned int i;
+       u32 status;
+
+       switch (instr->type) {
+       case NAND_OP_CMD_INSTR:
+               mtk_nfc_send_command(nfc, instr->ctx.cmd.opcode);
+               return 0;
+       case NAND_OP_ADDR_INSTR:
+               for (i = 0; i < instr->ctx.addr.naddrs; i++)
+                       mtk_nfc_send_address(nfc, instr->ctx.addr.addrs[i]);
+               return 0;
+       case NAND_OP_DATA_IN_INSTR:
+               mtk_nfc_read_buf(chip, instr->ctx.data.buf.in,
+                                instr->ctx.data.len);
+               return 0;
+       case NAND_OP_DATA_OUT_INSTR:
+               mtk_nfc_write_buf(chip, instr->ctx.data.buf.out,
+                                 instr->ctx.data.len);
+               return 0;
+       case NAND_OP_WAITRDY_INSTR:
+               return readl_poll_timeout(nfc->regs + NFI_STA, status,
+                                         status & STA_BUSY, 20,
+                                         instr->ctx.waitrdy.timeout_ms);
+       default:
+               break;
+       }
+
+       return -EINVAL;
+}
+
+static void mtk_nfc_select_target(struct nand_chip *nand, unsigned int cs)
+{
+       struct mtk_nfc *nfc = nand_get_controller_data(nand);
+       struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(nand);
+
+       mtk_nfc_hw_runtime_config(nand_to_mtd(nand));
+
+       nfi_writel(nfc, mtk_nand->sels[cs], NFI_CSEL);
+}
+
+static int mtk_nfc_exec_op(struct nand_chip *chip,
+                          const struct nand_operation *op,
+                          bool check_only)
+{
+       struct mtk_nfc *nfc = nand_get_controller_data(chip);
+       unsigned int i;
+       int ret = 0;
+
+       if (check_only)
+               return 0;
+
+       mtk_nfc_hw_reset(nfc);
+       nfi_writew(nfc, CNFG_OP_CUST, NFI_CNFG);
+       mtk_nfc_select_target(chip, op->cs);
+
+       for (i = 0; i < op->ninstrs; i++) {
+               ret = mtk_nfc_exec_instr(chip, &op->instrs[i]);
+               if (ret)
+                       break;
+       }
+
+       return ret;
+}
+
+static int mtk_nfc_setup_interface(struct nand_chip *chip, int csline,
+                                  const struct nand_interface_config *conf)
 {
        struct mtk_nfc *nfc = nand_get_controller_data(chip);
        const struct nand_sdr_timings *timings;
@@ -803,6 +833,7 @@ static int mtk_nfc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
        u32 reg;
        int ret;
 
+       mtk_nfc_select_target(chip, chip->cur_cs);
        nand_prog_page_begin_op(chip, page, 0, NULL, 0);
 
        if (!raw) {
@@ -920,6 +951,7 @@ static int mtk_nfc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
        u8 *buf;
        int rc;
 
+       mtk_nfc_select_target(chip, chip->cur_cs);
        start = data_offs / chip->ecc.size;
        end = DIV_ROUND_UP(data_offs + readlen, chip->ecc.size);
 
@@ -1325,7 +1357,8 @@ static int mtk_nfc_attach_chip(struct nand_chip *chip)
 
 static const struct nand_controller_ops mtk_nfc_controller_ops = {
        .attach_chip = mtk_nfc_attach_chip,
-       .setup_data_interface = mtk_nfc_setup_data_interface,
+       .setup_interface = mtk_nfc_setup_interface,
+       .exec_op = mtk_nfc_exec_op,
 };
 
 static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc,
@@ -1381,13 +1414,6 @@ static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc,
        nand_set_controller_data(nand, nfc);
 
        nand->options |= NAND_USES_DMA | NAND_SUBPAGE_READ;
-       nand->legacy.dev_ready = mtk_nfc_dev_ready;
-       nand->legacy.select_chip = mtk_nfc_select_chip;
-       nand->legacy.write_byte = mtk_nfc_write_byte;
-       nand->legacy.write_buf = mtk_nfc_write_buf;
-       nand->legacy.read_byte = mtk_nfc_read_byte;
-       nand->legacy.read_buf = mtk_nfc_read_buf;
-       nand->legacy.cmd_ctrl = mtk_nfc_cmd_ctrl;
 
        /* set default mode in case dt entry is missing */
        nand->ecc.mode = NAND_ECC_HW;
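
With the legacy select_chip/dev_ready/cmd_ctrl hooks gone, every chip access now reaches the MTK driver as a struct nand_operation handed to ->exec_op(). As a reference for how such an operation is assembled, here is a sketch of a READID built with the NAND_OP_* helpers; foo_read_id() is illustrative only (nand_readid_op() in nand_base.c is the real producer, and nand_exec_op() is core-internal):

static int foo_read_id(struct nand_chip *chip, u8 *id, unsigned int len)
{
        u8 addr = 0;
        struct nand_op_instr instrs[] = {
                NAND_OP_CMD(NAND_CMD_READID, 0),
                NAND_OP_ADDR(1, &addr, 0),
                NAND_OP_8BIT_DATA_IN(len, id, 0),
        };
        struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

        /* Dispatched to the controller's ->exec_op(), e.g. mtk_nfc_exec_op(). */
        return nand_exec_op(chip, &op);
}
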
index 09dacb8..a043d76 100644 (file)
@@ -137,8 +137,8 @@ struct mxc_nand_devtype_data {
        u32 (*get_ecc_status)(struct mxc_nand_host *);
        const struct mtd_ooblayout_ops *ooblayout;
        void (*select_chip)(struct nand_chip *chip, int cs);
-       int (*setup_data_interface)(struct nand_chip *chip, int csline,
-                                   const struct nand_data_interface *conf);
+       int (*setup_interface)(struct nand_chip *chip, int csline,
+                              const struct nand_interface_config *conf);
        void (*enable_hwecc)(struct nand_chip *chip, bool enable);
 
        /*
@@ -1139,8 +1139,8 @@ static void preset_v1(struct mtd_info *mtd)
        writew(0x4, NFC_V1_V2_WRPROT);
 }
 
-static int mxc_nand_v2_setup_data_interface(struct nand_chip *chip, int csline,
-                                       const struct nand_data_interface *conf)
+static int mxc_nand_v2_setup_interface(struct nand_chip *chip, int csline,
+                                      const struct nand_interface_config *conf)
 {
        struct mxc_nand_host *host = nand_get_controller_data(chip);
        int tRC_min_ns, tRC_ps, ret;
@@ -1432,7 +1432,7 @@ static int mxc_nand_get_features(struct nand_chip *chip, int addr,
 }
 
 /*
- * The generic flash bbt decriptors overlap with our ecc
+ * The generic flash bbt descriptors overlap with our ecc
  * hardware, so define some i.MX specific ones.
  */
 static uint8_t bbt_pattern[] = { 'B', 'b', 't', '0' };
@@ -1521,7 +1521,7 @@ static const struct mxc_nand_devtype_data imx25_nand_devtype_data = {
        .get_ecc_status = get_ecc_status_v2,
        .ooblayout = &mxc_v2_ooblayout_ops,
        .select_chip = mxc_nand_select_chip_v2,
-       .setup_data_interface = mxc_nand_v2_setup_data_interface,
+       .setup_interface = mxc_nand_v2_setup_interface,
        .enable_hwecc = mxc_nand_enable_hwecc_v1_v2,
        .irqpending_quirk = 0,
        .needs_ip = 0,
@@ -1738,17 +1738,17 @@ static int mxcnd_attach_chip(struct nand_chip *chip)
        return 0;
 }
 
-static int mxcnd_setup_data_interface(struct nand_chip *chip, int chipnr,
-                                     const struct nand_data_interface *conf)
+static int mxcnd_setup_interface(struct nand_chip *chip, int chipnr,
+                                const struct nand_interface_config *conf)
 {
        struct mxc_nand_host *host = nand_get_controller_data(chip);
 
-       return host->devtype_data->setup_data_interface(chip, chipnr, conf);
+       return host->devtype_data->setup_interface(chip, chipnr, conf);
 }
 
 static const struct nand_controller_ops mxcnd_controller_ops = {
        .attach_chip = mxcnd_attach_chip,
-       .setup_data_interface = mxcnd_setup_data_interface,
+       .setup_interface = mxcnd_setup_interface,
 };
 
 static int mxcnd_probe(struct platform_device *pdev)
@@ -1809,7 +1809,7 @@ static int mxcnd_probe(struct platform_device *pdev)
        if (err < 0)
                return err;
 
-       if (!host->devtype_data->setup_data_interface)
+       if (!host->devtype_data->setup_interface)
                this->options |= NAND_KEEP_TIMINGS;
 
        if (host->devtype_data->needs_ip) {
index 57f3672..d66b5b0 100644 (file)
@@ -451,8 +451,8 @@ static int mxic_nfc_exec_op(struct nand_chip *chip,
        return ret;
 }
 
-static int mxic_nfc_setup_data_interface(struct nand_chip *chip, int chipnr,
-                                        const struct nand_data_interface *conf)
+static int mxic_nfc_setup_interface(struct nand_chip *chip, int chipnr,
+                                   const struct nand_interface_config *conf)
 {
        struct mxic_nand_ctlr *nfc = nand_get_controller_data(chip);
        const struct nand_sdr_timings *sdr;
@@ -480,7 +480,7 @@ static int mxic_nfc_setup_data_interface(struct nand_chip *chip, int chipnr,
 
 static const struct nand_controller_ops mxic_nand_controller_ops = {
        .exec_op = mxic_nfc_exec_op,
-       .setup_data_interface = mxic_nfc_setup_data_interface,
+       .setup_interface = mxic_nfc_setup_interface,
 };
 
 static int mxic_nfc_probe(struct platform_device *pdev)
index 45124db..0c768cb 100644 (file)
@@ -773,7 +773,7 @@ int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms)
                return -ENOTSUPP;
 
        /* Wait tWB before polling the STATUS reg. */
-       timings = nand_get_sdr_timings(&chip->data_interface);
+       timings = nand_get_sdr_timings(nand_get_interface_config(chip));
        ndelay(PSEC_TO_NSEC(timings->tWB_max));
 
        ret = nand_status_op(chip, NULL);
@@ -898,7 +898,7 @@ static bool nand_supports_set_features(struct nand_chip *chip, int addr)
 }
 
 /**
- * nand_reset_data_interface - Reset data interface and timings
+ * nand_reset_interface - Reset data interface and timings
  * @chip: The NAND chip
  * @chipnr: Internal die id
  *
@@ -906,11 +906,12 @@ static bool nand_supports_set_features(struct nand_chip *chip, int addr)
  *
  * Returns 0 for success or negative error code otherwise.
  */
-static int nand_reset_data_interface(struct nand_chip *chip, int chipnr)
+static int nand_reset_interface(struct nand_chip *chip, int chipnr)
 {
+       const struct nand_controller_ops *ops = chip->controller->ops;
        int ret;
 
-       if (!nand_has_setup_data_iface(chip))
+       if (!nand_controller_can_setup_interface(chip))
                return 0;
 
        /*
@@ -927,9 +928,9 @@ static int nand_reset_data_interface(struct nand_chip *chip, int chipnr)
         * timings to timing mode 0.
         */
 
-       onfi_fill_data_interface(chip, NAND_SDR_IFACE, 0);
-       ret = chip->controller->ops->setup_data_interface(chip, chipnr,
-                                                       &chip->data_interface);
+       chip->current_interface_config = nand_get_reset_interface_config();
+       ret = ops->setup_interface(chip, chipnr,
+                                  chip->current_interface_config);
        if (ret)
                pr_err("Failed to configure data interface to SDR timing mode 0\n");
 
@@ -937,28 +938,36 @@ static int nand_reset_data_interface(struct nand_chip *chip, int chipnr)
 }
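
nand_controller_can_setup_interface() is the renamed replacement for nand_has_setup_data_iface(). Given how it gates both the reset and setup paths, it presumably reduces to checking that the controller implements ->setup_interface() and that the chip did not request NAND_KEEP_TIMINGS, roughly:

static inline bool nand_controller_can_setup_interface(struct nand_chip *chip)
{
        return chip->controller->ops && chip->controller->ops->setup_interface &&
               !(chip->options & NAND_KEEP_TIMINGS);
}
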
 
 /**
- * nand_setup_data_interface - Setup the best data interface and timings
+ * nand_setup_interface - Setup the best data interface and timings
  * @chip: The NAND chip
  * @chipnr: Internal die id
  *
- * Find and configure the best data interface and NAND timings supported by
- * the chip and the driver.
- * First tries to retrieve supported timing modes from ONFI information,
- * and if the NAND chip does not support ONFI, relies on the
- * ->onfi_timing_mode_default specified in the nand_ids table.
+ * Configure what has been reported to be the best data interface and NAND
+ * timings supported by the chip and the driver.
  *
  * Returns 0 for success or negative error code otherwise.
  */
-static int nand_setup_data_interface(struct nand_chip *chip, int chipnr)
+static int nand_setup_interface(struct nand_chip *chip, int chipnr)
 {
-       u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = {
-               chip->onfi_timing_mode_default,
-       };
+       const struct nand_controller_ops *ops = chip->controller->ops;
+       u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = { };
        int ret;
 
-       if (!nand_has_setup_data_iface(chip))
+       if (!nand_controller_can_setup_interface(chip))
                return 0;
 
+       /*
+        * A nand_reset_interface() puts both the NAND chip and the NAND
+        * controller in timing mode 0. If the default mode for this chip is
+        * also 0, no need to proceed to the change again. Plus, at probe time,
+        * nand_setup_interface() uses ->set/get_features() which would
+        * fail anyway as the parameter page is not available yet.
+        */
+       if (!chip->best_interface_config)
+               return 0;
+
+       tmode_param[0] = chip->best_interface_config->timings.mode;
+
        /* Change the mode on the chip side (if supported by the NAND chip) */
        if (nand_supports_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE)) {
                nand_select_target(chip, chipnr);
@@ -970,14 +979,13 @@ static int nand_setup_data_interface(struct nand_chip *chip, int chipnr)
        }
 
        /* Change the mode on the controller side */
-       ret = chip->controller->ops->setup_data_interface(chip, chipnr,
-                                                       &chip->data_interface);
+       ret = ops->setup_interface(chip, chipnr, chip->best_interface_config);
        if (ret)
                return ret;
 
        /* Check the mode has been accepted by the chip, if supported */
        if (!nand_supports_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE))
-               return 0;
+               goto update_interface_config;
 
        memset(tmode_param, 0, ONFI_SUBFEATURE_PARAM_LEN);
        nand_select_target(chip, chipnr);
@@ -987,12 +995,15 @@ static int nand_setup_data_interface(struct nand_chip *chip, int chipnr)
        if (ret)
                goto err_reset_chip;
 
-       if (tmode_param[0] != chip->onfi_timing_mode_default) {
+       if (tmode_param[0] != chip->best_interface_config->timings.mode) {
                pr_warn("timing mode %d not acknowledged by the NAND chip\n",
-                       chip->onfi_timing_mode_default);
+                       chip->best_interface_config->timings.mode);
                goto err_reset_chip;
        }
 
+update_interface_config:
+       chip->current_interface_config = chip->best_interface_config;
+
        return 0;
 
 err_reset_chip:
@@ -1000,7 +1011,7 @@ err_reset_chip:
         * Fallback to mode 0 if the chip explicitly did not ack the chosen
         * timing mode.
         */
-       nand_reset_data_interface(chip, chipnr);
+       nand_reset_interface(chip, chipnr);
        nand_select_target(chip, chipnr);
        nand_reset_op(chip);
        nand_deselect_target(chip);
@@ -1009,62 +1020,93 @@ err_reset_chip:
 }
 
 /**
- * nand_init_data_interface - find the best data interface and timings
- * @chip: The NAND chip
- *
- * Find the best data interface and NAND timings supported by the chip
- * and the driver.
- * First tries to retrieve supported timing modes from ONFI information,
- * and if the NAND chip does not support ONFI, relies on the
- * ->onfi_timing_mode_default specified in the nand_ids table. After this
- * function nand_chip->data_interface is initialized with the best timing mode
- * available.
+ * nand_choose_best_sdr_timings - Pick up the best SDR timings that both the
+ *                                NAND controller and the NAND chip support
+ * @chip: the NAND chip
+ * @iface: the interface configuration (may be updated)
+ * @spec_timings: chip-specific timings, used when the chip does not fit the
+ *                ONFI specification
  *
- * Returns 0 for success or negative error code otherwise.
+ * If specific timings are provided, use them. Otherwise, retrieve supported
+ * timing modes from ONFI information.
  */
-static int nand_init_data_interface(struct nand_chip *chip)
+int nand_choose_best_sdr_timings(struct nand_chip *chip,
+                                struct nand_interface_config *iface,
+                                struct nand_sdr_timings *spec_timings)
 {
-       int modes, mode, ret;
+       const struct nand_controller_ops *ops = chip->controller->ops;
+       int best_mode = 0, mode, ret;
 
-       if (!nand_has_setup_data_iface(chip))
-               return 0;
+       iface->type = NAND_SDR_IFACE;
 
-       /*
-        * First try to identify the best timings from ONFI parameters and
-        * if the NAND does not support ONFI, fallback to the default ONFI
-        * timing mode.
-        */
-       if (chip->parameters.onfi) {
-               modes = chip->parameters.onfi->async_timing_mode;
-       } else {
-               if (!chip->onfi_timing_mode_default)
-                       return 0;
+       if (spec_timings) {
+               iface->timings.sdr = *spec_timings;
+               iface->timings.mode = onfi_find_closest_sdr_mode(spec_timings);
 
-               modes = GENMASK(chip->onfi_timing_mode_default, 0);
+               /* Verify the controller supports the requested interface */
+               ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY,
+                                          iface);
+               if (!ret) {
+                       chip->best_interface_config = iface;
+                       return ret;
+               }
+
+               /* Fallback to slower modes */
+               best_mode = iface->timings.mode;
+       } else if (chip->parameters.onfi) {
+               best_mode = fls(chip->parameters.onfi->async_timing_mode) - 1;
        }
 
-       for (mode = fls(modes) - 1; mode >= 0; mode--) {
-               ret = onfi_fill_data_interface(chip, NAND_SDR_IFACE, mode);
-               if (ret)
-                       continue;
+       for (mode = best_mode; mode >= 0; mode--) {
+               onfi_fill_interface_config(chip, iface, NAND_SDR_IFACE, mode);
 
-               /*
-                * Pass NAND_DATA_IFACE_CHECK_ONLY to only check if the
-                * controller supports the requested timings.
-                */
-               ret = chip->controller->ops->setup_data_interface(chip,
-                                                NAND_DATA_IFACE_CHECK_ONLY,
-                                                &chip->data_interface);
-               if (!ret) {
-                       chip->onfi_timing_mode_default = mode;
+               ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY,
+                                          iface);
+               if (!ret)
                        break;
-               }
        }
 
+       chip->best_interface_config = iface;
+
        return 0;
 }
 
 /**
+ * nand_choose_interface_config - find the best data interface and timings
+ * @chip: The NAND chip
+ *
+ * Find the best data interface and NAND timings supported by the chip
+ * and the driver. Optionally, let the NAND manufacturer driver propose its own
+ * set of timings.
+ *
+ * After this function nand_chip->interface_config is initialized with the best
+ * timing mode available.
+ *
+ * Returns 0 for success or negative error code otherwise.
+ */
+static int nand_choose_interface_config(struct nand_chip *chip)
+{
+       struct nand_interface_config *iface;
+       int ret;
+
+       if (!nand_controller_can_setup_interface(chip))
+               return 0;
+
+       iface = kzalloc(sizeof(*iface), GFP_KERNEL);
+       if (!iface)
+               return -ENOMEM;
+
+       if (chip->ops.choose_interface_config)
+               ret = chip->ops.choose_interface_config(chip, iface);
+       else
+               ret = nand_choose_best_sdr_timings(chip, iface, NULL);
+
+       if (ret)
+               kfree(iface);
+
+       return ret;
+}
+
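
The new per-chip ->choose_interface_config() hook called above is what the Hynix and Toshiba hunks further down plug into. A manufacturer driver that wants to start from a known-good ONFI SDR mode would implement it along these lines (the mode number is purely illustrative):

static int foo_chip_choose_interface_config(struct nand_chip *chip,
                                            struct nand_interface_config *iface)
{
        /*
         * Seed the configuration with a fixed ONFI SDR timing mode, then
         * hand over to the core helper, exactly as the Hynix and Toshiba
         * hooks in this series do.
         */
        onfi_fill_interface_config(chip, iface, NAND_SDR_IFACE, 3);

        return nand_choose_best_sdr_timings(chip, iface, NULL);
}
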
+/**
  * nand_fill_column_cycles - fill the column cycles of an address
  * @chip: The NAND chip
  * @addrs: Array of address cycles to fill
@@ -1122,9 +1164,9 @@ static int nand_sp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
                                     unsigned int offset_in_page, void *buf,
                                     unsigned int len)
 {
-       struct mtd_info *mtd = nand_to_mtd(chip);
        const struct nand_sdr_timings *sdr =
-               nand_get_sdr_timings(&chip->data_interface);
+               nand_get_sdr_timings(nand_get_interface_config(chip));
+       struct mtd_info *mtd = nand_to_mtd(chip);
        u8 addrs[4];
        struct nand_op_instr instrs[] = {
                NAND_OP_CMD(NAND_CMD_READ0, 0),
@@ -1166,7 +1208,7 @@ static int nand_lp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
                                     unsigned int len)
 {
        const struct nand_sdr_timings *sdr =
-               nand_get_sdr_timings(&chip->data_interface);
+               nand_get_sdr_timings(nand_get_interface_config(chip));
        u8 addrs[5];
        struct nand_op_instr instrs[] = {
                NAND_OP_CMD(NAND_CMD_READ0, 0),
@@ -1263,7 +1305,7 @@ int nand_read_param_page_op(struct nand_chip *chip, u8 page, void *buf,
 
        if (nand_has_exec_op(chip)) {
                const struct nand_sdr_timings *sdr =
-                       nand_get_sdr_timings(&chip->data_interface);
+                       nand_get_sdr_timings(nand_get_interface_config(chip));
                struct nand_op_instr instrs[] = {
                        NAND_OP_CMD(NAND_CMD_PARAM, 0),
                        NAND_OP_ADDR(1, &page, PSEC_TO_NSEC(sdr->tWB_max)),
@@ -1318,7 +1360,7 @@ int nand_change_read_column_op(struct nand_chip *chip,
 
        if (nand_has_exec_op(chip)) {
                const struct nand_sdr_timings *sdr =
-                       nand_get_sdr_timings(&chip->data_interface);
+                       nand_get_sdr_timings(nand_get_interface_config(chip));
                u8 addrs[2] = {};
                struct nand_op_instr instrs[] = {
                        NAND_OP_CMD(NAND_CMD_RNDOUT, 0),
@@ -1392,9 +1434,9 @@ static int nand_exec_prog_page_op(struct nand_chip *chip, unsigned int page,
                                  unsigned int offset_in_page, const void *buf,
                                  unsigned int len, bool prog)
 {
-       struct mtd_info *mtd = nand_to_mtd(chip);
        const struct nand_sdr_timings *sdr =
-               nand_get_sdr_timings(&chip->data_interface);
+               nand_get_sdr_timings(nand_get_interface_config(chip));
+       struct mtd_info *mtd = nand_to_mtd(chip);
        u8 addrs[5] = {};
        struct nand_op_instr instrs[] = {
                /*
@@ -1517,7 +1559,7 @@ int nand_prog_page_end_op(struct nand_chip *chip)
 
        if (nand_has_exec_op(chip)) {
                const struct nand_sdr_timings *sdr =
-                       nand_get_sdr_timings(&chip->data_interface);
+                       nand_get_sdr_timings(nand_get_interface_config(chip));
                struct nand_op_instr instrs[] = {
                        NAND_OP_CMD(NAND_CMD_PAGEPROG,
                                    PSEC_TO_NSEC(sdr->tWB_max)),
@@ -1624,7 +1666,7 @@ int nand_change_write_column_op(struct nand_chip *chip,
 
        if (nand_has_exec_op(chip)) {
                const struct nand_sdr_timings *sdr =
-                       nand_get_sdr_timings(&chip->data_interface);
+                       nand_get_sdr_timings(nand_get_interface_config(chip));
                u8 addrs[2];
                struct nand_op_instr instrs[] = {
                        NAND_OP_CMD(NAND_CMD_RNDIN, 0),
@@ -1679,7 +1721,7 @@ int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
 
        if (nand_has_exec_op(chip)) {
                const struct nand_sdr_timings *sdr =
-                       nand_get_sdr_timings(&chip->data_interface);
+                       nand_get_sdr_timings(nand_get_interface_config(chip));
                struct nand_op_instr instrs[] = {
                        NAND_OP_CMD(NAND_CMD_READID, 0),
                        NAND_OP_ADDR(1, &addr, PSEC_TO_NSEC(sdr->tADL_min)),
@@ -1718,7 +1760,7 @@ int nand_status_op(struct nand_chip *chip, u8 *status)
 {
        if (nand_has_exec_op(chip)) {
                const struct nand_sdr_timings *sdr =
-                       nand_get_sdr_timings(&chip->data_interface);
+                       nand_get_sdr_timings(nand_get_interface_config(chip));
                struct nand_op_instr instrs[] = {
                        NAND_OP_CMD(NAND_CMD_STATUS,
                                    PSEC_TO_NSEC(sdr->tADL_min)),
@@ -1787,7 +1829,7 @@ int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock)
 
        if (nand_has_exec_op(chip)) {
                const struct nand_sdr_timings *sdr =
-                       nand_get_sdr_timings(&chip->data_interface);
+                       nand_get_sdr_timings(nand_get_interface_config(chip));
                u8 addrs[3] = { page, page >> 8, page >> 16 };
                struct nand_op_instr instrs[] = {
                        NAND_OP_CMD(NAND_CMD_ERASE1, 0),
@@ -1846,7 +1888,7 @@ static int nand_set_features_op(struct nand_chip *chip, u8 feature,
 
        if (nand_has_exec_op(chip)) {
                const struct nand_sdr_timings *sdr =
-                       nand_get_sdr_timings(&chip->data_interface);
+                       nand_get_sdr_timings(nand_get_interface_config(chip));
                struct nand_op_instr instrs[] = {
                        NAND_OP_CMD(NAND_CMD_SET_FEATURES, 0),
                        NAND_OP_ADDR(1, &feature, PSEC_TO_NSEC(sdr->tADL_min)),
@@ -1893,7 +1935,7 @@ static int nand_get_features_op(struct nand_chip *chip, u8 feature,
 
        if (nand_has_exec_op(chip)) {
                const struct nand_sdr_timings *sdr =
-                       nand_get_sdr_timings(&chip->data_interface);
+                       nand_get_sdr_timings(nand_get_interface_config(chip));
                struct nand_op_instr instrs[] = {
                        NAND_OP_CMD(NAND_CMD_GET_FEATURES, 0),
                        NAND_OP_ADDR(1, &feature, PSEC_TO_NSEC(sdr->tWB_max)),
@@ -1950,7 +1992,7 @@ int nand_reset_op(struct nand_chip *chip)
 {
        if (nand_has_exec_op(chip)) {
                const struct nand_sdr_timings *sdr =
-                       nand_get_sdr_timings(&chip->data_interface);
+                       nand_get_sdr_timings(nand_get_interface_config(chip));
                struct nand_op_instr instrs[] = {
                        NAND_OP_CMD(NAND_CMD_RESET, PSEC_TO_NSEC(sdr->tWB_max)),
                        NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tRST_max), 0),
@@ -2480,17 +2522,16 @@ EXPORT_SYMBOL_GPL(nand_subop_get_data_len);
  * @chipnr: Internal die id
  *
  * Save the timings data structure, then apply SDR timings mode 0 (see
- * nand_reset_data_interface for details), do the reset operation, and
- * apply back the previous timings.
+ * nand_reset_interface for details), do the reset operation, and apply
+ * back the previous timings.
  *
  * Returns 0 on success, a negative error code otherwise.
  */
 int nand_reset(struct nand_chip *chip, int chipnr)
 {
-       struct nand_data_interface saved_data_intf = chip->data_interface;
        int ret;
 
-       ret = nand_reset_data_interface(chip, chipnr);
+       ret = nand_reset_interface(chip, chipnr);
        if (ret)
                return ret;
 
@@ -2505,18 +2546,7 @@ int nand_reset(struct nand_chip *chip, int chipnr)
        if (ret)
                return ret;
 
-       /*
-        * A nand_reset_data_interface() put both the NAND chip and the NAND
-        * controller in timings mode 0. If the default mode for this chip is
-        * also 0, no need to proceed to the change again. Plus, at probe time,
-        * nand_setup_data_interface() uses ->set/get_features() which would
-        * fail anyway as the parameter page is not available yet.
-        */
-       if (!chip->onfi_timing_mode_default)
-               return 0;
-
-       chip->data_interface = saved_data_intf;
-       ret = nand_setup_data_interface(chip, chipnr);
+       ret = nand_setup_interface(chip, chipnr);
        if (ret)
                return ret;
 
@@ -3215,10 +3245,10 @@ static int nand_setup_read_retry(struct nand_chip *chip, int retry_mode)
        if (retry_mode >= chip->read_retries)
                return -EINVAL;
 
-       if (!chip->setup_read_retry)
+       if (!chip->ops.setup_read_retry)
                return -EOPNOTSUPP;
 
-       return chip->setup_read_retry(chip, retry_mode);
+       return chip->ops.setup_read_retry(chip, retry_mode);
 }
 
 static void nand_wait_readrdy(struct nand_chip *chip)
@@ -3228,7 +3258,7 @@ static void nand_wait_readrdy(struct nand_chip *chip)
        if (!(chip->options & NAND_NEED_READRDY))
                return;
 
-       sdr = nand_get_sdr_timings(&chip->data_interface);
+       sdr = nand_get_sdr_timings(nand_get_interface_config(chip));
        WARN_ON(nand_wait_rdy_op(chip, PSEC_TO_MSEC(sdr->tR_max), 0));
 }
 
@@ -4462,8 +4492,8 @@ static int nand_suspend(struct mtd_info *mtd)
        int ret = 0;
 
        mutex_lock(&chip->lock);
-       if (chip->suspend)
-               ret = chip->suspend(chip);
+       if (chip->ops.suspend)
+               ret = chip->ops.suspend(chip);
        if (!ret)
                chip->suspended = 1;
        mutex_unlock(&chip->lock);
@@ -4481,8 +4511,8 @@ static void nand_resume(struct mtd_info *mtd)
 
        mutex_lock(&chip->lock);
        if (chip->suspended) {
-               if (chip->resume)
-                       chip->resume(chip);
+               if (chip->ops.resume)
+                       chip->ops.resume(chip);
                chip->suspended = 0;
        } else {
                pr_err("%s called for a chip which is not in suspended state\n",
@@ -4511,10 +4541,10 @@ static int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
 {
        struct nand_chip *chip = mtd_to_nand(mtd);
 
-       if (!chip->lock_area)
+       if (!chip->ops.lock_area)
                return -ENOTSUPP;
 
-       return chip->lock_area(chip, ofs, len);
+       return chip->ops.lock_area(chip, ofs, len);
 }
 
 /**
@@ -4527,10 +4557,10 @@ static int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
 {
        struct nand_chip *chip = mtd_to_nand(mtd);
 
-       if (!chip->unlock_area)
+       if (!chip->ops.unlock_area)
                return -ENOTSUPP;
 
-       return chip->unlock_area(chip, ofs, len);
+       return chip->ops.unlock_area(chip, ofs, len);
 }
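
The suspend/resume, lock_area/unlock_area and setup_read_retry callbacks touched above all migrate from struct nand_chip into chip->ops. Pieced together from the call sites in this diff (and therefore only a sketch of the assumed layout), the new structure carries roughly:

struct nand_chip_ops {
        int (*suspend)(struct nand_chip *chip);
        void (*resume)(struct nand_chip *chip);
        int (*lock_area)(struct nand_chip *chip, loff_t ofs, uint64_t len);
        int (*unlock_area)(struct nand_chip *chip, loff_t ofs, uint64_t len);
        int (*setup_read_retry)(struct nand_chip *chip, int retry_mode);
        int (*choose_interface_config)(struct nand_chip *chip,
                                       struct nand_interface_config *iface);
};
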
 
 /* Set default functions */
@@ -4743,8 +4773,6 @@ static bool find_full_id_nand(struct nand_chip *chip,
                chip->options |= type->options;
                chip->base.eccreq.strength = NAND_ECC_STRENGTH(type);
                chip->base.eccreq.step_size = NAND_ECC_STEP(type);
-               chip->onfi_timing_mode_default =
-                                       type->onfi_timing_mode_default;
 
                chip->parameters.model = kstrdup(type->name, GFP_KERNEL);
                if (!chip->parameters.model)
@@ -4810,9 +4838,9 @@ static void nand_manufacturer_cleanup(struct nand_chip *chip)
 }
 
 static const char *
-nand_manufacturer_name(const struct nand_manufacturer *manufacturer)
+nand_manufacturer_name(const struct nand_manufacturer_desc *manufacturer_desc)
 {
-       return manufacturer ? manufacturer->name : "Unknown";
+       return manufacturer_desc ? manufacturer_desc->name : "Unknown";
 }
 
 /*
@@ -4820,7 +4848,7 @@ nand_manufacturer_name(const struct nand_manufacturer *manufacturer)
  */
 static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
 {
-       const struct nand_manufacturer *manufacturer;
+       const struct nand_manufacturer_desc *manufacturer_desc;
        struct mtd_info *mtd = nand_to_mtd(chip);
        struct nand_memory_organization *memorg;
        int busw, ret;
@@ -4877,8 +4905,8 @@ static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
        chip->id.len = nand_id_len(id_data, ARRAY_SIZE(chip->id.data));
 
        /* Try to identify manufacturer */
-       manufacturer = nand_get_manufacturer(maf_id);
-       chip->manufacturer.desc = manufacturer;
+       manufacturer_desc = nand_get_manufacturer_desc(maf_id);
+       chip->manufacturer.desc = manufacturer_desc;
 
        if (!type)
                type = nand_flash_ids;
@@ -4957,7 +4985,7 @@ ident_done:
                 */
                pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
                        maf_id, dev_id);
-               pr_info("%s %s\n", nand_manufacturer_name(manufacturer),
+               pr_info("%s %s\n", nand_manufacturer_name(manufacturer_desc),
                        mtd->name);
                pr_warn("bus width %d instead of %d bits\n", busw ? 16 : 8,
                        (chip->options & NAND_BUSWIDTH_16) ? 16 : 8);
@@ -4992,7 +5020,7 @@ ident_done:
 
        pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
                maf_id, dev_id);
-       pr_info("%s %s\n", nand_manufacturer_name(manufacturer),
+       pr_info("%s %s\n", nand_manufacturer_name(manufacturer_desc),
                chip->parameters.model);
        pr_info("%d MiB, %s, erase size: %d KiB, page size: %d, OOB size: %d\n",
                (int)(targetsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC",
@@ -5185,7 +5213,7 @@ static int nand_scan_ident(struct nand_chip *chip, unsigned int maxchips,
        mutex_init(&chip->lock);
 
        /* Enforce the right timings for reset/detection */
-       onfi_fill_data_interface(chip, NAND_SDR_IFACE, 0);
+       chip->current_interface_config = nand_get_reset_interface_config();
 
        ret = nand_dt_init(chip);
        if (ret)
@@ -5972,16 +6000,16 @@ static int nand_scan_tail(struct nand_chip *chip)
        if (!mtd->bitflip_threshold)
                mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4);
 
-       /* Initialize the ->data_interface field. */
-       ret = nand_init_data_interface(chip);
+       /* Find the fastest data interface for this chip */
+       ret = nand_choose_interface_config(chip);
        if (ret)
                goto err_nanddev_cleanup;
 
        /* Enter fastest possible mode on all dies. */
        for (i = 0; i < nanddev_ntargets(&chip->base); i++) {
-               ret = nand_setup_data_interface(chip, i);
+               ret = nand_setup_interface(chip, i);
                if (ret)
-                       goto err_nanddev_cleanup;
+                       goto err_free_interface_config;
        }
 
        /* Check, if we should skip the bad block table scan */
@@ -5991,10 +6019,12 @@ static int nand_scan_tail(struct nand_chip *chip)
        /* Build bad block table */
        ret = nand_create_bbt(chip);
        if (ret)
-               goto err_nanddev_cleanup;
+               goto err_free_interface_config;
 
        return 0;
 
+err_free_interface_config:
+       kfree(chip->best_interface_config);
 
 err_nanddev_cleanup:
        nanddev_cleanup(&chip->base);
@@ -6088,6 +6118,9 @@ void nand_cleanup(struct nand_chip *chip)
                        & NAND_BBT_DYNAMICSTRUCT)
                kfree(chip->badblock_pattern);
 
+       /* Free the data interface */
+       kfree(chip->best_interface_config);
+
        /* Free manufacturer priv data. */
        nand_manufacturer_cleanup(chip);
 
index 96045d6..344a24f 100644 (file)
@@ -1226,7 +1226,7 @@ static int nand_scan_bbt(struct nand_chip *this, struct nand_bbt_descr *bd)
                return -ENOMEM;
 
        /*
-        * If no primary table decriptor is given, scan the device to build a
+        * If no primary table descriptor is given, scan the device to build a
         * memory based bad block table.
         */
        if (!td) {
index 7caedaa..6d08eb8 100644 (file)
@@ -337,7 +337,7 @@ static int hynix_mlc_1xnm_rr_init(struct nand_chip *chip,
        rr->nregs = nregs;
        rr->regs = hynix_1xnm_mlc_read_retry_regs;
        hynix->read_retry = rr;
-       chip->setup_read_retry = hynix_nand_setup_read_retry;
+       chip->ops.setup_read_retry = hynix_nand_setup_read_retry;
        chip->read_retries = nmodes;
 
 out:
@@ -673,6 +673,15 @@ static void hynix_nand_cleanup(struct nand_chip *chip)
        nand_set_manufacturer_data(chip, NULL);
 }
 
+static int
+h27ucg8t2atrbc_choose_interface_config(struct nand_chip *chip,
+                                      struct nand_interface_config *iface)
+{
+       onfi_fill_interface_config(chip, iface, NAND_SDR_IFACE, 4);
+
+       return nand_choose_best_sdr_timings(chip, iface, NULL);
+}
+
 static int hynix_nand_init(struct nand_chip *chip)
 {
        struct hynix_nand *hynix;
@@ -689,6 +698,11 @@ static int hynix_nand_init(struct nand_chip *chip)
 
        nand_set_manufacturer_data(chip, hynix);
 
+       if (!strncmp("H27UCG8T2ATR-BC", chip->parameters.model,
+                    sizeof("H27UCG8T2ATR-BC") - 1))
+               chip->ops.choose_interface_config =
+                       h27ucg8t2atrbc_choose_interface_config;
+
        ret = hynix_nand_rr_init(chip);
        if (ret)
                hynix_nand_cleanup(chip);
index ba27902..b994579 100644 (file)
@@ -28,8 +28,7 @@ struct nand_flash_dev nand_flash_ids[] = {
         */
        {"TC58NVG0S3E 1G 3.3V 8-bit",
                { .id = {0x98, 0xd1, 0x90, 0x15, 0x76, 0x14, 0x01, 0x00} },
-                 SZ_2K, SZ_128, SZ_128K, 0, 8, 64, NAND_ECC_INFO(1, SZ_512),
-                 2 },
+                 SZ_2K, SZ_128, SZ_128K, 0, 8, 64, NAND_ECC_INFO(1, SZ_512), },
        {"TC58NVG2S0F 4G 3.3V 8-bit",
                { .id = {0x98, 0xdc, 0x90, 0x26, 0x76, 0x15, 0x01, 0x08} },
                  SZ_4K, SZ_512, SZ_256K, 0, 8, 224, NAND_ECC_INFO(4, SZ_512) },
@@ -51,7 +50,10 @@ struct nand_flash_dev nand_flash_ids[] = {
        {"H27UCG8T2ATR-BC 64G 3.3V 8-bit",
                { .id = {0xad, 0xde, 0x94, 0xda, 0x74, 0xc4} },
                  SZ_8K, SZ_8K, SZ_2M, NAND_NEED_SCRAMBLING, 6, 640,
-                 NAND_ECC_INFO(40, SZ_1K), 4 },
+                 NAND_ECC_INFO(40, SZ_1K) },
+       {"TH58NVG2S3HBAI4 4G 3.3V 8-bit",
+               { .id = {0x98, 0xdc, 0x91, 0x15, 0x76} },
+                 SZ_2K, SZ_512, SZ_128K, 0, 5, 128, NAND_ECC_INFO(8, SZ_512) },
 
        LEGACY_ID_NAND("NAND 4MiB 5V 8-bit",   0x6B, 4, SZ_8K, SP_OPTIONS),
        LEGACY_ID_NAND("NAND 4MiB 3,3V 8-bit", 0xE3, 4, SZ_8K, SP_OPTIONS),
@@ -166,7 +168,7 @@ struct nand_flash_dev nand_flash_ids[] = {
 };
 
 /* Manufacturer IDs */
-static const struct nand_manufacturer nand_manufacturers[] = {
+static const struct nand_manufacturer_desc nand_manufacturer_descs[] = {
        {NAND_MFR_AMD, "AMD/Spansion", &amd_nand_manuf_ops},
        {NAND_MFR_ATO, "ATO"},
        {NAND_MFR_EON, "Eon"},
@@ -186,20 +188,20 @@ static const struct nand_manufacturer nand_manufacturers[] = {
 };
 
 /**
- * nand_get_manufacturer - Get manufacturer information from the manufacturer
- *                        ID
+ * nand_get_manufacturer_desc - Get manufacturer information from the
+ *                              manufacturer ID
  * @id: manufacturer ID
  *
- * Returns a pointer a nand_manufacturer object if the manufacturer is defined
+ * Returns a nand_manufacturer_desc object if the manufacturer is defined
  * in the NAND manufacturers database, NULL otherwise.
  */
-const struct nand_manufacturer *nand_get_manufacturer(u8 id)
+const struct nand_manufacturer_desc *nand_get_manufacturer_desc(u8 id)
 {
        int i;
 
-       for (i = 0; i < ARRAY_SIZE(nand_manufacturers); i++)
-               if (nand_manufacturers[i].id == id)
-                       return &nand_manufacturers[i];
+       for (i = 0; i < ARRAY_SIZE(nand_manufacturer_descs); i++)
+               if (nand_manufacturer_descs[i].id == id)
+                       return &nand_manufacturer_descs[i];
 
        return NULL;
 }
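
The nand_manufacturer -> nand_manufacturer_desc rename is mechanical; judging from the initializers in the table above, the descriptor still pairs a manufacturer ID and printable name with optional manufacturer ops, roughly:

struct nand_manufacturer_desc {
        int id;
        char *name;
        const struct nand_manufacturer_ops *ops;
};
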
index d64791c..2bcc037 100644 (file)
@@ -354,6 +354,9 @@ static void nand_command(struct nand_chip *chip, unsigned int command,
 
 static void nand_ccs_delay(struct nand_chip *chip)
 {
+       const struct nand_sdr_timings *sdr =
+               nand_get_sdr_timings(nand_get_interface_config(chip));
+
        /*
         * The controller already takes care of waiting for tCCS when the RNDIN
         * or RNDOUT command is sent, return directly.
@@ -365,8 +368,8 @@ static void nand_ccs_delay(struct nand_chip *chip)
         * Wait tCCS_min if it is correctly defined, otherwise wait 500ns
         * (which should be safe for all NANDs).
         */
-       if (nand_has_setup_data_iface(chip))
-               ndelay(chip->data_interface.timings.sdr.tCCS_min / 1000);
+       if (nand_controller_can_setup_interface(chip))
+               ndelay(sdr->tCCS_min / 1000);
        else
                ndelay(500);
 }
index 09c254c..1472f92 100644 (file)
@@ -130,7 +130,7 @@ static void macronix_nand_onfi_init(struct nand_chip *chip)
                return;
 
        chip->read_retries = MACRONIX_NUM_READ_RETRY_MODES;
-       chip->setup_read_retry = macronix_nand_setup_read_retry;
+       chip->ops.setup_read_retry = macronix_nand_setup_read_retry;
 
        if (p->supports_set_get_features) {
                bitmap_set(p->set_feature_list,
@@ -242,8 +242,8 @@ static void macronix_nand_block_protection_support(struct nand_chip *chip)
        bitmap_set(chip->parameters.set_feature_list,
                   ONFI_FEATURE_ADDR_MXIC_PROTECTION, 1);
 
-       chip->lock_area = mxic_nand_lock;
-       chip->unlock_area = mxic_nand_unlock;
+       chip->ops.lock_area = mxic_nand_lock;
+       chip->ops.unlock_area = mxic_nand_unlock;
 }
 
 static int nand_power_down_op(struct nand_chip *chip)
@@ -312,8 +312,8 @@ static void macronix_nand_deep_power_down_support(struct nand_chip *chip)
        if (i < 0)
                return;
 
-       chip->suspend = mxic_nand_suspend;
-       chip->resume = mxic_nand_resume;
+       chip->ops.suspend = mxic_nand_suspend;
+       chip->ops.resume = mxic_nand_resume;
 }
 
 static int macronix_nand_init(struct nand_chip *chip)
index 3589b4f..4385092 100644 (file)
@@ -84,7 +84,7 @@ static int micron_nand_onfi_init(struct nand_chip *chip)
                struct nand_onfi_vendor_micron *micron = (void *)p->onfi->vendor;
 
                chip->read_retries = micron->read_retry_options;
-               chip->setup_read_retry = micron_nand_setup_read_retry;
+               chip->ops.setup_read_retry = micron_nand_setup_read_retry;
        }
 
        if (p->supports_set_get_features) {
index 36d21be..94d8326 100644 (file)
 
 #define ONFI_DYN_TIMING_MAX U16_MAX
 
-static const struct nand_data_interface onfi_sdr_timings[] = {
+/*
+ * For non-ONFI chips we use the highest possible value for tPROG and tBERS.
+ * tR and tCCS will take the default values given in the ONFI specification
+ * for timing mode 0, i.e. 200us and 500ns respectively.
+ *
+ * These four values are tweaked to be more accurate in the case of ONFI chips.
+ */
+static const struct nand_interface_config onfi_sdr_timings[] = {
        /* Mode 0 */
        {
                .type = NAND_SDR_IFACE,
@@ -20,6 +27,8 @@ static const struct nand_data_interface onfi_sdr_timings[] = {
                .timings.sdr = {
                        .tCCS_min = 500000,
                        .tR_max = 200000000,
+                       .tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+                       .tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
                        .tADL_min = 400000,
                        .tALH_min = 20000,
                        .tALS_min = 50000,
@@ -63,6 +72,8 @@ static const struct nand_data_interface onfi_sdr_timings[] = {
                .timings.sdr = {
                        .tCCS_min = 500000,
                        .tR_max = 200000000,
+                       .tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+                       .tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
                        .tADL_min = 400000,
                        .tALH_min = 10000,
                        .tALS_min = 25000,
@@ -106,6 +117,8 @@ static const struct nand_data_interface onfi_sdr_timings[] = {
                .timings.sdr = {
                        .tCCS_min = 500000,
                        .tR_max = 200000000,
+                       .tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+                       .tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
                        .tADL_min = 400000,
                        .tALH_min = 10000,
                        .tALS_min = 15000,
@@ -149,6 +162,8 @@ static const struct nand_data_interface onfi_sdr_timings[] = {
                .timings.sdr = {
                        .tCCS_min = 500000,
                        .tR_max = 200000000,
+                       .tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+                       .tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
                        .tADL_min = 400000,
                        .tALH_min = 5000,
                        .tALS_min = 10000,
@@ -192,6 +207,8 @@ static const struct nand_data_interface onfi_sdr_timings[] = {
                .timings.sdr = {
                        .tCCS_min = 500000,
                        .tR_max = 200000000,
+                       .tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+                       .tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
                        .tADL_min = 400000,
                        .tALH_min = 5000,
                        .tALS_min = 10000,
@@ -235,6 +252,8 @@ static const struct nand_data_interface onfi_sdr_timings[] = {
                .timings.sdr = {
                        .tCCS_min = 500000,
                        .tR_max = 200000000,
+                       .tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+                       .tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
                        .tADL_min = 400000,
                        .tALH_min = 5000,
                        .tALS_min = 10000,
@@ -273,23 +292,79 @@ static const struct nand_data_interface onfi_sdr_timings[] = {
        },
 };
 
+/* All NAND chips share the same reset data interface: SDR mode 0 */
+const struct nand_interface_config *nand_get_reset_interface_config(void)
+{
+       return &onfi_sdr_timings[0];
+}
+
+/**
+ * onfi_find_closest_sdr_mode - Derive the closest ONFI SDR timing mode given a
+ *                              set of timings
+ * @spec_timings: the timings to compare against the ONFI timing modes
+ */
+unsigned int
+onfi_find_closest_sdr_mode(const struct nand_sdr_timings *spec_timings)
+{
+       const struct nand_sdr_timings *onfi_timings;
+       int mode;
+
+       for (mode = ARRAY_SIZE(onfi_sdr_timings) - 1; mode > 0; mode--) {
+               onfi_timings = &onfi_sdr_timings[mode].timings.sdr;
+
+               if (spec_timings->tCCS_min <= onfi_timings->tCCS_min &&
+                   spec_timings->tADL_min <= onfi_timings->tADL_min &&
+                   spec_timings->tALH_min <= onfi_timings->tALH_min &&
+                   spec_timings->tALS_min <= onfi_timings->tALS_min &&
+                   spec_timings->tAR_min <= onfi_timings->tAR_min &&
+                   spec_timings->tCEH_min <= onfi_timings->tCEH_min &&
+                   spec_timings->tCH_min <= onfi_timings->tCH_min &&
+                   spec_timings->tCLH_min <= onfi_timings->tCLH_min &&
+                   spec_timings->tCLR_min <= onfi_timings->tCLR_min &&
+                   spec_timings->tCLS_min <= onfi_timings->tCLS_min &&
+                   spec_timings->tCOH_min <= onfi_timings->tCOH_min &&
+                   spec_timings->tCS_min <= onfi_timings->tCS_min &&
+                   spec_timings->tDH_min <= onfi_timings->tDH_min &&
+                   spec_timings->tDS_min <= onfi_timings->tDS_min &&
+                   spec_timings->tIR_min <= onfi_timings->tIR_min &&
+                   spec_timings->tRC_min <= onfi_timings->tRC_min &&
+                   spec_timings->tREH_min <= onfi_timings->tREH_min &&
+                   spec_timings->tRHOH_min <= onfi_timings->tRHOH_min &&
+                   spec_timings->tRHW_min <= onfi_timings->tRHW_min &&
+                   spec_timings->tRLOH_min <= onfi_timings->tRLOH_min &&
+                   spec_timings->tRP_min <= onfi_timings->tRP_min &&
+                   spec_timings->tRR_min <= onfi_timings->tRR_min &&
+                   spec_timings->tWC_min <= onfi_timings->tWC_min &&
+                   spec_timings->tWH_min <= onfi_timings->tWH_min &&
+                   spec_timings->tWHR_min <= onfi_timings->tWHR_min &&
+                   spec_timings->tWP_min <= onfi_timings->tWP_min &&
+                   spec_timings->tWW_min <= onfi_timings->tWW_min)
+                       return mode;
+       }
+
+       return 0;
+}
+
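
The loop above walks from the fastest mode down and returns the first mode whose table minimums are all at least as long as the chip's own minimums, i.e. a mode the chip is no stricter than. A small worked example with hypothetical numbers:

/*
 * Hypothetical chip (values in picoseconds):
 *   chip tWHR_min = 120000, mode-5 tWHR_min =  80000
 *     -> 120000 <= 80000 is false, mode 5 is rejected
 *   chip tWHR_min = 120000, mode-3 tWHR_min = 120000
 *     -> 120000 <= 120000 holds; if every other *_min check also
 *        passes, onfi_find_closest_sdr_mode() returns 3
 */
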
 /**
- * onfi_fill_data_interface - [NAND Interface] Initialize a data interface from
- * given ONFI mode
- * @mode: The ONFI timing mode
+ * onfi_fill_interface_config - Initialize an interface config from a given
+ *                              ONFI mode
+ * @chip: The NAND chip
+ * @iface: The interface configuration to fill
+ * @type: The interface type
+ * @timing_mode: The ONFI timing mode
  */
-int onfi_fill_data_interface(struct nand_chip *chip,
-                            enum nand_data_interface_type type,
-                            int timing_mode)
+void onfi_fill_interface_config(struct nand_chip *chip,
+                               struct nand_interface_config *iface,
+                               enum nand_interface_type type,
+                               unsigned int timing_mode)
 {
-       struct nand_data_interface *iface = &chip->data_interface;
        struct onfi_params *onfi = chip->parameters.onfi;
 
-       if (type != NAND_SDR_IFACE)
-               return -EINVAL;
+       if (WARN_ON(type != NAND_SDR_IFACE))
+               return;
 
-       if (timing_mode < 0 || timing_mode >= ARRAY_SIZE(onfi_sdr_timings))
-               return -EINVAL;
+       if (WARN_ON(timing_mode >= ARRAY_SIZE(onfi_sdr_timings)))
+               return;
 
        *iface = onfi_sdr_timings[timing_mode];
 
@@ -308,22 +383,5 @@ int onfi_fill_data_interface(struct nand_chip *chip,
 
                /* nanoseconds -> picoseconds */
                timings->tCCS_min = 1000UL * onfi->tCCS;
-       } else {
-               struct nand_sdr_timings *timings = &iface->timings.sdr;
-               /*
-                * For non-ONFI chips we use the highest possible value for
-                * tPROG and tBERS. tR and tCCS will take the default values
-                * precised in the ONFI specification for timing mode 0,
-                * respectively 200us and 500ns.
-                */
-
-               /* microseconds -> picoseconds */
-               timings->tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX;
-               timings->tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX;
-
-               timings->tR_max = 200000000;
-               timings->tCCS_min = 500000;
        }
-
-       return 0;
 }
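
All SDR timings in this file are stored in picoseconds, hence the explicit conversions in the hunks above. As a quick sanity check on the constants that moved into the static table (assuming ONFI_DYN_TIMING_MAX is U16_MAX, as defined at the top of the file):

/*
 * tCCS_min  =             500000 ps = 500 ns   (ONFI mode 0 default)
 * tR_max    =          200000000 ps = 200 us   (ONFI mode 0 default)
 * tPROG_max = 1000000ULL * 65535 ps ~ 65.5 ms  (non-ONFI worst case)
 * tBERS_max = 1000000ULL * 65535 ps ~ 65.5 ms  (non-ONFI worst case)
 */
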
index ae06990..f746c19 100644 (file)
@@ -33,7 +33,7 @@ static int toshiba_nand_benand_read_eccstatus_op(struct nand_chip *chip,
 
        if (nand_has_exec_op(chip)) {
                const struct nand_sdr_timings *sdr =
-                       nand_get_sdr_timings(&chip->data_interface);
+                       nand_get_sdr_timings(nand_get_interface_config(chip));
                struct nand_op_instr instrs[] = {
                        NAND_OP_CMD(TOSHIBA_NAND_CMD_ECC_STATUS_READ,
                                    PSEC_TO_NSEC(sdr->tADL_min)),
@@ -194,17 +194,79 @@ static void toshiba_nand_decode_id(struct nand_chip *chip)
        }
 }
 
+static int
+tc58teg5dclta00_choose_interface_config(struct nand_chip *chip,
+                                       struct nand_interface_config *iface)
+{
+       onfi_fill_interface_config(chip, iface, NAND_SDR_IFACE, 5);
+
+       return nand_choose_best_sdr_timings(chip, iface, NULL);
+}
+
+static int
+tc58nvg0s3e_choose_interface_config(struct nand_chip *chip,
+                                   struct nand_interface_config *iface)
+{
+       onfi_fill_interface_config(chip, iface, NAND_SDR_IFACE, 2);
+
+       return nand_choose_best_sdr_timings(chip, iface, NULL);
+}
+
+static int
+th58nvg2s3hbai4_choose_interface_config(struct nand_chip *chip,
+                                       struct nand_interface_config *iface)
+{
+       struct nand_sdr_timings *sdr = &iface->timings.sdr;
+
+       /* Start with timings from the closest timing mode, mode 4. */
+       onfi_fill_interface_config(chip, iface, NAND_SDR_IFACE, 4);
+
+       /* Patch timings that differ from mode 4. */
+       sdr->tALS_min = 12000;
+       sdr->tCHZ_max = 20000;
+       sdr->tCLS_min = 12000;
+       sdr->tCOH_min = 0;
+       sdr->tDS_min = 12000;
+       sdr->tRHOH_min = 25000;
+       sdr->tRHW_min = 30000;
+       sdr->tRHZ_max = 60000;
+       sdr->tWHR_min = 60000;
+
+       /* Patch timings not part of onfi timing mode. */
+       sdr->tPROG_max = 700000000;
+       sdr->tBERS_max = 5000000000;
+
+       return nand_choose_best_sdr_timings(chip, iface, sdr);
+}
+
 static int tc58teg5dclta00_init(struct nand_chip *chip)
 {
        struct mtd_info *mtd = nand_to_mtd(chip);
 
-       chip->onfi_timing_mode_default = 5;
+       chip->ops.choose_interface_config =
+               &tc58teg5dclta00_choose_interface_config;
        chip->options |= NAND_NEED_SCRAMBLING;
        mtd_set_pairing_scheme(mtd, &dist3_pairing_scheme);
 
        return 0;
 }
 
+static int tc58nvg0s3e_init(struct nand_chip *chip)
+{
+       chip->ops.choose_interface_config =
+               &tc58nvg0s3e_choose_interface_config;
+
+       return 0;
+}
+
+static int th58nvg2s3hbai4_init(struct nand_chip *chip)
+{
+       chip->ops.choose_interface_config =
+               &th58nvg2s3hbai4_choose_interface_config;
+
+       return 0;
+}
+
 static int toshiba_nand_init(struct nand_chip *chip)
 {
        if (nand_is_slc(chip))
@@ -217,6 +279,12 @@ static int toshiba_nand_init(struct nand_chip *chip)
 
        if (!strcmp("TC58TEG5DCLTA00", chip->parameters.model))
                tc58teg5dclta00_init(chip);
+       if (!strncmp("TC58NVG0S3E", chip->parameters.model,
+                    sizeof("TC58NVG0S3E") - 1))
+               tc58nvg0s3e_init(chip);
+       if (!strncmp("TH58NVG2S3HBAI4", chip->parameters.model,
+                    sizeof("TH58NVG2S3HBAI4") - 1))
+               th58nvg2s3hbai4_init(chip);
 
        return 0;
 }
index 078b102..4b79952 100644 (file)
@@ -2,7 +2,7 @@
 /*
  * Error Location Module
  *
- * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com/
  */
 
 #define DRIVER_NAME    "omap-elm"
index f1daf33..bd7a725 100644 (file)
@@ -459,11 +459,13 @@ struct qcom_nand_host {
  * among different NAND controllers.
  * @ecc_modes - ecc mode for NAND
  * @is_bam - whether NAND controller is using BAM
+ * @is_qpic - whether the NAND controller is part of the QPIC IP
  * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset
  */
 struct qcom_nandc_props {
        u32 ecc_modes;
        bool is_bam;
+       bool is_qpic;
        u32 dev_cmd_reg_start;
 };
 
@@ -2774,14 +2776,24 @@ static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
        u32 nand_ctrl;
 
        /* kill onenand */
-       nandc_write(nandc, SFLASHC_BURST_CFG, 0);
+       if (!nandc->props->is_qpic)
+               nandc_write(nandc, SFLASHC_BURST_CFG, 0);
        nandc_write(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD),
                    NAND_DEV_CMD_VLD_VAL);
 
        /* enable ADM or BAM DMA */
        if (nandc->props->is_bam) {
                nand_ctrl = nandc_read(nandc, NAND_CTRL);
-               nandc_write(nandc, NAND_CTRL, nand_ctrl | BAM_MODE_EN);
+
+               /*
+                * NAND_CTRL is an operational register, and CPU
+                * access to operational registers is read-only in
+                * BAM mode. So only update the NAND_CTRL register
+                * if BAM mode is not already enabled. In most cases
+                * BAM mode will have been enabled by the bootloader.
+                */
+               if (!(nand_ctrl & BAM_MODE_EN))
+                       nandc_write(nandc, NAND_CTRL, nand_ctrl | BAM_MODE_EN);
        } else {
                nandc_write(nandc, NAND_FLASH_CHIP_SELECT, DM_EN);
        }
@@ -3035,12 +3047,14 @@ static const struct qcom_nandc_props ipq806x_nandc_props = {
 static const struct qcom_nandc_props ipq4019_nandc_props = {
        .ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
        .is_bam = true,
+       .is_qpic = true,
        .dev_cmd_reg_start = 0x0,
 };
 
 static const struct qcom_nandc_props ipq8074_nandc_props = {
        .ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
        .is_bam = true,
+       .is_qpic = true,
        .dev_cmd_reg_start = 0x7000,
 };
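
The new is_qpic flag lets one driver cover both the older ADM-based controllers and the QPIC ones: the SFLASHC_BURST_CFG write used to disable OneNAND is skipped on QPIC, and because operational registers become read-only once BAM mode is on, NAND_CTRL is only touched when the bootloader has not already set BAM_MODE_EN. Supporting another QPIC SoC should then mostly mean adding one more properties entry; the sketch below is hypothetical (SoC and offset are made up).

/* Hypothetical: another QPIC controller sitting behind BAM DMA. */
static const struct qcom_nandc_props example_soc_nandc_props = {
	.ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
	.is_bam = true,
	.is_qpic = true,		/* skip SFLASHC_BURST_CFG, respect BAM_MODE_EN */
	.dev_cmd_reg_start = 0x7000,	/* made-up offset */
};
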
 
index d0dd0c4..1055222 100644 (file)
@@ -808,8 +808,8 @@ static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info,
        return -ENODEV;
 }
 
-static int s3c2410_nand_setup_data_interface(struct nand_chip *chip, int csline,
-                                       const struct nand_data_interface *conf)
+static int s3c2410_nand_setup_interface(struct nand_chip *chip, int csline,
+                                       const struct nand_interface_config *conf)
 {
        struct mtd_info *mtd = nand_to_mtd(chip);
        struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
@@ -999,7 +999,7 @@ static int s3c2410_nand_attach_chip(struct nand_chip *chip)
 
 static const struct nand_controller_ops s3c24xx_nand_controller_ops = {
        .attach_chip = s3c2410_nand_attach_chip,
-       .setup_data_interface = s3c2410_nand_setup_data_interface,
+       .setup_interface = s3c2410_nand_setup_interface,
 };
 
 static const struct of_device_id s3c24xx_nand_dt_ids[] = {
index 65c9d17..7f4546a 100644 (file)
 #include <linux/errno.h>
 #include <linux/interrupt.h>
 #include <linux/iopoll.h>
+#include <linux/mfd/syscon.h>
 #include <linux/module.h>
 #include <linux/mtd/rawnand.h>
+#include <linux/of_address.h>
 #include <linux/pinctrl/consumer.h>
 #include <linux/platform_device.h>
+#include <linux/regmap.h>
 #include <linux/reset.h>
 
 /* Bad block marker length */
@@ -242,7 +245,8 @@ struct stm32_fmc2_nfc {
        struct nand_controller base;
        struct stm32_fmc2_nand nand;
        struct device *dev;
-       void __iomem *io_base;
+       struct device *cdev;
+       struct regmap *regmap;
        void __iomem *data_base[FMC2_MAX_CE];
        void __iomem *cmd_base[FMC2_MAX_CE];
        void __iomem *addr_base[FMC2_MAX_CE];
@@ -277,40 +281,37 @@ static void stm32_fmc2_nfc_timings_init(struct nand_chip *chip)
        struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
        struct stm32_fmc2_nand *nand = to_fmc2_nand(chip);
        struct stm32_fmc2_timings *timings = &nand->timings;
-       u32 pcr = readl_relaxed(nfc->io_base + FMC2_PCR);
        u32 pmem, patt;
 
        /* Set tclr/tar timings */
-       pcr &= ~FMC2_PCR_TCLR;
-       pcr |= FIELD_PREP(FMC2_PCR_TCLR, timings->tclr);
-       pcr &= ~FMC2_PCR_TAR;
-       pcr |= FIELD_PREP(FMC2_PCR_TAR, timings->tar);
+       regmap_update_bits(nfc->regmap, FMC2_PCR,
+                          FMC2_PCR_TCLR | FMC2_PCR_TAR,
+                          FIELD_PREP(FMC2_PCR_TCLR, timings->tclr) |
+                          FIELD_PREP(FMC2_PCR_TAR, timings->tar));
 
        /* Set tset/twait/thold/thiz timings in common bank */
        pmem = FIELD_PREP(FMC2_PMEM_MEMSET, timings->tset_mem);
        pmem |= FIELD_PREP(FMC2_PMEM_MEMWAIT, timings->twait);
        pmem |= FIELD_PREP(FMC2_PMEM_MEMHOLD, timings->thold_mem);
        pmem |= FIELD_PREP(FMC2_PMEM_MEMHIZ, timings->thiz);
+       regmap_write(nfc->regmap, FMC2_PMEM, pmem);
 
        /* Set tset/twait/thold/thiz timings in attribute bank */
        patt = FIELD_PREP(FMC2_PATT_ATTSET, timings->tset_att);
        patt |= FIELD_PREP(FMC2_PATT_ATTWAIT, timings->twait);
        patt |= FIELD_PREP(FMC2_PATT_ATTHOLD, timings->thold_att);
        patt |= FIELD_PREP(FMC2_PATT_ATTHIZ, timings->thiz);
-
-       writel_relaxed(pcr, nfc->io_base + FMC2_PCR);
-       writel_relaxed(pmem, nfc->io_base + FMC2_PMEM);
-       writel_relaxed(patt, nfc->io_base + FMC2_PATT);
+       regmap_write(nfc->regmap, FMC2_PATT, patt);
 }
 
 static void stm32_fmc2_nfc_setup(struct nand_chip *chip)
 {
        struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
-       u32 pcr = readl_relaxed(nfc->io_base + FMC2_PCR);
+       u32 pcr = 0, pcr_mask;
 
        /* Configure ECC algorithm (default configuration is Hamming) */
-       pcr &= ~FMC2_PCR_ECCALG;
-       pcr &= ~FMC2_PCR_BCHECC;
+       pcr_mask = FMC2_PCR_ECCALG;
+       pcr_mask |= FMC2_PCR_BCHECC;
        if (chip->ecc.strength == FMC2_ECC_BCH8) {
                pcr |= FMC2_PCR_ECCALG;
                pcr |= FMC2_PCR_BCHECC;
@@ -319,15 +320,15 @@ static void stm32_fmc2_nfc_setup(struct nand_chip *chip)
        }
 
        /* Set buswidth */
-       pcr &= ~FMC2_PCR_PWID;
+       pcr_mask |= FMC2_PCR_PWID;
        if (chip->options & NAND_BUSWIDTH_16)
                pcr |= FIELD_PREP(FMC2_PCR_PWID, FMC2_PCR_PWID_BUSWIDTH_16);
 
        /* Set ECC sector size */
-       pcr &= ~FMC2_PCR_ECCSS;
+       pcr_mask |= FMC2_PCR_ECCSS;
        pcr |= FIELD_PREP(FMC2_PCR_ECCSS, FMC2_PCR_ECCSS_512);
 
-       writel_relaxed(pcr, nfc->io_base + FMC2_PCR);
+       regmap_update_bits(nfc->regmap, FMC2_PCR, pcr_mask, pcr);
 }
 
 static int stm32_fmc2_nfc_select_chip(struct nand_chip *chip, int chipnr)
@@ -393,81 +394,63 @@ static int stm32_fmc2_nfc_select_chip(struct nand_chip *chip, int chipnr)
 
 static void stm32_fmc2_nfc_set_buswidth_16(struct stm32_fmc2_nfc *nfc, bool set)
 {
-       u32 pcr = readl_relaxed(nfc->io_base + FMC2_PCR);
+       u32 pcr;
 
-       pcr &= ~FMC2_PCR_PWID;
-       if (set)
-               pcr |= FIELD_PREP(FMC2_PCR_PWID, FMC2_PCR_PWID_BUSWIDTH_16);
-       writel_relaxed(pcr, nfc->io_base + FMC2_PCR);
+       pcr = set ? FIELD_PREP(FMC2_PCR_PWID, FMC2_PCR_PWID_BUSWIDTH_16) :
+                   FIELD_PREP(FMC2_PCR_PWID, FMC2_PCR_PWID_BUSWIDTH_8);
+
+       regmap_update_bits(nfc->regmap, FMC2_PCR, FMC2_PCR_PWID, pcr);
 }
 
 static void stm32_fmc2_nfc_set_ecc(struct stm32_fmc2_nfc *nfc, bool enable)
 {
-       u32 pcr = readl(nfc->io_base + FMC2_PCR);
-
-       pcr &= ~FMC2_PCR_ECCEN;
-       if (enable)
-               pcr |= FMC2_PCR_ECCEN;
-       writel(pcr, nfc->io_base + FMC2_PCR);
+       regmap_update_bits(nfc->regmap, FMC2_PCR, FMC2_PCR_ECCEN,
+                          enable ? FMC2_PCR_ECCEN : 0);
 }
 
-static inline void stm32_fmc2_nfc_enable_seq_irq(struct stm32_fmc2_nfc *nfc)
+static void stm32_fmc2_nfc_enable_seq_irq(struct stm32_fmc2_nfc *nfc)
 {
-       u32 csqier = readl_relaxed(nfc->io_base + FMC2_CSQIER);
-
-       csqier |= FMC2_CSQIER_TCIE;
-
        nfc->irq_state = FMC2_IRQ_SEQ;
 
-       writel_relaxed(csqier, nfc->io_base + FMC2_CSQIER);
+       regmap_update_bits(nfc->regmap, FMC2_CSQIER,
+                          FMC2_CSQIER_TCIE, FMC2_CSQIER_TCIE);
 }
 
-static inline void stm32_fmc2_nfc_disable_seq_irq(struct stm32_fmc2_nfc *nfc)
+static void stm32_fmc2_nfc_disable_seq_irq(struct stm32_fmc2_nfc *nfc)
 {
-       u32 csqier = readl_relaxed(nfc->io_base + FMC2_CSQIER);
-
-       csqier &= ~FMC2_CSQIER_TCIE;
-
-       writel_relaxed(csqier, nfc->io_base + FMC2_CSQIER);
+       regmap_update_bits(nfc->regmap, FMC2_CSQIER, FMC2_CSQIER_TCIE, 0);
 
        nfc->irq_state = FMC2_IRQ_UNKNOWN;
 }
 
-static inline void stm32_fmc2_nfc_clear_seq_irq(struct stm32_fmc2_nfc *nfc)
+static void stm32_fmc2_nfc_clear_seq_irq(struct stm32_fmc2_nfc *nfc)
 {
-       writel_relaxed(FMC2_CSQICR_CLEAR_IRQ, nfc->io_base + FMC2_CSQICR);
+       regmap_write(nfc->regmap, FMC2_CSQICR, FMC2_CSQICR_CLEAR_IRQ);
 }
 
-static inline void stm32_fmc2_nfc_enable_bch_irq(struct stm32_fmc2_nfc *nfc,
-                                                int mode)
+static void stm32_fmc2_nfc_enable_bch_irq(struct stm32_fmc2_nfc *nfc, int mode)
 {
-       u32 bchier = readl_relaxed(nfc->io_base + FMC2_BCHIER);
+       nfc->irq_state = FMC2_IRQ_BCH;
 
        if (mode == NAND_ECC_WRITE)
-               bchier |= FMC2_BCHIER_EPBRIE;
+               regmap_update_bits(nfc->regmap, FMC2_BCHIER,
+                                  FMC2_BCHIER_EPBRIE, FMC2_BCHIER_EPBRIE);
        else
-               bchier |= FMC2_BCHIER_DERIE;
-
-       nfc->irq_state = FMC2_IRQ_BCH;
-
-       writel_relaxed(bchier, nfc->io_base + FMC2_BCHIER);
+               regmap_update_bits(nfc->regmap, FMC2_BCHIER,
+                                  FMC2_BCHIER_DERIE, FMC2_BCHIER_DERIE);
 }
 
-static inline void stm32_fmc2_nfc_disable_bch_irq(struct stm32_fmc2_nfc *nfc)
+static void stm32_fmc2_nfc_disable_bch_irq(struct stm32_fmc2_nfc *nfc)
 {
-       u32 bchier = readl_relaxed(nfc->io_base + FMC2_BCHIER);
-
-       bchier &= ~FMC2_BCHIER_DERIE;
-       bchier &= ~FMC2_BCHIER_EPBRIE;
-
-       writel_relaxed(bchier, nfc->io_base + FMC2_BCHIER);
+       regmap_update_bits(nfc->regmap, FMC2_BCHIER,
+                          FMC2_BCHIER_DERIE | FMC2_BCHIER_EPBRIE, 0);
 
        nfc->irq_state = FMC2_IRQ_UNKNOWN;
 }
 
-static inline void stm32_fmc2_nfc_clear_bch_irq(struct stm32_fmc2_nfc *nfc)
+static void stm32_fmc2_nfc_clear_bch_irq(struct stm32_fmc2_nfc *nfc)
 {
-       writel_relaxed(FMC2_BCHICR_CLEAR_IRQ, nfc->io_base + FMC2_BCHICR);
+       regmap_write(nfc->regmap, FMC2_BCHICR, FMC2_BCHICR_CLEAR_IRQ);
 }
 
 /*
@@ -481,13 +464,8 @@ static void stm32_fmc2_nfc_hwctl(struct nand_chip *chip, int mode)
        stm32_fmc2_nfc_set_ecc(nfc, false);
 
        if (chip->ecc.strength != FMC2_ECC_HAM) {
-               u32 pcr = readl_relaxed(nfc->io_base + FMC2_PCR);
-
-               if (mode == NAND_ECC_WRITE)
-                       pcr |= FMC2_PCR_WEN;
-               else
-                       pcr &= ~FMC2_PCR_WEN;
-               writel_relaxed(pcr, nfc->io_base + FMC2_PCR);
+               regmap_update_bits(nfc->regmap, FMC2_PCR, FMC2_PCR_WEN,
+                                  mode == NAND_ECC_WRITE ? FMC2_PCR_WEN : 0);
 
                reinit_completion(&nfc->complete);
                stm32_fmc2_nfc_clear_bch_irq(nfc);
@@ -502,7 +480,7 @@ static void stm32_fmc2_nfc_hwctl(struct nand_chip *chip, int mode)
  * ECC is 3 bytes for 512 bytes of data (supports error correction up to
  * max of 1-bit)
  */
-static inline void stm32_fmc2_nfc_ham_set_ecc(const u32 ecc_sta, u8 *ecc)
+static void stm32_fmc2_nfc_ham_set_ecc(const u32 ecc_sta, u8 *ecc)
 {
        ecc[0] = ecc_sta;
        ecc[1] = ecc_sta >> 8;
@@ -516,15 +494,15 @@ static int stm32_fmc2_nfc_ham_calculate(struct nand_chip *chip, const u8 *data,
        u32 sr, heccr;
        int ret;
 
-       ret = readl_relaxed_poll_timeout(nfc->io_base + FMC2_SR,
-                                        sr, sr & FMC2_SR_NWRF, 1,
-                                        1000 * FMC2_TIMEOUT_MS);
+       ret = regmap_read_poll_timeout(nfc->regmap, FMC2_SR, sr,
+                                      sr & FMC2_SR_NWRF, 1,
+                                      1000 * FMC2_TIMEOUT_MS);
        if (ret) {
                dev_err(nfc->dev, "ham timeout\n");
                return ret;
        }
 
-       heccr = readl_relaxed(nfc->io_base + FMC2_HECCR);
+       regmap_read(nfc->regmap, FMC2_HECCR, &heccr);
        stm32_fmc2_nfc_ham_set_ecc(heccr, ecc);
        stm32_fmc2_nfc_set_ecc(nfc, false);
 
@@ -603,13 +581,13 @@ static int stm32_fmc2_nfc_bch_calculate(struct nand_chip *chip, const u8 *data,
        }
 
        /* Read parity bits */
-       bchpbr = readl_relaxed(nfc->io_base + FMC2_BCHPBR1);
+       regmap_read(nfc->regmap, FMC2_BCHPBR1, &bchpbr);
        ecc[0] = bchpbr;
        ecc[1] = bchpbr >> 8;
        ecc[2] = bchpbr >> 16;
        ecc[3] = bchpbr >> 24;
 
-       bchpbr = readl_relaxed(nfc->io_base + FMC2_BCHPBR2);
+       regmap_read(nfc->regmap, FMC2_BCHPBR2, &bchpbr);
        ecc[4] = bchpbr;
        ecc[5] = bchpbr >> 8;
        ecc[6] = bchpbr >> 16;
@@ -617,13 +595,13 @@ static int stm32_fmc2_nfc_bch_calculate(struct nand_chip *chip, const u8 *data,
        if (chip->ecc.strength == FMC2_ECC_BCH8) {
                ecc[7] = bchpbr >> 24;
 
-               bchpbr = readl_relaxed(nfc->io_base + FMC2_BCHPBR3);
+               regmap_read(nfc->regmap, FMC2_BCHPBR3, &bchpbr);
                ecc[8] = bchpbr;
                ecc[9] = bchpbr >> 8;
                ecc[10] = bchpbr >> 16;
                ecc[11] = bchpbr >> 24;
 
-               bchpbr = readl_relaxed(nfc->io_base + FMC2_BCHPBR4);
+               regmap_read(nfc->regmap, FMC2_BCHPBR4, &bchpbr);
                ecc[12] = bchpbr;
        }
 
@@ -685,11 +663,7 @@ static int stm32_fmc2_nfc_bch_correct(struct nand_chip *chip, u8 *dat,
                return -ETIMEDOUT;
        }
 
-       ecc_sta[0] = readl_relaxed(nfc->io_base + FMC2_BCHDSR0);
-       ecc_sta[1] = readl_relaxed(nfc->io_base + FMC2_BCHDSR1);
-       ecc_sta[2] = readl_relaxed(nfc->io_base + FMC2_BCHDSR2);
-       ecc_sta[3] = readl_relaxed(nfc->io_base + FMC2_BCHDSR3);
-       ecc_sta[4] = readl_relaxed(nfc->io_base + FMC2_BCHDSR4);
+       regmap_bulk_read(nfc->regmap, FMC2_BCHDSR0, ecc_sta, 5);
 
        stm32_fmc2_nfc_set_ecc(nfc, false);
 
@@ -764,30 +738,29 @@ static void stm32_fmc2_nfc_rw_page_init(struct nand_chip *chip, int page,
 {
        struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
        struct mtd_info *mtd = nand_to_mtd(chip);
-       u32 csqcfgr1, csqcfgr2, csqcfgr3;
-       u32 csqar1, csqar2;
        u32 ecc_offset = mtd->writesize + FMC2_BBM_LEN;
-       u32 pcr = readl_relaxed(nfc->io_base + FMC2_PCR);
+       /*
+        * cfg[0] => csqcfgr1, cfg[1] => csqcfgr2, cfg[2] => csqcfgr3
+        * cfg[3] => csqar1, cfg[4] => csqar2
+        */
+       u32 cfg[5];
 
-       if (write_data)
-               pcr |= FMC2_PCR_WEN;
-       else
-               pcr &= ~FMC2_PCR_WEN;
-       writel_relaxed(pcr, nfc->io_base + FMC2_PCR);
+       regmap_update_bits(nfc->regmap, FMC2_PCR, FMC2_PCR_WEN,
+                          write_data ? FMC2_PCR_WEN : 0);
 
        /*
         * - Set Program Page/Page Read command
         * - Enable DMA request data
         * - Set timings
         */
-       csqcfgr1 = FMC2_CSQCFGR1_DMADEN | FMC2_CSQCFGR1_CMD1T;
+       cfg[0] = FMC2_CSQCFGR1_DMADEN | FMC2_CSQCFGR1_CMD1T;
        if (write_data)
-               csqcfgr1 |= FIELD_PREP(FMC2_CSQCFGR1_CMD1, NAND_CMD_SEQIN);
+               cfg[0] |= FIELD_PREP(FMC2_CSQCFGR1_CMD1, NAND_CMD_SEQIN);
        else
-               csqcfgr1 |= FIELD_PREP(FMC2_CSQCFGR1_CMD1, NAND_CMD_READ0) |
-                           FMC2_CSQCFGR1_CMD2EN |
-                           FIELD_PREP(FMC2_CSQCFGR1_CMD2, NAND_CMD_READSTART) |
-                           FMC2_CSQCFGR1_CMD2T;
+               cfg[0] |= FIELD_PREP(FMC2_CSQCFGR1_CMD1, NAND_CMD_READ0) |
+                         FMC2_CSQCFGR1_CMD2EN |
+                         FIELD_PREP(FMC2_CSQCFGR1_CMD2, NAND_CMD_READSTART) |
+                         FMC2_CSQCFGR1_CMD2T;
 
        /*
         * - Set Random Data Input/Random Data Read command
@@ -796,30 +769,29 @@ static void stm32_fmc2_nfc_rw_page_init(struct nand_chip *chip, int page,
         * - Set timings
         */
        if (write_data)
-               csqcfgr2 = FIELD_PREP(FMC2_CSQCFGR2_RCMD1, NAND_CMD_RNDIN);
+               cfg[1] = FIELD_PREP(FMC2_CSQCFGR2_RCMD1, NAND_CMD_RNDIN);
        else
-               csqcfgr2 = FIELD_PREP(FMC2_CSQCFGR2_RCMD1, NAND_CMD_RNDOUT) |
-                          FMC2_CSQCFGR2_RCMD2EN |
-                          FIELD_PREP(FMC2_CSQCFGR2_RCMD2,
-                                     NAND_CMD_RNDOUTSTART) |
-                          FMC2_CSQCFGR2_RCMD1T |
-                          FMC2_CSQCFGR2_RCMD2T;
+               cfg[1] = FIELD_PREP(FMC2_CSQCFGR2_RCMD1, NAND_CMD_RNDOUT) |
+                        FMC2_CSQCFGR2_RCMD2EN |
+                        FIELD_PREP(FMC2_CSQCFGR2_RCMD2, NAND_CMD_RNDOUTSTART) |
+                        FMC2_CSQCFGR2_RCMD1T |
+                        FMC2_CSQCFGR2_RCMD2T;
        if (!raw) {
-               csqcfgr2 |= write_data ? 0 : FMC2_CSQCFGR2_DMASEN;
-               csqcfgr2 |= FMC2_CSQCFGR2_SQSDTEN;
+               cfg[1] |= write_data ? 0 : FMC2_CSQCFGR2_DMASEN;
+               cfg[1] |= FMC2_CSQCFGR2_SQSDTEN;
        }
 
        /*
         * - Set the number of sectors to be written
         * - Set timings
         */
-       csqcfgr3 = FIELD_PREP(FMC2_CSQCFGR3_SNBR, chip->ecc.steps - 1);
+       cfg[2] = FIELD_PREP(FMC2_CSQCFGR3_SNBR, chip->ecc.steps - 1);
        if (write_data) {
-               csqcfgr3 |= FMC2_CSQCFGR3_RAC2T;
+               cfg[2] |= FMC2_CSQCFGR3_RAC2T;
                if (chip->options & NAND_ROW_ADDR_3)
-                       csqcfgr3 |= FMC2_CSQCFGR3_AC5T;
+                       cfg[2] |= FMC2_CSQCFGR3_AC5T;
                else
-                       csqcfgr3 |= FMC2_CSQCFGR3_AC4T;
+                       cfg[2] |= FMC2_CSQCFGR3_AC4T;
        }
 
        /*
@@ -827,8 +799,8 @@ static void stm32_fmc2_nfc_rw_page_init(struct nand_chip *chip, int page,
         * Byte 1 and byte 2 => column, we start at 0x0
         * Byte 3 and byte 4 => page
         */
-       csqar1 = FIELD_PREP(FMC2_CSQCAR1_ADDC3, page);
-       csqar1 |= FIELD_PREP(FMC2_CSQCAR1_ADDC4, page >> 8);
+       cfg[3] = FIELD_PREP(FMC2_CSQCAR1_ADDC3, page);
+       cfg[3] |= FIELD_PREP(FMC2_CSQCAR1_ADDC4, page >> 8);
 
        /*
         * - Set chip enable number
@@ -836,23 +808,19 @@ static void stm32_fmc2_nfc_rw_page_init(struct nand_chip *chip, int page,
         * - Calculate the number of address cycles to be issued
         * - Set byte 5 of address cycle if needed
         */
-       csqar2 = FIELD_PREP(FMC2_CSQCAR2_NANDCEN, nfc->cs_sel);
+       cfg[4] = FIELD_PREP(FMC2_CSQCAR2_NANDCEN, nfc->cs_sel);
        if (chip->options & NAND_BUSWIDTH_16)
-               csqar2 |= FIELD_PREP(FMC2_CSQCAR2_SAO, ecc_offset >> 1);
+               cfg[4] |= FIELD_PREP(FMC2_CSQCAR2_SAO, ecc_offset >> 1);
        else
-               csqar2 |= FIELD_PREP(FMC2_CSQCAR2_SAO, ecc_offset);
+               cfg[4] |= FIELD_PREP(FMC2_CSQCAR2_SAO, ecc_offset);
        if (chip->options & NAND_ROW_ADDR_3) {
-               csqcfgr1 |= FIELD_PREP(FMC2_CSQCFGR1_ACYNBR, 5);
-               csqar2 |= FIELD_PREP(FMC2_CSQCAR2_ADDC5, page >> 16);
+               cfg[0] |= FIELD_PREP(FMC2_CSQCFGR1_ACYNBR, 5);
+               cfg[4] |= FIELD_PREP(FMC2_CSQCAR2_ADDC5, page >> 16);
        } else {
-               csqcfgr1 |= FIELD_PREP(FMC2_CSQCFGR1_ACYNBR, 4);
+               cfg[0] |= FIELD_PREP(FMC2_CSQCFGR1_ACYNBR, 4);
        }
 
-       writel_relaxed(csqcfgr1, nfc->io_base + FMC2_CSQCFGR1);
-       writel_relaxed(csqcfgr2, nfc->io_base + FMC2_CSQCFGR2);
-       writel_relaxed(csqcfgr3, nfc->io_base + FMC2_CSQCFGR3);
-       writel_relaxed(csqar1, nfc->io_base + FMC2_CSQAR1);
-       writel_relaxed(csqar2, nfc->io_base + FMC2_CSQAR2);
+       regmap_bulk_write(nfc->regmap, FMC2_CSQCFGR1, cfg, 5);
 }
 
 static void stm32_fmc2_nfc_dma_callback(void *arg)
@@ -870,7 +838,6 @@ static int stm32_fmc2_nfc_xfer(struct nand_chip *chip, const u8 *buf,
        struct dma_chan *dma_ch = nfc->dma_rx_ch;
        enum dma_data_direction dma_data_dir = DMA_FROM_DEVICE;
        enum dma_transfer_direction dma_transfer_dir = DMA_DEV_TO_MEM;
-       u32 csqcr = readl_relaxed(nfc->io_base + FMC2_CSQCR);
        int eccsteps = chip->ecc.steps;
        int eccsize = chip->ecc.size;
        unsigned long timeout = msecs_to_jiffies(FMC2_TIMEOUT_MS);
@@ -948,8 +915,8 @@ static int stm32_fmc2_nfc_xfer(struct nand_chip *chip, const u8 *buf,
        stm32_fmc2_nfc_enable_seq_irq(nfc);
 
        /* Start the transfer */
-       csqcr |= FMC2_CSQCR_CSQSTART;
-       writel_relaxed(csqcr, nfc->io_base + FMC2_CSQCR);
+       regmap_update_bits(nfc->regmap, FMC2_CSQCR,
+                          FMC2_CSQCR_CSQSTART, FMC2_CSQCR_CSQSTART);
 
        /* Wait end of sequencer transfer */
        if (!wait_for_completion_timeout(&nfc->complete, timeout)) {
@@ -1042,11 +1009,13 @@ static int stm32_fmc2_nfc_seq_write_page_raw(struct nand_chip *chip,
 }
 
 /* Get a status indicating which sectors have errors */
-static inline u16 stm32_fmc2_nfc_get_mapping_status(struct stm32_fmc2_nfc *nfc)
+static u16 stm32_fmc2_nfc_get_mapping_status(struct stm32_fmc2_nfc *nfc)
 {
-       u32 csqemsr = readl_relaxed(nfc->io_base + FMC2_CSQEMSR);
+       u32 csqemsr;
 
-       return csqemsr & FMC2_CSQEMSR_SEM;
+       regmap_read(nfc->regmap, FMC2_CSQEMSR, &csqemsr);
+
+       return FIELD_GET(FMC2_CSQEMSR_SEM, csqemsr);
 }
 
 static int stm32_fmc2_nfc_seq_correct(struct nand_chip *chip, u8 *dat,
@@ -1302,22 +1271,22 @@ static int stm32_fmc2_nfc_waitrdy(struct nand_chip *chip,
        u32 isr, sr;
 
        /* Check that there are no pending requests to the NAND flash */
-       if (readl_relaxed_poll_timeout_atomic(nfc->io_base + FMC2_SR, sr,
-                                             sr & FMC2_SR_NWRF, 1,
-                                             1000 * FMC2_TIMEOUT_MS))
+       if (regmap_read_poll_timeout(nfc->regmap, FMC2_SR, sr,
+                                    sr & FMC2_SR_NWRF, 1,
+                                    1000 * FMC2_TIMEOUT_MS))
                dev_warn(nfc->dev, "Waitrdy timeout\n");
 
        /* Wait tWB before R/B# signal is low */
-       timings = nand_get_sdr_timings(&chip->data_interface);
+       timings = nand_get_sdr_timings(nand_get_interface_config(chip));
        ndelay(PSEC_TO_NSEC(timings->tWB_max));
 
        /* R/B# signal is low, clear high level flag */
-       writel_relaxed(FMC2_ICR_CIHLF, nfc->io_base + FMC2_ICR);
+       regmap_write(nfc->regmap, FMC2_ICR, FMC2_ICR_CIHLF);
 
        /* Wait R/B# signal is high */
-       return readl_relaxed_poll_timeout_atomic(nfc->io_base + FMC2_ISR,
-                                                isr, isr & FMC2_ISR_IHLF,
-                                                5, 1000 * timeout_ms);
+       return regmap_read_poll_timeout(nfc->regmap, FMC2_ISR, isr,
+                                       isr & FMC2_ISR_IHLF, 5,
+                                       1000 * FMC2_TIMEOUT_MS);
 }
 
 static int stm32_fmc2_nfc_exec_op(struct nand_chip *chip,
@@ -1375,8 +1344,9 @@ static int stm32_fmc2_nfc_exec_op(struct nand_chip *chip,
 
 static void stm32_fmc2_nfc_init(struct stm32_fmc2_nfc *nfc)
 {
-       u32 pcr = readl_relaxed(nfc->io_base + FMC2_PCR);
-       u32 bcr1 = readl_relaxed(nfc->io_base + FMC2_BCR1);
+       u32 pcr;
+
+       regmap_read(nfc->regmap, FMC2_PCR, &pcr);
 
        /* Set CS used to undefined */
        nfc->cs_sel = -1;
@@ -1407,12 +1377,13 @@ static void stm32_fmc2_nfc_init(struct stm32_fmc2_nfc *nfc)
        pcr |= FIELD_PREP(FMC2_PCR_TAR, FMC2_PCR_TAR_DEFAULT);
 
        /* Enable FMC2 controller */
-       bcr1 |= FMC2_BCR1_FMC2EN;
+       if (nfc->dev == nfc->cdev)
+               regmap_update_bits(nfc->regmap, FMC2_BCR1,
+                                  FMC2_BCR1_FMC2EN, FMC2_BCR1_FMC2EN);
 
-       writel_relaxed(bcr1, nfc->io_base + FMC2_BCR1);
-       writel_relaxed(pcr, nfc->io_base + FMC2_PCR);
-       writel_relaxed(FMC2_PMEM_DEFAULT, nfc->io_base + FMC2_PMEM);
-       writel_relaxed(FMC2_PATT_DEFAULT, nfc->io_base + FMC2_PATT);
+       regmap_write(nfc->regmap, FMC2_PCR, pcr);
+       regmap_write(nfc->regmap, FMC2_PMEM, FMC2_PMEM_DEFAULT);
+       regmap_write(nfc->regmap, FMC2_PATT, FMC2_PATT_DEFAULT);
 }
 
 static void stm32_fmc2_nfc_calc_timings(struct nand_chip *chip,
@@ -1546,7 +1517,7 @@ static void stm32_fmc2_nfc_calc_timings(struct nand_chip *chip,
 }
 
 static int stm32_fmc2_nfc_setup_interface(struct nand_chip *chip, int chipnr,
-                                         const struct nand_data_interface *conf)
+                                         const struct nand_interface_config *conf)
 {
        const struct nand_sdr_timings *sdrt;
 
@@ -1570,7 +1541,7 @@ static int stm32_fmc2_nfc_dma_setup(struct stm32_fmc2_nfc *nfc)
        nfc->dma_tx_ch = dma_request_chan(nfc->dev, "tx");
        if (IS_ERR(nfc->dma_tx_ch)) {
                ret = PTR_ERR(nfc->dma_tx_ch);
-               if (ret != -ENODEV)
+               if (ret != -ENODEV && ret != -EPROBE_DEFER)
                        dev_err(nfc->dev,
                                "failed to request tx DMA channel: %d\n", ret);
                nfc->dma_tx_ch = NULL;
@@ -1580,7 +1551,7 @@ static int stm32_fmc2_nfc_dma_setup(struct stm32_fmc2_nfc *nfc)
        nfc->dma_rx_ch = dma_request_chan(nfc->dev, "rx");
        if (IS_ERR(nfc->dma_rx_ch)) {
                ret = PTR_ERR(nfc->dma_rx_ch);
-               if (ret != -ENODEV)
+               if (ret != -ENODEV && ret != -EPROBE_DEFER)
                        dev_err(nfc->dev,
                                "failed to request rx DMA channel: %d\n", ret);
                nfc->dma_rx_ch = NULL;
@@ -1590,7 +1561,7 @@ static int stm32_fmc2_nfc_dma_setup(struct stm32_fmc2_nfc *nfc)
        nfc->dma_ecc_ch = dma_request_chan(nfc->dev, "ecc");
        if (IS_ERR(nfc->dma_ecc_ch)) {
                ret = PTR_ERR(nfc->dma_ecc_ch);
-               if (ret != -ENODEV)
+               if (ret != -ENODEV && ret != -EPROBE_DEFER)
                        dev_err(nfc->dev,
                                "failed to request ecc DMA channel: %d\n", ret);
                nfc->dma_ecc_ch = NULL;
@@ -1764,7 +1735,7 @@ static int stm32_fmc2_nfc_attach_chip(struct nand_chip *chip)
 static const struct nand_controller_ops stm32_fmc2_nfc_controller_ops = {
        .attach_chip = stm32_fmc2_nfc_attach_chip,
        .exec_op = stm32_fmc2_nfc_exec_op,
-       .setup_data_interface = stm32_fmc2_nfc_setup_interface,
+       .setup_interface = stm32_fmc2_nfc_setup_interface,
 };
 
 static int stm32_fmc2_nfc_parse_child(struct stm32_fmc2_nfc *nfc,
@@ -1838,6 +1809,33 @@ static int stm32_fmc2_nfc_parse_dt(struct stm32_fmc2_nfc *nfc)
        return ret;
 }
 
+static int stm32_fmc2_nfc_set_cdev(struct stm32_fmc2_nfc *nfc)
+{
+       struct device *dev = nfc->dev;
+       bool ebi_found = false;
+
+       if (dev->parent && of_device_is_compatible(dev->parent->of_node,
+                                                  "st,stm32mp1-fmc2-ebi"))
+               ebi_found = true;
+
+       if (of_device_is_compatible(dev->of_node, "st,stm32mp1-fmc2-nfc")) {
+               if (ebi_found) {
+                       nfc->cdev = dev->parent;
+
+                       return 0;
+               }
+
+               return -EINVAL;
+       }
+
+       if (ebi_found)
+               return -EINVAL;
+
+       nfc->cdev = dev;
+
+       return 0;
+}
+
 static int stm32_fmc2_nfc_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
@@ -1847,7 +1845,9 @@ static int stm32_fmc2_nfc_probe(struct platform_device *pdev)
        struct resource *res;
        struct mtd_info *mtd;
        struct nand_chip *chip;
+       struct resource cres;
        int chip_cs, mem_region, ret, irq;
+       int start_region = 0;
 
        nfc = devm_kzalloc(dev, sizeof(*nfc), GFP_KERNEL);
        if (!nfc)
@@ -1857,18 +1857,28 @@ static int stm32_fmc2_nfc_probe(struct platform_device *pdev)
        nand_controller_init(&nfc->base);
        nfc->base.ops = &stm32_fmc2_nfc_controller_ops;
 
+       ret = stm32_fmc2_nfc_set_cdev(nfc);
+       if (ret)
+               return ret;
+
        ret = stm32_fmc2_nfc_parse_dt(nfc);
        if (ret)
                return ret;
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       nfc->io_base = devm_ioremap_resource(dev, res);
-       if (IS_ERR(nfc->io_base))
-               return PTR_ERR(nfc->io_base);
+       ret = of_address_to_resource(nfc->cdev->of_node, 0, &cres);
+       if (ret)
+               return ret;
+
+       nfc->io_phys_addr = cres.start;
+
+       nfc->regmap = device_node_to_regmap(nfc->cdev->of_node);
+       if (IS_ERR(nfc->regmap))
+               return PTR_ERR(nfc->regmap);
 
-       nfc->io_phys_addr = res->start;
+       if (nfc->dev == nfc->cdev)
+               start_region = 1;
 
-       for (chip_cs = 0, mem_region = 1; chip_cs < FMC2_MAX_CE;
+       for (chip_cs = 0, mem_region = start_region; chip_cs < FMC2_MAX_CE;
             chip_cs++, mem_region += 3) {
                if (!(nfc->cs_assigned & BIT(chip_cs)))
                        continue;
@@ -1906,7 +1916,7 @@ static int stm32_fmc2_nfc_probe(struct platform_device *pdev)
 
        init_completion(&nfc->complete);
 
-       nfc->clk = devm_clk_get(dev, NULL);
+       nfc->clk = devm_clk_get(nfc->cdev, NULL);
        if (IS_ERR(nfc->clk))
                return PTR_ERR(nfc->clk);
 
@@ -2047,6 +2057,7 @@ static SIMPLE_DEV_PM_OPS(stm32_fmc2_nfc_pm_ops, stm32_fmc2_nfc_suspend,
 
 static const struct of_device_id stm32_fmc2_nfc_match[] = {
        {.compatible = "st,stm32mp15-fmc2"},
+       {.compatible = "st,stm32mp1-fmc2-nfc"},
        {}
 };
 MODULE_DEVICE_TABLE(of, stm32_fmc2_nfc_match);
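
Most of the stm32_fmc2 churn above is a mechanical conversion from a private iomem mapping with readl_relaxed()/writel_relaxed() read-modify-write sequences to a regmap obtained from the controller node via device_node_to_regmap(), so the register bank can be shared with the new FMC2 EBI parent driver (nfc->cdev). The core equivalence, sketched with a hypothetical register and field (example_ names are not from the driver):

#include <linux/io.h>
#include <linux/regmap.h>

/* Before: open-coded read-modify-write on a privately mapped register. */
static void example_update_field_mmio(void __iomem *base, unsigned int reg,
				      u32 mask, u32 val)
{
	u32 tmp = readl_relaxed(base + reg);

	tmp &= ~mask;
	tmp |= val & mask;
	writel_relaxed(tmp, base + reg);
}

/*
 * After: regmap performs the same masked update and can serialize access
 * with the other user of the block (here, the EBI driver).
 */
static void example_update_field_regmap(struct regmap *map, unsigned int reg,
					u32 mask, u32 val)
{
	regmap_update_bits(map, reg, mask, val & mask);
}

The polled waits follow the same substitution, readl_relaxed_poll_timeout() becoming regmap_read_poll_timeout() with the condition and timeout arguments unchanged.
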
index ffbc165..9c50c2b 100644 (file)
@@ -1376,8 +1376,8 @@ static int _sunxi_nand_lookup_timing(const s32 *lut, int lut_size, u32 duration,
 #define sunxi_nand_lookup_timing(l, p, c) \
                        _sunxi_nand_lookup_timing(l, ARRAY_SIZE(l), p, c)
 
-static int sunxi_nfc_setup_data_interface(struct nand_chip *nand, int csline,
-                                       const struct nand_data_interface *conf)
+static int sunxi_nfc_setup_interface(struct nand_chip *nand, int csline,
+                                    const struct nand_interface_config *conf)
 {
        struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
        struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
@@ -1920,7 +1920,7 @@ static int sunxi_nfc_exec_op(struct nand_chip *nand,
 
 static const struct nand_controller_ops sunxi_nand_controller_ops = {
        .attach_chip = sunxi_nand_attach_chip,
-       .setup_data_interface = sunxi_nfc_setup_data_interface,
+       .setup_interface = sunxi_nfc_setup_interface,
        .exec_op = sunxi_nfc_exec_op,
 };
 
index 246871e..bdb965a 100644 (file)
@@ -113,59 +113,80 @@ struct tango_chip {
 
 #define TIMING(t0, t1, t2, t3) ((t0) << 24 | (t1) << 16 | (t2) << 8 | (t3))
 
-static void tango_cmd_ctrl(struct nand_chip *chip, int dat, unsigned int ctrl)
+static void tango_select_target(struct nand_chip *chip, unsigned int cs)
 {
+       struct tango_nfc *nfc = to_tango_nfc(chip->controller);
        struct tango_chip *tchip = to_tango_chip(chip);
 
-       if (ctrl & NAND_CLE)
-               writeb_relaxed(dat, tchip->base + PBUS_CMD);
-
-       if (ctrl & NAND_ALE)
-               writeb_relaxed(dat, tchip->base + PBUS_ADDR);
+       writel_relaxed(tchip->timing1, nfc->reg_base + NFC_TIMING1);
+       writel_relaxed(tchip->timing2, nfc->reg_base + NFC_TIMING2);
+       writel_relaxed(tchip->xfer_cfg, nfc->reg_base + NFC_XFER_CFG);
+       writel_relaxed(tchip->pkt_0_cfg, nfc->reg_base + NFC_PKT_0_CFG);
+       writel_relaxed(tchip->pkt_n_cfg, nfc->reg_base + NFC_PKT_N_CFG);
+       writel_relaxed(tchip->bb_cfg, nfc->reg_base + NFC_BB_CFG);
 }
 
-static int tango_dev_ready(struct nand_chip *chip)
+static int tango_waitrdy(struct nand_chip *chip, unsigned int timeout_ms)
 {
        struct tango_nfc *nfc = to_tango_nfc(chip->controller);
+       u32 status;
 
-       return readl_relaxed(nfc->pbus_base + PBUS_CS_CTRL) & PBUS_IORDY;
+       return readl_relaxed_poll_timeout(nfc->pbus_base + PBUS_CS_CTRL,
+                                         status, status & PBUS_IORDY, 20,
+                                         timeout_ms);
 }
 
-static u8 tango_read_byte(struct nand_chip *chip)
+static int tango_exec_instr(struct nand_chip *chip,
+                           const struct nand_op_instr *instr)
 {
        struct tango_chip *tchip = to_tango_chip(chip);
+       unsigned int i;
 
-       return readb_relaxed(tchip->base + PBUS_DATA);
-}
-
-static void tango_read_buf(struct nand_chip *chip, u8 *buf, int len)
-{
-       struct tango_chip *tchip = to_tango_chip(chip);
+       switch (instr->type) {
+       case NAND_OP_CMD_INSTR:
+               writeb_relaxed(instr->ctx.cmd.opcode, tchip->base + PBUS_CMD);
+               return 0;
+       case NAND_OP_ADDR_INSTR:
+               for (i = 0; i < instr->ctx.addr.naddrs; i++)
+                       writeb_relaxed(instr->ctx.addr.addrs[i],
+                                      tchip->base + PBUS_ADDR);
+               return 0;
+       case NAND_OP_DATA_IN_INSTR:
+               ioread8_rep(tchip->base + PBUS_DATA, instr->ctx.data.buf.in,
+                           instr->ctx.data.len);
+               return 0;
+       case NAND_OP_DATA_OUT_INSTR:
+               iowrite8_rep(tchip->base + PBUS_DATA, instr->ctx.data.buf.out,
+                            instr->ctx.data.len);
+               return 0;
+       case NAND_OP_WAITRDY_INSTR:
+               return tango_waitrdy(chip,
+                                    instr->ctx.waitrdy.timeout_ms);
+       default:
+               break;
+       }
 
-       ioread8_rep(tchip->base + PBUS_DATA, buf, len);
+       return -EINVAL;
 }
 
-static void tango_write_buf(struct nand_chip *chip, const u8 *buf, int len)
+static int tango_exec_op(struct nand_chip *chip,
+                        const struct nand_operation *op,
+                        bool check_only)
 {
-       struct tango_chip *tchip = to_tango_chip(chip);
-
-       iowrite8_rep(tchip->base + PBUS_DATA, buf, len);
-}
+       unsigned int i;
+       int ret = 0;
 
-static void tango_select_chip(struct nand_chip *chip, int idx)
-{
-       struct tango_nfc *nfc = to_tango_nfc(chip->controller);
-       struct tango_chip *tchip = to_tango_chip(chip);
+       if (check_only)
+               return 0;
 
-       if (idx < 0)
-               return; /* No "chip unselect" function */
+       tango_select_target(chip, op->cs);
+       for (i = 0; i < op->ninstrs; i++) {
+               ret = tango_exec_instr(chip, &op->instrs[i]);
+               if (ret)
+                       break;
+       }
 
-       writel_relaxed(tchip->timing1, nfc->reg_base + NFC_TIMING1);
-       writel_relaxed(tchip->timing2, nfc->reg_base + NFC_TIMING2);
-       writel_relaxed(tchip->xfer_cfg, nfc->reg_base + NFC_XFER_CFG);
-       writel_relaxed(tchip->pkt_0_cfg, nfc->reg_base + NFC_PKT_0_CFG);
-       writel_relaxed(tchip->pkt_n_cfg, nfc->reg_base + NFC_PKT_N_CFG);
-       writel_relaxed(tchip->bb_cfg, nfc->reg_base + NFC_BB_CFG);
+       return ret;
 }
 
 /*
@@ -279,6 +300,7 @@ static int tango_read_page(struct nand_chip *chip, u8 *buf,
        struct tango_nfc *nfc = to_tango_nfc(chip->controller);
        int err, res, len = mtd->writesize;
 
+       tango_select_target(chip, chip->cur_cs);
        if (oob_required)
                chip->ecc.read_oob(chip, page);
 
@@ -300,22 +322,30 @@ static int tango_write_page(struct nand_chip *chip, const u8 *buf,
 {
        struct mtd_info *mtd = nand_to_mtd(chip);
        struct tango_nfc *nfc = to_tango_nfc(chip->controller);
-       int err, status, len = mtd->writesize;
+       const struct nand_sdr_timings *timings;
+       int err, len = mtd->writesize;
+       u8 status;
 
        /* Calling tango_write_oob() would send PAGEPROG twice */
        if (oob_required)
                return -ENOTSUPP;
 
+       tango_select_target(chip, chip->cur_cs);
        writel_relaxed(0xffffffff, nfc->mem_base + METADATA);
        err = do_dma(nfc, DMA_TO_DEVICE, NFC_WRITE, buf, len, page);
        if (err)
                return err;
 
-       status = chip->legacy.waitfunc(chip);
-       if (status & NAND_STATUS_FAIL)
-               return -EIO;
+       timings = nand_get_sdr_timings(nand_get_interface_config(chip));
+       err = tango_waitrdy(chip, PSEC_TO_MSEC(timings->tR_max));
+       if (err)
+               return err;
 
-       return 0;
+       err = nand_status_op(chip, &status);
+       if (err)
+               return err;
+
+       return (status & NAND_STATUS_FAIL) ? -EIO : 0;
 }
 
 static void aux_read(struct nand_chip *chip, u8 **buf, int len, int *pos)
@@ -326,7 +356,9 @@ static void aux_read(struct nand_chip *chip, u8 **buf, int len, int *pos)
                /* skip over "len" bytes */
                nand_change_read_column_op(chip, *pos, NULL, 0, false);
        } else {
-               tango_read_buf(chip, *buf, len);
+               struct tango_chip *tchip = to_tango_chip(chip);
+
+               ioread8_rep(tchip->base + PBUS_DATA, *buf, len);
                *buf += len;
        }
 }
@@ -339,7 +371,9 @@ static void aux_write(struct nand_chip *chip, const u8 **buf, int len, int *pos)
                /* skip over "len" bytes */
                nand_change_write_column_op(chip, *pos, NULL, 0, false);
        } else {
-               tango_write_buf(chip, *buf, len);
+               struct tango_chip *tchip = to_tango_chip(chip);
+
+               iowrite8_rep(tchip->base + PBUS_DATA, *buf, len);
                *buf += len;
        }
 }
@@ -420,6 +454,7 @@ static void raw_write(struct nand_chip *chip, const u8 *buf, const u8 *oob)
 static int tango_read_page_raw(struct nand_chip *chip, u8 *buf,
                               int oob_required, int page)
 {
+       tango_select_target(chip, chip->cur_cs);
        nand_read_page_op(chip, page, 0, NULL, 0);
        raw_read(chip, buf, chip->oob_poi);
        return 0;
@@ -428,6 +463,7 @@ static int tango_read_page_raw(struct nand_chip *chip, u8 *buf,
 static int tango_write_page_raw(struct nand_chip *chip, const u8 *buf,
                                int oob_required, int page)
 {
+       tango_select_target(chip, chip->cur_cs);
        nand_prog_page_begin_op(chip, page, 0, NULL, 0);
        raw_write(chip, buf, chip->oob_poi);
        return nand_prog_page_end_op(chip);
@@ -435,6 +471,7 @@ static int tango_write_page_raw(struct nand_chip *chip, const u8 *buf,
 
 static int tango_read_oob(struct nand_chip *chip, int page)
 {
+       tango_select_target(chip, chip->cur_cs);
        nand_read_page_op(chip, page, 0, NULL, 0);
        raw_read(chip, NULL, chip->oob_poi);
        return 0;
@@ -442,6 +479,7 @@ static int tango_read_oob(struct nand_chip *chip, int page)
 
 static int tango_write_oob(struct nand_chip *chip, int page)
 {
+       tango_select_target(chip, chip->cur_cs);
        nand_prog_page_begin_op(chip, page, 0, NULL, 0);
        raw_write(chip, NULL, chip->oob_poi);
        return nand_prog_page_end_op(chip);
@@ -477,7 +515,7 @@ static u32 to_ticks(int kHz, int ps)
 }
 
 static int tango_set_timings(struct nand_chip *chip, int csline,
-                            const struct nand_data_interface *conf)
+                            const struct nand_interface_config *conf)
 {
        const struct nand_sdr_timings *sdr = nand_get_sdr_timings(conf);
        struct tango_nfc *nfc = to_tango_nfc(chip->controller);
@@ -527,7 +565,8 @@ static int tango_attach_chip(struct nand_chip *chip)
 
 static const struct nand_controller_ops tango_controller_ops = {
        .attach_chip = tango_attach_chip,
-       .setup_data_interface = tango_set_timings,
+       .setup_interface = tango_set_timings,
+       .exec_op = tango_exec_op,
 };
 
 static int chip_init(struct device *dev, struct device_node *np)
@@ -562,12 +601,6 @@ static int chip_init(struct device *dev, struct device_node *np)
        ecc = &chip->ecc;
        mtd = nand_to_mtd(chip);
 
-       chip->legacy.read_byte = tango_read_byte;
-       chip->legacy.write_buf = tango_write_buf;
-       chip->legacy.read_buf = tango_read_buf;
-       chip->legacy.select_chip = tango_select_chip;
-       chip->legacy.cmd_ctrl = tango_cmd_ctrl;
-       chip->legacy.dev_ready = tango_dev_ready;
        chip->options = NAND_USES_DMA |
                        NAND_NO_SUBPAGE_WRITE |
                        NAND_WAIT_TCCS;
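
The tango conversion above drops the chip->legacy.* hooks in favour of a single ->exec_op() implementation: the raw NAND core now hands the controller a struct nand_operation, an array of CMD/ADDR/DATA_IN/DATA_OUT/WAITRDY instructions, and the driver dispatches each one in turn. A stripped-down dispatcher of the same shape is sketched below, with the controller-specific I/O left as stubs; everything prefixed example_ is hypothetical.

#include <linux/mtd/rawnand.h>

/* Stubs standing in for controller-specific register accesses. */
static void example_select_target(struct nand_chip *chip, unsigned int cs) { }
static int example_waitrdy(struct nand_chip *chip, unsigned int timeout_ms)
{
	return 0;
}

static int example_exec_op(struct nand_chip *chip,
			   const struct nand_operation *op, bool check_only)
{
	unsigned int i;
	int ret;

	if (check_only)
		return 0;	/* every instruction type below is supported */

	/* Apply per-target timings/configuration before driving the bus. */
	example_select_target(chip, op->cs);

	for (i = 0; i < op->ninstrs; i++) {
		const struct nand_op_instr *instr = &op->instrs[i];

		switch (instr->type) {
		case NAND_OP_CMD_INSTR:
			/* latch instr->ctx.cmd.opcode */
			break;
		case NAND_OP_ADDR_INSTR:
			/* latch instr->ctx.addr.addrs[0..naddrs - 1] */
			break;
		case NAND_OP_DATA_IN_INSTR:
		case NAND_OP_DATA_OUT_INSTR:
			/* move instr->ctx.data.len bytes over the data port */
			break;
		case NAND_OP_WAITRDY_INSTR:
			ret = example_waitrdy(chip,
					      instr->ctx.waitrdy.timeout_ms);
			if (ret)
				return ret;
			break;
		default:
			return -EOPNOTSUPP;
		}
	}

	return 0;
}
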
index f9d046b..6b6212f 100644 (file)
@@ -813,8 +813,8 @@ static void tegra_nand_setup_timing(struct tegra_nand_controller *ctrl,
        writel_relaxed(reg, ctrl->regs + TIMING_2);
 }
 
-static int tegra_nand_setup_data_interface(struct nand_chip *chip, int csline,
-                                       const struct nand_data_interface *conf)
+static int tegra_nand_setup_interface(struct nand_chip *chip, int csline,
+                                     const struct nand_interface_config *conf)
 {
        struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
        const struct nand_sdr_timings *timings;
@@ -1053,7 +1053,7 @@ static int tegra_nand_attach_chip(struct nand_chip *chip)
 static const struct nand_controller_ops tegra_nand_controller_ops = {
        .attach_chip = &tegra_nand_attach_chip,
        .exec_op = tegra_nand_exec_op,
-       .setup_data_interface = tegra_nand_setup_data_interface,
+       .setup_interface = tegra_nand_setup_interface,
 };
 
 static int tegra_nand_chips_init(struct device *dev,
index 78f90c6..b15bdad 100644 (file)
 #include <linux/mtd/partitions.h>
 #include <linux/of.h>
 
+#ifdef CONFIG_MIPS
+#include <asm/bootinfo.h>
+#include <asm/fw/cfe/cfe_api.h>
+#endif /* CONFIG_MIPS */
+
 #define BCM963XX_CFE_BLOCK_SIZE                SZ_64K  /* always at least 64KiB */
 
 #define BCM963XX_CFE_MAGIC_OFFSET      0x4e0
 #define STR_NULL_TERMINATE(x) \
        do { char *_str = (x); _str[sizeof(x) - 1] = 0; } while (0)
 
-static int bcm63xx_detect_cfe(struct mtd_info *master)
+static inline int bcm63xx_detect_cfe(void)
 {
-       char buf[9];
-       int ret;
-       size_t retlen;
+       int ret = 0;
 
-       ret = mtd_read(master, BCM963XX_CFE_VERSION_OFFSET, 5, &retlen,
-                      (void *)buf);
-       buf[retlen] = 0;
+#ifdef CONFIG_MIPS
+       ret = (fw_arg3 == CFE_EPTSEAL);
+#endif /* CONFIG_MIPS */
 
-       if (ret)
-               return ret;
-
-       if (strncmp("cfe-v", buf, 5) == 0)
-               return 0;
-
-       /* very old CFE's do not have the cfe-v string, so check for magic */
-       ret = mtd_read(master, BCM963XX_CFE_MAGIC_OFFSET, 8, &retlen,
-                      (void *)buf);
-       buf[retlen] = 0;
-
-       return strncmp("CFE1CFE1", buf, 8);
+       return ret;
 }
 
 static int bcm63xx_read_nvram(struct mtd_info *master,
@@ -138,7 +130,7 @@ static int bcm63xx_parse_cfe_partitions(struct mtd_info *master,
        struct bcm963xx_nvram *nvram = NULL;
        int ret;
 
-       if (bcm63xx_detect_cfe(master))
+       if (!bcm63xx_detect_cfe())
                return -EINVAL;
 
        nvram = vzalloc(sizeof(*nvram));
index 81329f6..c72aa1a 100644 (file)
@@ -68,7 +68,9 @@ static const struct pci_device_id intel_spi_pci_ids[] = {
        { PCI_VDEVICE(INTEL, 0x06a4), (unsigned long)&bxt_info },
        { PCI_VDEVICE(INTEL, 0x18e0), (unsigned long)&bxt_info },
        { PCI_VDEVICE(INTEL, 0x19e0), (unsigned long)&bxt_info },
+       { PCI_VDEVICE(INTEL, 0x1bca), (unsigned long)&bxt_info },
        { PCI_VDEVICE(INTEL, 0x34a4), (unsigned long)&bxt_info },
+       { PCI_VDEVICE(INTEL, 0x43a4), (unsigned long)&cnl_info },
        { PCI_VDEVICE(INTEL, 0x4b24), (unsigned long)&bxt_info },
        { PCI_VDEVICE(INTEL, 0x4da4), (unsigned long)&bxt_info },
        { PCI_VDEVICE(INTEL, 0xa0a4), (unsigned long)&bxt_info },
index 61d2a0a..b54a56a 100644 (file)
@@ -292,7 +292,7 @@ static int intel_spi_wait_hw_busy(struct intel_spi *ispi)
        u32 val;
 
        return readl_poll_timeout(ispi->base + HSFSTS_CTL, val,
-                                 !(val & HSFSTS_CTL_SCIP), 40,
+                                 !(val & HSFSTS_CTL_SCIP), 0,
                                  INTEL_SPI_TIMEOUT * 1000);
 }
 
@@ -301,7 +301,7 @@ static int intel_spi_wait_sw_busy(struct intel_spi *ispi)
        u32 val;
 
        return readl_poll_timeout(ispi->sregs + SSFSTS_CTL, val,
-                                 !(val & SSFSTS_CTL_SCIP), 40,
+                                 !(val & SSFSTS_CTL_SCIP), 0,
                                  INTEL_SPI_TIMEOUT * 1000);
 }
 
@@ -612,6 +612,15 @@ static int intel_spi_write_reg(struct spi_nor *nor, u8 opcode, const u8 *buf,
                return 0;
        }
 
+       /*
+        * We hope that the HW sequencer will do the right thing automatically
+        * and with the SW sequencer we cannot use preopcode anyway, so just
+        * ignore the Write Disable operation and pretend it was completed
+        * successfully.
+        */
+       if (opcode == SPINOR_OP_WRDI)
+               return 0;
+
        writel(0, ispi->base + FADDR);
 
        /* Write the value beforehand */
index 0369d98..65eff4c 100644 (file)
@@ -1907,15 +1907,16 @@ static int spi_nor_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
 }
 
 /**
- * spi_nor_sr1_bit6_quad_enable() - Set the Quad Enable BIT(6) in the Status
- * Register 1.
+ * spi_nor_sr1_bit6_quad_enable() - Set/Unset the Quad Enable BIT(6) in the
+ *                                  Status Register 1.
  * @nor:       pointer to a 'struct spi_nor'
+ * @enable:    true to enable Quad mode, false to disable Quad mode.
  *
  * Bit 6 of the Status Register 1 is the QE bit for Macronix like QSPI memories.
  *
  * Return: 0 on success, -errno otherwise.
  */
-int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor)
+int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor, bool enable)
 {
        int ret;
 
@@ -1923,45 +1924,56 @@ int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor)
        if (ret)
                return ret;
 
-       if (nor->bouncebuf[0] & SR1_QUAD_EN_BIT6)
+       if ((enable && (nor->bouncebuf[0] & SR1_QUAD_EN_BIT6)) ||
+           (!enable && !(nor->bouncebuf[0] & SR1_QUAD_EN_BIT6)))
                return 0;
 
-       nor->bouncebuf[0] |= SR1_QUAD_EN_BIT6;
+       if (enable)
+               nor->bouncebuf[0] |= SR1_QUAD_EN_BIT6;
+       else
+               nor->bouncebuf[0] &= ~SR1_QUAD_EN_BIT6;
 
        return spi_nor_write_sr1_and_check(nor, nor->bouncebuf[0]);
 }
 
 /**
- * spi_nor_sr2_bit1_quad_enable() - set the Quad Enable BIT(1) in the Status
- * Register 2.
+ * spi_nor_sr2_bit1_quad_enable() - set/unset the Quad Enable BIT(1) in the
+ *                                  Status Register 2.
  * @nor:       pointer to a 'struct spi_nor'.
+ * @enable:    true to enable Quad mode, false to disable Quad mode.
  *
  * Bit 1 of the Status Register 2 is the QE bit for Spansion like QSPI memories.
  *
  * Return: 0 on success, -errno otherwise.
  */
-int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor)
+int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor, bool enable)
 {
        int ret;
 
        if (nor->flags & SNOR_F_NO_READ_CR)
-               return spi_nor_write_16bit_cr_and_check(nor, SR2_QUAD_EN_BIT1);
+               return spi_nor_write_16bit_cr_and_check(nor,
+                                               enable ? SR2_QUAD_EN_BIT1 : 0);
 
        ret = spi_nor_read_cr(nor, nor->bouncebuf);
        if (ret)
                return ret;
 
-       if (nor->bouncebuf[0] & SR2_QUAD_EN_BIT1)
+       if ((enable && (nor->bouncebuf[0] & SR2_QUAD_EN_BIT1)) ||
+           (!enable && !(nor->bouncebuf[0] & SR2_QUAD_EN_BIT1)))
                return 0;
 
-       nor->bouncebuf[0] |= SR2_QUAD_EN_BIT1;
+       if (enable)
+               nor->bouncebuf[0] |= SR2_QUAD_EN_BIT1;
+       else
+               nor->bouncebuf[0] &= ~SR2_QUAD_EN_BIT1;
 
        return spi_nor_write_16bit_cr_and_check(nor, nor->bouncebuf[0]);
 }
 
 /**
- * spi_nor_sr2_bit7_quad_enable() - set QE bit in Status Register 2.
+ * spi_nor_sr2_bit7_quad_enable() - set/unset QE bit in Status Register 2.
  * @nor:       pointer to a 'struct spi_nor'
+ * @enable:    true to enable Quad mode, false to disable Quad mode.
  *
  * Set the Quad Enable (QE) bit in the Status Register 2.
  *
@@ -1971,7 +1983,7 @@ int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor)
  *
  * Return: 0 on success, -errno otherwise.
  */
-int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor)
+int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor, bool enable)
 {
        u8 *sr2 = nor->bouncebuf;
        int ret;
@@ -1981,11 +1993,15 @@ int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor)
        ret = spi_nor_read_sr2(nor, sr2);
        if (ret)
                return ret;
-       if (*sr2 & SR2_QUAD_EN_BIT7)
+       if ((enable && (*sr2 & SR2_QUAD_EN_BIT7)) ||
+           (!enable && !(*sr2 & SR2_QUAD_EN_BIT7)))
                return 0;
 
        /* Update the Quad Enable bit. */
-       *sr2 |= SR2_QUAD_EN_BIT7;
+       if (enable)
+               *sr2 |= SR2_QUAD_EN_BIT7;
+       else
+               *sr2 &= ~SR2_QUAD_EN_BIT7;
 
        ret = spi_nor_write_sr2(nor, sr2);
        if (ret)
@@ -2898,12 +2914,13 @@ static int spi_nor_init_params(struct spi_nor *nor)
 }
 
 /**
- * spi_nor_quad_enable() - enable Quad I/O if needed.
+ * spi_nor_quad_enable() - enable/disable Quad I/O if needed.
  * @nor:                pointer to a 'struct spi_nor'
+ * @enable:             true to enable Quad mode, false to disable Quad mode.
  *
  * Return: 0 on success, -errno otherwise.
  */
-static int spi_nor_quad_enable(struct spi_nor *nor)
+static int spi_nor_quad_enable(struct spi_nor *nor, bool enable)
 {
        if (!nor->params->quad_enable)
                return 0;
@@ -2912,7 +2929,7 @@ static int spi_nor_quad_enable(struct spi_nor *nor)
              spi_nor_get_protocol_width(nor->write_proto) == 4))
                return 0;
 
-       return nor->params->quad_enable(nor);
+       return nor->params->quad_enable(nor, enable);
 }
 
 /**
@@ -2936,7 +2953,7 @@ static int spi_nor_init(struct spi_nor *nor)
 {
        int err;
 
-       err = spi_nor_quad_enable(nor);
+       err = spi_nor_quad_enable(nor, true);
        if (err) {
                dev_dbg(nor->dev, "quad mode not supported\n");
                return err;
@@ -2983,6 +3000,8 @@ void spi_nor_restore(struct spi_nor *nor)
        if (nor->addr_width == 4 && !(nor->flags & SNOR_F_4B_OPCODES) &&
            nor->flags & SNOR_F_BROKEN_RESET)
                nor->params->set_4byte_addr_mode(nor, false);
+
+       spi_nor_quad_enable(nor, false);
 }
 EXPORT_SYMBOL_GPL(spi_nor_restore);
 
index 6f2f6b2..95aa32f 100644 (file)
@@ -198,7 +198,7 @@ struct spi_nor_locking_ops {
  *                      higher index in the array, the higher priority.
  * @erase_map:         the erase map parsed from the SFDP Sector Map Parameter
  *                      Table.
- * @quad_enable:       enables SPI NOR quad mode.
+ * @quad_enable:       enables/disables SPI NOR Quad mode.
  * @set_4byte_addr_mode: puts the SPI NOR in 4 byte addressing mode.
  * @convert_addr:      converts an absolute address into something the flash
  *                      will understand. Particularly useful when pagesize is
@@ -219,7 +219,7 @@ struct spi_nor_flash_parameter {
 
        struct spi_nor_erase_map        erase_map;
 
-       int (*quad_enable)(struct spi_nor *nor);
+       int (*quad_enable)(struct spi_nor *nor, bool enable);
        int (*set_4byte_addr_mode)(struct spi_nor *nor, bool enable);
        u32 (*convert_addr)(struct spi_nor *nor, u32 addr);
        int (*setup)(struct spi_nor *nor, const struct spi_nor_hwcaps *hwcaps);
@@ -406,9 +406,9 @@ int spi_nor_write_ear(struct spi_nor *nor, u8 ear);
 int spi_nor_wait_till_ready(struct spi_nor *nor);
 int spi_nor_lock_and_prep(struct spi_nor *nor);
 void spi_nor_unlock_and_unprep(struct spi_nor *nor);
-int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor);
-int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor);
-int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor);
+int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor, bool enable);
+int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor, bool enable);
+int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor, bool enable);
 
 int spi_nor_xread_sr(struct spi_nor *nor, u8 *sr);
 ssize_t spi_nor_read_data(struct spi_nor *nor, loff_t from, size_t len,
index 96735d8..f97f3d1 100644 (file)
@@ -52,6 +52,9 @@ static const struct flash_info macronix_parts[] = {
        { "mx25u6435f",  INFO(0xc22537, 0, 64 * 1024, 128, SECT_4K) },
        { "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) },
        { "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
+       { "mx25r1635f",  INFO(0xc22815, 0, 64 * 1024,  32,
+                             SECT_4K | SPI_NOR_DUAL_READ |
+                             SPI_NOR_QUAD_READ) },
        { "mx25r3235f",  INFO(0xc22816, 0, 64 * 1024,  64,
                              SECT_4K | SPI_NOR_DUAL_READ |
                              SPI_NOR_QUAD_READ) },
@@ -84,6 +87,9 @@ static const struct flash_info macronix_parts[] = {
                              SPI_NOR_QUAD_READ) },
        { "mx66l1g55g",  INFO(0xc2261b, 0, 64 * 1024, 2048,
                              SPI_NOR_QUAD_READ) },
+       { "mx66u2g45g",  INFO(0xc2253c, 0, 64 * 1024, 4096,
+                             SECT_4K | SPI_NOR_DUAL_READ |
+                             SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
 };
 
 static void macronix_default_init(struct spi_nor *nor)
index 3dca5b9..ef36950 100644 (file)
@@ -71,8 +71,8 @@ static const struct flash_info st_parts[] = {
                              SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
                              NO_CHIP_ERASE) },
        { "mt25qu02g",   INFO(0x20bb22, 0, 64 * 1024, 4096,
-                             SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
-                             NO_CHIP_ERASE) },
+                             SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
+                             SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
 
        { "m25p05",  INFO(0x202010,  0,  32 * 1024,   2, 0) },
        { "m25p10",  INFO(0x202011,  0,  32 * 1024,   4, 0) },
index 55c0c50..e2a43d3 100644 (file)
@@ -598,7 +598,8 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor,
                break;
 
        default:
-               return -EINVAL;
+               dev_dbg(nor->dev, "BFPT QER reserved value used\n");
+               break;
        }
 
        /* Stop here if not JESD216 rev C or later. */
index e550cd5..8429b4a 100644 (file)
@@ -64,7 +64,6 @@ static const struct flash_info spansion_parts[] = {
        { "s25fs512s",  INFO6(0x010220, 0x4d0081, 256 * 1024, 256,
                              SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR)
          .fixups = &s25fs_s_fixups, },
-       { "s70fl01gs",  INFO(0x010221, 0x4d00, 256 * 1024, 256, 0) },
        { "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024,  64, 0) },
        { "s25sl12801", INFO(0x012018, 0x0301,  64 * 1024, 256, 0) },
        { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024,  64,
@@ -84,7 +83,8 @@ static const struct flash_info spansion_parts[] = {
                             SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
        { "s25fl016k",  INFO(0xef4015,      0,  64 * 1024,  32,
                             SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
-       { "s25fl064k",  INFO(0xef4017,      0,  64 * 1024, 128, SECT_4K) },
+       { "s25fl064k",  INFO(0xef4017,      0,  64 * 1024, 128,
+                            SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
        { "s25fl116k",  INFO(0x014015,      0,  64 * 1024,  32,
                             SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
        { "s25fl132k",  INFO(0x014016,      0,  64 * 1024,  64, SECT_4K) },
index 5062af1..6dcde15 100644 (file)
@@ -64,10 +64,12 @@ static const struct flash_info winbond_parts[] = {
                            SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
                            SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
        { "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
-       { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
+       { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128,
+                        SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
        { "w25q64dw", INFO(0xef6017, 0, 64 * 1024, 128,
                           SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
                           SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
+       { "w25q64jvm", INFO(0xef7017, 0, 64 * 1024, 128, SECT_4K) },
        { "w25q128fw", INFO(0xef6018, 0, 64 * 1024, 256,
                            SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
                            SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
index 83afc00..28f55f9 100644 (file)
@@ -381,6 +381,11 @@ static void ubi_fastmap_close(struct ubi_device *ubi)
                ubi->fm_anchor = NULL;
        }
 
+       if (ubi->fm_next_anchor) {
+               return_unused_peb(ubi, ubi->fm_next_anchor);
+               ubi->fm_next_anchor = NULL;
+       }
+
        if (ubi->fm) {
                for (i = 0; i < ubi->fm->used_blocks; i++)
                        kfree(ubi->fm->e[i]);
index 2763606..42cac57 100644 (file)
@@ -1086,7 +1086,8 @@ static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk)
        if (!err) {
                spin_lock(&ubi->wl_lock);
 
-               if (!ubi->fm_next_anchor && e->pnum < UBI_FM_MAX_START) {
+               if (!ubi->fm_disabled && !ubi->fm_next_anchor &&
+                   e->pnum < UBI_FM_MAX_START) {
                        /* Abort anchor production, if needed it will be
                         * enabled again in the wear leveling started below.
                         */
index 31e43a2..cddaa43 100644 (file)
@@ -130,7 +130,7 @@ static inline struct bonding *__get_bond_by_port(struct port *port)
 
 /**
  * __get_first_agg - get the first aggregator in the bond
- * @bond: the bond we're looking at
+ * @port: the port we're looking at
  *
  * Return the aggregator of the first slave in @bond, or %NULL if it can't be
  * found.
@@ -1626,7 +1626,7 @@ static int agg_device_up(const struct aggregator *agg)
 
 /**
  * ad_agg_selection_logic - select an aggregation group for a team
- * @aggregator: the aggregator we're looking at
+ * @agg: the aggregator we're looking at
  * @update_slave_arr: Does slave array need update?
  *
  * It is assumed that only one aggregator may be selected for a team.
@@ -1810,7 +1810,7 @@ static void ad_initialize_agg(struct aggregator *aggregator)
 
 /**
  * ad_initialize_port - initialize a given port's parameters
- * @aggregator: the aggregator we're looking at
+ * @port: the port we're looking at
  * @lacp_fast: boolean. whether fast periodic should be used
  */
 static void ad_initialize_port(struct port *port, int lacp_fast)
@@ -1967,6 +1967,7 @@ static void ad_marker_response_received(struct bond_marker *marker,
 /**
  * bond_3ad_initiate_agg_selection - initate aggregator selection
  * @bond: bonding struct
+ * @timeout: timeout value to set
  *
  * Set the aggregation selection timer, to initiate an agg selection in
  * the very near future.  Called during first initialization, and during
@@ -2259,7 +2260,7 @@ void bond_3ad_update_ad_actor_settings(struct bonding *bond)
 
 /**
  * bond_3ad_state_machine_handler - handle state machines timeout
- * @bond: bonding struct to work on
+ * @work: work context to fetch bonding struct to work on from
  *
  * The state machine handling concept in this module is to check every tick
  * which state machine should operate any function. The execution order is
@@ -2500,7 +2501,7 @@ void bond_3ad_adapter_speed_duplex_changed(struct slave *slave)
 /**
  * bond_3ad_handle_link_change - handle a slave's link status change indication
  * @slave: slave struct to work on
- * @status: whether the link is now up or down
+ * @link: whether the link is now up or down
  *
  * Handle reselection of aggregator (if needed) for this port.
  */
@@ -2551,7 +2552,7 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
 
 /**
  * bond_3ad_set_carrier - set link state for bonding master
- * @bond - bonding structure
+ * @bond: bonding structure
  *
  * if we have an active aggregator, we're up, if not, we're down.
  * Presumes that we cannot have an active aggregator if there are
@@ -2664,7 +2665,7 @@ int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
 
 /**
  * bond_3ad_update_lacp_rate - change the lacp rate
- * @bond - bonding struct
+ * @bond: bonding struct
  *
  * When modify lacp_rate parameter via sysfs,
  * update actor_oper_port_state of each port.
index 095ea51..4e1b7de 100644 (file)
@@ -1206,8 +1206,8 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
 
 /**
  * alb_set_mac_address
- * @bond:
- * @addr:
+ * @bond: bonding we're working on
+ * @addr: MAC address to set
  *
  * In TLB mode all slaves are configured to the bond's hw address, but set
  * their dev_addr field to different addresses (based on their permanent hw
index 5ad43aa..415a37e 100644 (file)
@@ -322,6 +322,7 @@ netdev_tx_t bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
 /**
  * bond_vlan_rx_add_vid - Propagates adding an id to slaves
  * @bond_dev: bonding net device that got called
+ * @proto: network protocol ID
  * @vid: vlan id being added
  */
 static int bond_vlan_rx_add_vid(struct net_device *bond_dev,
@@ -355,6 +356,7 @@ unwind:
 /**
  * bond_vlan_rx_kill_vid - Propagates deleting an id to slaves
  * @bond_dev: bonding net device that got called
+ * @proto: network protocol ID
  * @vid: vlan id being removed
  */
 static int bond_vlan_rx_kill_vid(struct net_device *bond_dev,
@@ -948,7 +950,7 @@ static bool bond_should_notify_peers(struct bonding *bond)
 /**
  * change_active_interface - change the active slave into the specified one
  * @bond: our bonding struct
- * @new: the new slave to make the active one
+ * @new_active: the new slave to make the active one
  *
  * Set the new slave to the bond's settings and unset them on the old
  * curr_active_slave.
@@ -2205,7 +2207,8 @@ static int bond_release_and_destroy(struct net_device *bond_dev,
        int ret;
 
        ret = __bond_release_one(bond_dev, slave_dev, false, true);
-       if (ret == 0 && !bond_has_slaves(bond)) {
+       if (ret == 0 && !bond_has_slaves(bond) &&
+           bond_dev->reg_state != NETREG_UNREGISTERING) {
                bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
                netdev_info(bond_dev, "Destroying bond\n");
                bond_remove_proc_entry(bond);
@@ -4552,13 +4555,23 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
        return ret;
 }
 
+static u32 bond_mode_bcast_speed(struct slave *slave, u32 speed)
+{
+       if (speed == 0 || speed == SPEED_UNKNOWN)
+               speed = slave->speed;
+       else
+               speed = min(speed, slave->speed);
+
+       return speed;
+}
+
 static int bond_ethtool_get_link_ksettings(struct net_device *bond_dev,
                                           struct ethtool_link_ksettings *cmd)
 {
        struct bonding *bond = netdev_priv(bond_dev);
-       unsigned long speed = 0;
        struct list_head *iter;
        struct slave *slave;
+       u32 speed = 0;
 
        cmd->base.duplex = DUPLEX_UNKNOWN;
        cmd->base.port = PORT_OTHER;
@@ -4570,8 +4583,13 @@ static int bond_ethtool_get_link_ksettings(struct net_device *bond_dev,
         */
        bond_for_each_slave(bond, slave, iter) {
                if (bond_slave_can_tx(slave)) {
-                       if (slave->speed != SPEED_UNKNOWN)
-                               speed += slave->speed;
+                       if (slave->speed != SPEED_UNKNOWN) {
+                               if (BOND_MODE(bond) == BOND_MODE_BROADCAST)
+                                       speed = bond_mode_bcast_speed(slave,
+                                                                     speed);
+                               else
+                                       speed += slave->speed;
+                       }
                        if (cmd->base.duplex == DUPLEX_UNKNOWN &&
                            slave->duplex != DUPLEX_UNKNOWN)
                                cmd->base.duplex = slave->duplex;
index ef1c315..bd0ada4 100644 (file)
@@ -951,7 +951,7 @@ static struct net_device_stats *el3_get_stats(struct net_device *dev)
 static void update_stats(struct net_device *dev)
 {
        unsigned int ioaddr = dev->base_addr;
-       u8 rx, tx, up;
+       u8 up;
 
        pr_debug("%s: updating the statistics.\n", dev->name);
 
@@ -972,8 +972,8 @@ static void update_stats(struct net_device *dev)
        dev->stats.tx_packets                   += (up&0x30) << 4;
        /* Rx packets   */                         inb(ioaddr + 7);
        /* Tx deferrals */                         inb(ioaddr + 8);
-       rx                                       = inw(ioaddr + 10);
-       tx                                       = inw(ioaddr + 12);
+       /* rx */                                   inw(ioaddr + 10);
+       /* tx */                                   inw(ioaddr + 12);
 
        EL3WINDOW(4);
        /* BadSSD */                               inb(ioaddr + 12);
index aeae796..08db4c9 100644 (file)
@@ -898,6 +898,7 @@ static int ax_close(struct net_device *dev)
 /**
  * axnet_tx_timeout - handle transmit time out condition
  * @dev: network device which has apparently fallen asleep
+ * @txqueue: unused
  *
  * Called by kernel when device never acknowledges a transmit has
  * completed (or failed) - i.e. never posted a Tx related interrupt.
index 9934421..fb37816 100644 (file)
@@ -3715,11 +3715,11 @@ failed_mii_init:
 failed_irq:
 failed_init:
        fec_ptp_stop(pdev);
-       if (fep->reg_phy)
-               regulator_disable(fep->reg_phy);
 failed_reset:
        pm_runtime_put_noidle(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
+       if (fep->reg_phy)
+               regulator_disable(fep->reg_phy);
 failed_regulator:
        clk_disable_unprepare(fep->clk_ahb);
 failed_clk_ahb:
index 9162856..e972138 100644 (file)
@@ -1728,7 +1728,7 @@ static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
        /* hardware completion status should be available by this time */
        if (ret) {
                dev_err(&hdev->pdev->dev,
-                       "could'nt get reset done status from h/w, timeout!\n");
+                       "couldn't get reset done status from h/w, timeout!\n");
                return ret;
        }
 
index c6adc77..16bda73 100644 (file)
@@ -334,19 +334,14 @@ void hinic_devlink_unregister(struct hinic_devlink_priv *priv)
 static int chip_fault_show(struct devlink_fmsg *fmsg,
                           struct hinic_fault_event *event)
 {
-       char fault_level[FAULT_TYPE_MAX][FAULT_SHOW_STR_LEN + 1] = {
-               "fatal", "reset", "flr", "general", "suggestion"};
-       char level_str[FAULT_SHOW_STR_LEN + 1] = {0};
-       u8 level;
+       const char * const level_str[FAULT_LEVEL_MAX + 1] = {
+               "fatal", "reset", "flr", "general", "suggestion", "Unknown"};
+       u8 fault_level;
        int err;
 
-       level = event->event.chip.err_level;
-       if (level < FAULT_LEVEL_MAX)
-               strncpy(level_str, fault_level[level], strlen(fault_level[level]));
-       else
-               strncpy(level_str, "Unknown", strlen("Unknown"));
-
-       if (level == FAULT_LEVEL_SERIOUS_FLR) {
+       fault_level = (event->event.chip.err_level < FAULT_LEVEL_MAX) ?
+               event->event.chip.err_level : FAULT_LEVEL_MAX;
+       if (fault_level == FAULT_LEVEL_SERIOUS_FLR) {
                err = devlink_fmsg_u32_pair_put(fmsg, "Function level err func_id",
                                                (u32)event->event.chip.func_id);
                if (err)
@@ -361,7 +356,7 @@ static int chip_fault_show(struct devlink_fmsg *fmsg,
        if (err)
                return err;
 
-       err = devlink_fmsg_string_pair_put(fmsg, "err_level", level_str);
+       err = devlink_fmsg_string_pair_put(fmsg, "err_level", level_str[fault_level]);
        if (err)
                return err;
 
@@ -381,18 +376,15 @@ static int chip_fault_show(struct devlink_fmsg *fmsg,
 static int fault_report_show(struct devlink_fmsg *fmsg,
                             struct hinic_fault_event *event)
 {
-       char fault_type[FAULT_TYPE_MAX][FAULT_SHOW_STR_LEN + 1] = {
+       const char * const type_str[FAULT_TYPE_MAX + 1] = {
                "chip", "ucode", "mem rd timeout", "mem wr timeout",
-               "reg rd timeout", "reg wr timeout", "phy fault"};
-       char type_str[FAULT_SHOW_STR_LEN + 1] = {0};
+               "reg rd timeout", "reg wr timeout", "phy fault", "Unknown"};
+       u8 fault_type;
        int err;
 
-       if (event->type < FAULT_TYPE_MAX)
-               strncpy(type_str, fault_type[event->type], strlen(fault_type[event->type]));
-       else
-               strncpy(type_str, "Unknown", strlen("Unknown"));
+       fault_type = (event->type < FAULT_TYPE_MAX) ? event->type : FAULT_TYPE_MAX;
 
-       err = devlink_fmsg_string_pair_put(fmsg, "Fault type", type_str);
+       err = devlink_fmsg_string_pair_put(fmsg, "Fault type", type_str[fault_type]);
        if (err)
                return err;
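The two hinic hunks above replace strncpy() into fixed stack buffers with const string tables that carry one extra "Unknown" slot, so an out-of-range value is clamped onto the fallback entry instead of being copied around. A minimal sketch of the clamping lookup (illustrative only, reusing the enum bound from the driver):

	static const char * const fault_name[FAULT_TYPE_MAX + 1] = {
		"chip", "ucode", "mem rd timeout", "mem wr timeout",
		"reg rd timeout", "reg wr timeout", "phy fault", "Unknown",
	};

	static const char *fault_name_str(u8 type)
	{
		/* Any value >= FAULT_TYPE_MAX maps to the trailing "Unknown". */
		return fault_name[type < FAULT_TYPE_MAX ? type : FAULT_TYPE_MAX];
	}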
 
index dc6e645..701eb81 100644 (file)
@@ -504,8 +504,6 @@ enum hinic_fault_type {
        FAULT_TYPE_MAX,
 };
 
-#define FAULT_SHOW_STR_LEN 16
-
 enum hinic_fault_err_level {
        FAULT_LEVEL_FATAL,
        FAULT_LEVEL_SERIOUS_RESET,
index a62ddd6..c0c8efe 100644 (file)
@@ -981,7 +981,7 @@ struct i40e_aqc_set_vsi_promiscuous_modes {
 #define I40E_AQC_SET_VSI_PROMISC_BROADCAST     0x04
 #define I40E_AQC_SET_VSI_DEFAULT               0x08
 #define I40E_AQC_SET_VSI_PROMISC_VLAN          0x10
-#define I40E_AQC_SET_VSI_PROMISC_TX            0x8000
+#define I40E_AQC_SET_VSI_PROMISC_RX_ONLY       0x8000
        __le16  seid;
        __le16  vlan_tag;
 #define I40E_AQC_SET_VSI_VLAN_VALID            0x8000
index afad5e9..6ab52cb 100644 (file)
@@ -1967,6 +1967,21 @@ i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
 }
 
 /**
+ * i40e_is_aq_api_ver_ge
+ * @aq: pointer to AdminQ info containing HW API version to compare
+ * @maj: API major value
+ * @min: API minor value
+ *
+ * Assert whether the current HW API version is greater than or equal to the provided one.
+ **/
+static bool i40e_is_aq_api_ver_ge(struct i40e_adminq_info *aq, u16 maj,
+                                 u16 min)
+{
+       return (aq->api_maj_ver > maj ||
+               (aq->api_maj_ver == maj && aq->api_min_ver >= min));
+}
+
+/**
  * i40e_aq_add_vsi
  * @hw: pointer to the hw struct
  * @vsi_ctx: pointer to a vsi context struct
@@ -2091,18 +2106,16 @@ i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
 
        if (set) {
                flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
-               if (rx_only_promisc &&
-                   (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver >= 5)) ||
-                    (hw->aq.api_maj_ver > 1)))
-                       flags |= I40E_AQC_SET_VSI_PROMISC_TX;
+               if (rx_only_promisc && i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
+                       flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;
        }
 
        cmd->promiscuous_flags = cpu_to_le16(flags);
 
        cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
-       if (((hw->aq.api_maj_ver >= 1) && (hw->aq.api_min_ver >= 5)) ||
-           (hw->aq.api_maj_ver > 1))
-               cmd->valid_flags |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_TX);
+       if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
+               cmd->valid_flags |=
+                       cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY);
 
        cmd->seid = cpu_to_le16(seid);
        status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
@@ -2199,11 +2212,17 @@ enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
        i40e_fill_default_direct_cmd_desc(&desc,
                                          i40e_aqc_opc_set_vsi_promiscuous_modes);
 
-       if (enable)
+       if (enable) {
                flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
+               if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
+                       flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;
+       }
 
        cmd->promiscuous_flags = cpu_to_le16(flags);
        cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
+       if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
+               cmd->valid_flags |=
+                       cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY);
        cmd->seid = cpu_to_le16(seid);
        cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
 
index b539935..2e433fd 100644 (file)
@@ -15463,6 +15463,9 @@ static void i40e_remove(struct pci_dev *pdev)
        i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
        i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
 
+       while (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
+               usleep_range(1000, 2000);
+
        /* no more scheduling of any task */
        set_bit(__I40E_SUSPENDED, pf->state);
        set_bit(__I40E_DOWN, pf->state);
index 7a6f2a0..9593aa4 100644 (file)
@@ -5142,6 +5142,8 @@ static int igc_probe(struct pci_dev *pdev,
        device_set_wakeup_enable(&adapter->pdev->dev,
                                 adapter->flags & IGC_FLAG_WOL_SUPPORTED);
 
+       igc_ptp_init(adapter);
+
        /* reset the hardware with the new settings */
        igc_reset(adapter);
 
@@ -5158,9 +5160,6 @@ static int igc_probe(struct pci_dev *pdev,
         /* carrier off reporting is important to ethtool even BEFORE open */
        netif_carrier_off(netdev);
 
-       /* do hw tstamp init after resetting */
-       igc_ptp_init(adapter);
-
        /* Check if Media Autosense is enabled */
        adapter->ei = *ei;
 
index e67d465..36c9992 100644 (file)
@@ -496,8 +496,6 @@ void igc_ptp_init(struct igc_adapter *adapter)
        adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
        adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
 
-       igc_ptp_reset(adapter);
-
        adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
                                                &adapter->pdev->dev);
        if (IS_ERR(adapter->ptp_clock)) {
index e67b1a5..0fcd820 100644 (file)
@@ -193,7 +193,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
        }
 
        /* alloc the udl from per cpu ddp pool */
-       ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_KERNEL, &ddp->udp);
+       ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp);
        if (!ddp->udl) {
                e_err(drv, "failed allocated ddp context\n");
                goto out_noddp_unmap;
index 5975521..93c4cf7 100644 (file)
@@ -1226,8 +1226,8 @@ int otx2_config_npa(struct otx2_nic *pfvf)
        if (!hw->pool_cnt)
                return -EINVAL;
 
-       qset->pool = devm_kzalloc(pfvf->dev, sizeof(struct otx2_pool) *
-                                 hw->pool_cnt, GFP_KERNEL);
+       qset->pool = devm_kcalloc(pfvf->dev, hw->pool_cnt,
+                                 sizeof(struct otx2_pool), GFP_KERNEL);
        if (!qset->pool)
                return -ENOMEM;
 
index 1944bf5..26988ad 100644 (file)
@@ -412,7 +412,7 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
 
        new->flags = flags;
 
-       new->q.info = devm_kzalloc(dev, sizeof(*new->q.info) * num_descs,
+       new->q.info = devm_kcalloc(dev, num_descs, sizeof(*new->q.info),
                                   GFP_KERNEL);
        if (!new->q.info) {
                netdev_err(lif->netdev, "Cannot allocate queue info\n");
@@ -462,7 +462,7 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
                new->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED;
        }
 
-       new->cq.info = devm_kzalloc(dev, sizeof(*new->cq.info) * num_descs,
+       new->cq.info = devm_kcalloc(dev, num_descs, sizeof(*new->cq.info),
                                    GFP_KERNEL);
        if (!new->cq.info) {
                netdev_err(lif->netdev, "Cannot allocate completion queue info\n");
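The allocation hunks above (otx2 and ionic) switch from open-coded "sizeof(elem) * n" multiplications to devm_kcalloc(), which returns NULL when the element count times the element size would overflow. A minimal sketch of the pattern, with a hypothetical element type and wrapper (not driver code):

	/* Illustrative only: allocate "count" elements of a made-up type. */
	static int alloc_elems(struct device *dev, struct my_elem **out,
			       size_t count)
	{
		*out = devm_kcalloc(dev, count, sizeof(**out), GFP_KERNEL);
		if (!*out)
			return -ENOMEM;	/* also hit if count * size overflows */
		return 0;
	}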
index 20b1b43..1166b98 100644 (file)
@@ -474,13 +474,24 @@ static int emac_clks_phase1_init(struct platform_device *pdev,
 
        ret = clk_prepare_enable(adpt->clk[EMAC_CLK_CFG_AHB]);
        if (ret)
-               return ret;
+               goto disable_clk_axi;
 
        ret = clk_set_rate(adpt->clk[EMAC_CLK_HIGH_SPEED], 19200000);
        if (ret)
-               return ret;
+               goto disable_clk_cfg_ahb;
+
+       ret = clk_prepare_enable(adpt->clk[EMAC_CLK_HIGH_SPEED]);
+       if (ret)
+               goto disable_clk_cfg_ahb;
 
-       return clk_prepare_enable(adpt->clk[EMAC_CLK_HIGH_SPEED]);
+       return 0;
+
+disable_clk_cfg_ahb:
+       clk_disable_unprepare(adpt->clk[EMAC_CLK_CFG_AHB]);
+disable_clk_axi:
+       clk_disable_unprepare(adpt->clk[EMAC_CLK_AXI]);
+
+       return ret;
 }
 
 /* Enable clocks; needs emac_clks_phase1_init to be called before */
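The hunk above replaces early returns with a goto-based unwind so that every clock enabled before a failure point is disabled again on the error path. A stripped-down sketch of the same shape, with hypothetical clock handles (not the driver's code):

	/* Illustrative only: disable the earlier clock if the later one fails. */
	static int enable_two_clks(struct clk *axi, struct clk *ahb)
	{
		int ret;

		ret = clk_prepare_enable(axi);
		if (ret)
			return ret;

		ret = clk_prepare_enable(ahb);
		if (ret)
			goto disable_axi;

		return 0;

	disable_axi:
		clk_disable_unprepare(axi);
		return ret;
	}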
index 36598d0..206d70f 100644 (file)
@@ -979,7 +979,8 @@ static int ef100_process_design_param(struct efx_nic *efx,
                 * EFX_MIN_DMAQ_SIZE is divisible by GRANULARITY.
                 * This is very unlikely to fail.
                 */
-               if (EFX_MIN_DMAQ_SIZE % reader->value) {
+               if (!reader->value || reader->value > EFX_MIN_DMAQ_SIZE ||
+                   EFX_MIN_DMAQ_SIZE % (u32)reader->value) {
                        netif_err(efx, probe, efx->net_dev,
                                  "%s size granularity is %llu, can't guarantee safety\n",
                                  reader->type == ESE_EF100_DP_GZ_RXQ_SIZE_GRANULARITY ? "RXQ" : "TXQ",
index 13ba1a4..012925e 100644 (file)
 #define ESF_GZ_RX_PREFIX_NT_OR_INNER_L3_CLASS_WIDTH    \
                ESF_GZ_RX_PREFIX_HCLASS_NT_OR_INNER_L3_CLASS_WIDTH
 
+bool ef100_rx_buf_hash_valid(const u8 *prefix)
+{
+       return PREFIX_FIELD(prefix, RSS_HASH_VALID);
+}
+
 static bool check_fcs(struct efx_channel *channel, u32 *prefix)
 {
        u16 rxclass;
index f2f2668..fe45b36 100644 (file)
@@ -14,6 +14,7 @@
 
 #include "net_driver.h"
 
+bool ef100_rx_buf_hash_valid(const u8 *prefix);
 void efx_ef100_ev_rx(struct efx_channel *channel, const efx_qword_t *p_event);
 void ef100_rx_write(struct efx_rx_queue *rx_queue);
 void __ef100_rx_packet(struct efx_channel *channel);
index a9808e8..daf0c00 100644 (file)
@@ -45,6 +45,14 @@ static inline void efx_rx_flush_packet(struct efx_channel *channel)
                                __ef100_rx_packet, __efx_rx_packet,
                                channel);
 }
+static inline bool efx_rx_buf_hash_valid(struct efx_nic *efx, const u8 *prefix)
+{
+       if (efx->type->rx_buf_hash_valid)
+               return INDIRECT_CALL_1(efx->type->rx_buf_hash_valid,
+                                      ef100_rx_buf_hash_valid,
+                                      prefix);
+       return true;
+}
 
 /* Maximum number of TCP segments we support for soft-TSO */
 #define EFX_TSO_MAX_SEGS       100
index 7bb7ecb..dcb741d 100644 (file)
@@ -1265,6 +1265,7 @@ struct efx_udp_tunnel {
  * @rx_write: Write RX descriptors and doorbell
  * @rx_defer_refill: Generate a refill reminder event
  * @rx_packet: Receive the queued RX buffer on a channel
+ * @rx_buf_hash_valid: Determine whether the RX prefix contains a valid hash
  * @ev_probe: Allocate resources for event queue
  * @ev_init: Initialise event queue on the NIC
  * @ev_fini: Deinitialise event queue on the NIC
@@ -1409,6 +1410,7 @@ struct efx_nic_type {
        void (*rx_write)(struct efx_rx_queue *rx_queue);
        void (*rx_defer_refill)(struct efx_rx_queue *rx_queue);
        void (*rx_packet)(struct efx_channel *channel);
+       bool (*rx_buf_hash_valid)(const u8 *prefix);
        int (*ev_probe)(struct efx_channel *channel);
        int (*ev_init)(struct efx_channel *channel);
        void (*ev_fini)(struct efx_channel *channel);
index fb77c7b..ef9bca9 100644 (file)
@@ -525,7 +525,8 @@ efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
                return;
        }
 
-       if (efx->net_dev->features & NETIF_F_RXHASH)
+       if (efx->net_dev->features & NETIF_F_RXHASH &&
+           efx_rx_buf_hash_valid(efx, eh))
                skb_set_hash(skb, efx_rx_buf_hash(efx, eh),
                             PKT_HASH_TYPE_L3);
        if (csum) {
index 02102c7..bf3250e 100644 (file)
@@ -351,6 +351,7 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
        plat_dat->has_gmac = true;
        plat_dat->bsp_priv = gmac;
        plat_dat->fix_mac_speed = ipq806x_gmac_fix_mac_speed;
+       plat_dat->multicast_filter_bins = 0;
 
        err = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
        if (err)
index efc6ec1..fc8759f 100644 (file)
@@ -164,6 +164,9 @@ static void dwmac1000_set_filter(struct mac_device_info *hw,
                value = GMAC_FRAME_FILTER_PR | GMAC_FRAME_FILTER_PCF;
        } else if (dev->flags & IFF_ALLMULTI) {
                value = GMAC_FRAME_FILTER_PM;   /* pass all multi */
+       } else if (!netdev_mc_empty(dev) && (mcbitslog2 == 0)) {
+               /* Fall back to all multicast if we've no filter */
+               value = GMAC_FRAME_FILTER_PM;
        } else if (!netdev_mc_empty(dev)) {
                struct netdev_hw_addr *ha;
 
index e9bf429..4eea340 100644 (file)
 #define KERNEL
 #include "h/smtstate.h"
 
-#ifndef        lint
-static const char ID_sccs[] = "@(#)cfm.c       2.18 98/10/06 (C) SK " ;
-#endif
-
 /*
  * FSM Macros
  */
@@ -208,7 +204,6 @@ void cfm(struct s_smc *smc, int event)
 {
        int     state ;         /* remember last state */
        int     cond ;
-       int     oldstate ;
 
        /* We will do the following: */
        /*  - compute the variable WC_Flag for every port (This is where */
@@ -222,7 +217,6 @@ void cfm(struct s_smc *smc, int event)
        /*  - change the portstates */
        cem_priv_state (smc, event);
 
-       oldstate = smc->mib.fddiSMTCF_State ;
        do {
                DB_CFM("CFM : state %s%s event %s",
                       smc->mib.fddiSMTCF_State & AFLAG ? "ACTIONS " : "",
@@ -250,18 +244,11 @@ void cfm(struct s_smc *smc, int event)
        if (cond != smc->mib.fddiSMTPeerWrapFlag)
                smt_srf_event(smc,SMT_COND_SMT_PEER_WRAP,0,cond) ;
 
-#if    0
        /*
-        * Don't send ever MAC_PATH_CHANGE events. Our MAC is hard-wired
+        * Don't ever send MAC_PATH_CHANGE events. Our MAC is hard-wired
         * to the primary path.
         */
-       /*
-        * path change
-        */
-       if (smc->mib.fddiSMTCF_State != oldstate) {
-               smt_srf_event(smc,SMT_EVENT_MAC_PATH_CHANGE,INDEX_MAC,0) ;
-       }
-#endif
+
 #endif /* no SLIM_SMT */
 
        /*
index 02966d1..4cbb145 100644 (file)
 #include <linux/bitrev.h>
 #include <linux/etherdevice.h>
 
-#ifndef        lint
-static const char ID_sccs[] = "@(#)fplustm.c   1.32 99/02/23 (C) SK " ;
-#endif
-
 #ifndef UNUSED
 #ifdef  lint
 #define UNUSED(x)      (x) = (x)
index 3412e0f..1070390 100644 (file)
  *
  ******************************************************************************/
 
-#ifndef        lint
-static char const ID_sccs[] = "@(#)hwmtm.c     1.40 99/05/31 (C) SK" ;
-#endif
-
 #define        HWMTM
 
 #ifndef FDDI
index b8c59d8..774a6e3 100644 (file)
 #define KERNEL
 #include "h/smtstate.h"
 
-#ifndef        lint
-static const char ID_sccs[] = "@(#)smt.c       2.43 98/11/23 (C) SK " ;
-#endif
-
 /*
  * FC in SMbuf
  */
@@ -1561,7 +1557,7 @@ u_long smt_get_tid(struct s_smc *smc)
        return tid & 0x3fffffffL;
 }
 
-
+#ifdef LITTLE_ENDIAN
 /*
  * table of parameter lengths
  */
@@ -1641,6 +1637,7 @@ static const struct smt_pdef {
 } ;
 
 #define N_SMT_PLEN     ARRAY_SIZE(smt_pdef)
+#endif
 
 int smt_check_para(struct s_smc *smc, struct smt_header        *sm,
                   const u_short list[])
index b10a853..55115cf 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/device.h>
 #include <linux/notifier.h>
 #include <linux/pm_wakeup.h>
+#include <linux/notifier.h>
 
 #include "ipa_version.h"
 #include "gsi.h"
@@ -73,6 +74,8 @@ struct ipa {
        enum ipa_version version;
        struct platform_device *pdev;
        struct rproc *modem_rproc;
+       struct notifier_block nb;
+       void *notifier;
        struct ipa_smp2p *smp2p;
        struct ipa_clock *clock;
        atomic_t suspend_ref;
index ed10818..e34fe2d 100644 (file)
@@ -9,7 +9,7 @@
 #include <linux/netdevice.h>
 #include <linux/skbuff.h>
 #include <linux/if_rmnet.h>
-#include <linux/remoteproc/qcom_q6v5_ipa_notify.h>
+#include <linux/remoteproc/qcom_rproc.h>
 
 #include "ipa.h"
 #include "ipa_data.h"
@@ -311,43 +311,40 @@ static void ipa_modem_crashed(struct ipa *ipa)
                dev_err(dev, "error %d zeroing modem memory regions\n", ret);
 }
 
-static void ipa_modem_notify(void *data, enum qcom_rproc_event event)
+static int ipa_modem_notify(struct notifier_block *nb, unsigned long action,
+                           void *data)
 {
-       struct ipa *ipa = data;
-       struct device *dev;
+       struct ipa *ipa = container_of(nb, struct ipa, nb);
+       struct qcom_ssr_notify_data *notify_data = data;
+       struct device *dev = &ipa->pdev->dev;
 
-       dev = &ipa->pdev->dev;
-       switch (event) {
-       case MODEM_STARTING:
+       switch (action) {
+       case QCOM_SSR_BEFORE_POWERUP:
                dev_info(dev, "received modem starting event\n");
                ipa_smp2p_notify_reset(ipa);
                break;
 
-       case MODEM_RUNNING:
+       case QCOM_SSR_AFTER_POWERUP:
                dev_info(dev, "received modem running event\n");
                break;
 
-       case MODEM_STOPPING:
-       case MODEM_CRASHED:
+       case QCOM_SSR_BEFORE_SHUTDOWN:
                dev_info(dev, "received modem %s event\n",
-                        event == MODEM_STOPPING ? "stopping"
-                                                : "crashed");
+                        notify_data->crashed ? "crashed" : "stopping");
                if (ipa->setup_complete)
                        ipa_modem_crashed(ipa);
                break;
 
-       case MODEM_OFFLINE:
+       case QCOM_SSR_AFTER_SHUTDOWN:
                dev_info(dev, "received modem offline event\n");
                break;
 
-       case MODEM_REMOVING:
-               dev_info(dev, "received modem stopping event\n");
-               break;
-
        default:
-               dev_err(&ipa->pdev->dev, "unrecognized event %u\n", event);
+               dev_err(dev, "received unrecognized event %lu\n", action);
                break;
        }
+
+       return NOTIFY_OK;
 }
 
 int ipa_modem_init(struct ipa *ipa, bool modem_init)
@@ -362,13 +359,30 @@ void ipa_modem_exit(struct ipa *ipa)
 
 int ipa_modem_config(struct ipa *ipa)
 {
-       return qcom_register_ipa_notify(ipa->modem_rproc, ipa_modem_notify,
-                                       ipa);
+       void *notifier;
+
+       ipa->nb.notifier_call = ipa_modem_notify;
+
+       notifier = qcom_register_ssr_notifier("mpss", &ipa->nb);
+       if (IS_ERR(notifier))
+               return PTR_ERR(notifier);
+
+       ipa->notifier = notifier;
+
+       return 0;
 }
 
 void ipa_modem_deconfig(struct ipa *ipa)
 {
-       qcom_deregister_ipa_notify(ipa->modem_rproc);
+       struct device *dev = &ipa->pdev->dev;
+       int ret;
+
+       ret = qcom_unregister_ssr_notifier(ipa->notifier, &ipa->nb);
+       if (ret)
+               dev_err(dev, "error %d unregistering notifier", ret);
+
+       ipa->notifier = NULL;
+       memset(&ipa->nb, 0, sizeof(ipa->nb));
 }
 
 int ipa_modem_setup(struct ipa *ipa)
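The conversion above drops the IPA-specific modem notify hooks in favor of the generic SSR notifier chain: a struct notifier_block embedded in the driver state is registered with qcom_register_ssr_notifier(), and the callback recovers its owner with container_of(). A minimal sketch of that shape, with hypothetical names (not the driver's):

	/* Illustrative only. */
	struct my_drv {
		struct notifier_block nb;
		void *ssr_cookie;
	};

	static int my_ssr_notify(struct notifier_block *nb, unsigned long action,
				 void *data)
	{
		struct my_drv *drv = container_of(nb, struct my_drv, nb);

		/* handle QCOM_SSR_* transitions for drv here */
		return NOTIFY_OK;
	}

	static int my_ssr_register(struct my_drv *drv)
	{
		drv->nb.notifier_call = my_ssr_notify;
		drv->ssr_cookie = qcom_register_ssr_notifier("mpss", &drv->nb);

		return PTR_ERR_OR_ZERO(drv->ssr_cookie);
	}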
index 15e87c0..5bca94c 100644 (file)
@@ -106,12 +106,21 @@ static void ipvlan_port_destroy(struct net_device *dev)
        kfree(port);
 }
 
+#define IPVLAN_ALWAYS_ON_OFLOADS \
+       (NETIF_F_SG | NETIF_F_HW_CSUM | \
+        NETIF_F_GSO_ROBUST | NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL)
+
+#define IPVLAN_ALWAYS_ON \
+       (IPVLAN_ALWAYS_ON_OFLOADS | NETIF_F_LLTX | NETIF_F_VLAN_CHALLENGED)
+
 #define IPVLAN_FEATURES \
-       (NETIF_F_SG | NETIF_F_CSUM_MASK | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
+       (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
         NETIF_F_GSO | NETIF_F_ALL_TSO | NETIF_F_GSO_ROBUST | \
         NETIF_F_GRO | NETIF_F_RXCSUM | \
         NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)
 
+       /* NETIF_F_GSO_ENCAP_ALL and NETIF_F_GSO_SOFTWARE are newly added */
+
 #define IPVLAN_STATE_MASK \
        ((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))
 
@@ -125,7 +134,9 @@ static int ipvlan_init(struct net_device *dev)
        dev->state = (dev->state & ~IPVLAN_STATE_MASK) |
                     (phy_dev->state & IPVLAN_STATE_MASK);
        dev->features = phy_dev->features & IPVLAN_FEATURES;
-       dev->features |= NETIF_F_LLTX | NETIF_F_VLAN_CHALLENGED;
+       dev->features |= IPVLAN_ALWAYS_ON;
+       dev->vlan_features = phy_dev->vlan_features & IPVLAN_FEATURES;
+       dev->vlan_features |= IPVLAN_ALWAYS_ON_OFLOADS;
        dev->hw_enc_features |= dev->features;
        dev->gso_max_size = phy_dev->gso_max_size;
        dev->gso_max_segs = phy_dev->gso_max_segs;
@@ -227,7 +238,14 @@ static netdev_features_t ipvlan_fix_features(struct net_device *dev,
 {
        struct ipvl_dev *ipvlan = netdev_priv(dev);
 
-       return features & (ipvlan->sfeatures | ~IPVLAN_FEATURES);
+       features |= NETIF_F_ALL_FOR_ALL;
+       features &= (ipvlan->sfeatures | ~IPVLAN_FEATURES);
+       features = netdev_increment_features(ipvlan->phy_dev->features,
+                                            features, features);
+       features |= IPVLAN_ALWAYS_ON;
+       features &= (IPVLAN_FEATURES | IPVLAN_ALWAYS_ON);
+
+       return features;
 }
 
 static void ipvlan_change_rx_flags(struct net_device *dev, int change)
@@ -734,10 +752,9 @@ static int ipvlan_device_event(struct notifier_block *unused,
 
        case NETDEV_FEAT_CHANGE:
                list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
-                       ipvlan->dev->features = dev->features & IPVLAN_FEATURES;
                        ipvlan->dev->gso_max_size = dev->gso_max_size;
                        ipvlan->dev->gso_max_segs = dev->gso_max_segs;
-                       netdev_features_change(ipvlan->dev);
+                       netdev_update_features(ipvlan->dev);
                }
                break;
 
index a7610eb..1901ba2 100644 (file)
@@ -208,13 +208,6 @@ static int mv3310_hwmon_config(struct phy_device *phydev, bool enable)
                              MV_V2_TEMP_CTRL_MASK, val);
 }
 
-static void mv3310_hwmon_disable(void *data)
-{
-       struct phy_device *phydev = data;
-
-       mv3310_hwmon_config(phydev, false);
-}
-
 static int mv3310_hwmon_probe(struct phy_device *phydev)
 {
        struct device *dev = &phydev->mdio.dev;
@@ -238,10 +231,6 @@ static int mv3310_hwmon_probe(struct phy_device *phydev)
        if (ret)
                return ret;
 
-       ret = devm_add_action_or_reset(dev, mv3310_hwmon_disable, phydev);
-       if (ret)
-               return ret;
-
        priv->hwmon_dev = devm_hwmon_device_register_with_info(dev,
                                priv->hwmon_name, phydev,
                                &mv3310_hwmon_chip_info, NULL);
@@ -426,6 +415,11 @@ static int mv3310_probe(struct phy_device *phydev)
        return phy_sfp_probe(phydev, &mv3310_sfp_ops);
 }
 
+static void mv3310_remove(struct phy_device *phydev)
+{
+       mv3310_hwmon_config(phydev, false);
+}
+
 static int mv3310_suspend(struct phy_device *phydev)
 {
        return mv3310_power_down(phydev);
@@ -784,6 +778,7 @@ static struct phy_driver mv3310_drivers[] = {
                .read_status    = mv3310_read_status,
                .get_tunable    = mv3310_get_tunable,
                .set_tunable    = mv3310_set_tunable,
+               .remove         = mv3310_remove,
        },
        {
                .phy_id         = MARVELL_PHY_ID_88E2110,
@@ -798,6 +793,7 @@ static struct phy_driver mv3310_drivers[] = {
                .read_status    = mv3310_read_status,
                .get_tunable    = mv3310_get_tunable,
                .set_tunable    = mv3310_set_tunable,
+               .remove         = mv3310_remove,
        },
 };
 
index 1b95235..57d4464 100644 (file)
@@ -615,7 +615,9 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, u32 phy_id,
        if (c45_ids)
                dev->c45_ids = *c45_ids;
        dev->irq = bus->irq[addr];
+
        dev_set_name(&mdiodev->dev, PHY_ID_FMT, bus->id, addr);
+       device_initialize(&mdiodev->dev);
 
        dev->state = PHY_DOWN;
 
@@ -649,10 +651,8 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, u32 phy_id,
                ret = phy_request_driver_module(dev, phy_id);
        }
 
-       if (!ret) {
-               device_initialize(&mdiodev->dev);
-       } else {
-               kfree(dev);
+       if (ret) {
+               put_device(&mdiodev->dev);
                dev = ERR_PTR(ret);
        }
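The ordering fix above calls device_initialize() as soon as the mdio device is set up, so the failure path must drop the reference with put_device() rather than kfree(): once the embedded kobject is live, the release callback is the only safe way to free the structure. A minimal sketch of that rule, with a hypothetical wrapper (not the PHY code):

	/* Illustrative only: free through the release callback, never kfree(). */
	struct my_wrap {
		struct device dev;
	};

	static void my_wrap_release(struct device *dev)
	{
		kfree(container_of(dev, struct my_wrap, dev));
	}

	static struct my_wrap *my_wrap_create(void)
	{
		struct my_wrap *w = kzalloc(sizeof(*w), GFP_KERNEL);

		if (!w)
			return NULL;

		device_initialize(&w->dev);
		w->dev.release = my_wrap_release;

		if (my_setup(w)) {		/* hypothetical setup step */
			put_device(&w->dev);	/* frees w via my_wrap_release() */
			return NULL;
		}

		return w;
	}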
 
index 7d39f99..2b02fef 100644 (file)
@@ -1504,7 +1504,7 @@ static int determine_ethernet_addr(struct r8152 *tp, struct sockaddr *sa)
 
        sa->sa_family = dev->type;
 
-       ret = eth_platform_get_mac_address(&dev->dev, sa->sa_data);
+       ret = eth_platform_get_mac_address(&tp->udev->dev, sa->sa_data);
        if (ret < 0) {
                if (tp->version == RTL_VER_01) {
                        ret = pla_ocp_read(tp, PLA_IDR, 8, sa->sa_data);
index 6fa8fe5..0ada48e 100644 (file)
@@ -2264,12 +2264,13 @@ static void virtnet_update_settings(struct virtnet_info *vi)
        if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX))
                return;
 
-       speed = virtio_cread32(vi->vdev, offsetof(struct virtio_net_config,
-                                                 speed));
+       virtio_cread_le(vi->vdev, struct virtio_net_config, speed, &speed);
+
        if (ethtool_validate_speed(speed))
                vi->speed = speed;
-       duplex = virtio_cread8(vi->vdev, offsetof(struct virtio_net_config,
-                                                 duplex));
+
+       virtio_cread_le(vi->vdev, struct virtio_net_config, duplex, &duplex);
+
        if (ethtool_validate_duplex(duplex))
                vi->duplex = duplex;
 }
index ca395f9..2818015 100644 (file)
@@ -886,7 +886,8 @@ vmxnet3_parse_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 
                        switch (protocol) {
                        case IPPROTO_TCP:
-                               ctx->l4_hdr_size = tcp_hdrlen(skb);
+                               ctx->l4_hdr_size = skb->encapsulation ? inner_tcp_hdrlen(skb) :
+                                                  tcp_hdrlen(skb);
                                break;
                        case IPPROTO_UDP:
                                ctx->l4_hdr_size = sizeof(struct udphdr);
index 7bcee41..3ca4daf 100644 (file)
@@ -295,14 +295,13 @@ static int dlci_close(struct net_device *dev)
 {
        struct dlci_local       *dlp;
        struct frad_local       *flp;
-       int                     err;
 
        netif_stop_queue(dev);
 
        dlp = netdev_priv(dev);
 
        flp = netdev_priv(dlp->slave);
-       err = (*flp->deactivate)(dlp->slave, dev);
+       (*flp->deactivate)(dlp->slave, dev);
 
        return 0;
 }
index dfc1677..386ed2a 100644 (file)
@@ -230,6 +230,7 @@ static void hdlc_setup_dev(struct net_device *dev)
        dev->max_mtu             = HDLC_MAX_MTU;
        dev->type                = ARPHRD_RAWHDLC;
        dev->hard_header_len     = 16;
+       dev->needed_headroom     = 0;
        dev->addr_len            = 0;
        dev->header_ops          = &hdlc_null_ops;
 }
index f70336b..f52b9fe 100644 (file)
@@ -107,8 +107,14 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        int result;
 
+       /* There should be a pseudo header of 1 byte added by upper layers.
+        * Check to make sure it is there before reading it.
+        */
+       if (skb->len < 1) {
+               kfree_skb(skb);
+               return NETDEV_TX_OK;
+       }
 
-       /* X.25 to LAPB */
        switch (skb->data[0]) {
        case X25_IFACE_DATA:    /* Data to be transmitted */
                skb_pull(skb, 1);
@@ -294,6 +300,15 @@ static int x25_ioctl(struct net_device *dev, struct ifreq *ifr)
                        return result;
 
                memcpy(&state(hdlc)->settings, &new_settings, size);
+
+               /* There's no header_ops so hard_header_len should be 0. */
+               dev->hard_header_len = 0;
+               /* When transmitting data:
+                * first we'll remove a pseudo header of 1 byte,
+                * then we'll prepend an LAPB header of at most 3 bytes.
+                */
+               dev->needed_headroom = 3 - 1;
+
                dev->type = ARPHRD_X25;
                call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
                netif_dormant_off(dev);
index b286843..1ea15f2 100644 (file)
@@ -157,6 +157,12 @@ static netdev_tx_t lapbeth_xmit(struct sk_buff *skb,
        if (!netif_running(dev))
                goto drop;
 
+       /* There should be a pseudo header of 1 byte added by upper layers.
+        * Check to make sure it is there before reading it.
+        */
+       if (skb->len < 1)
+               goto drop;
+
        switch (skb->data[0]) {
        case X25_IFACE_DATA:
                break;
@@ -305,6 +311,7 @@ static void lapbeth_setup(struct net_device *dev)
        dev->netdev_ops      = &lapbeth_netdev_ops;
        dev->needs_free_netdev = true;
        dev->type            = ARPHRD_X25;
+       dev->hard_header_len = 0;
        dev->mtu             = 1000;
        dev->addr_len        = 0;
 }
@@ -331,7 +338,8 @@ static int lapbeth_new_device(struct net_device *dev)
         * then this driver prepends a length field of 2 bytes,
         * then the underlying Ethernet device prepends its own header.
         */
-       ndev->hard_header_len = -1 + 3 + 2 + dev->hard_header_len;
+       ndev->needed_headroom = -1 + 3 + 2 + dev->hard_header_len
+                                          + dev->needed_headroom;
 
        lapbeth = netdev_priv(ndev);
        lapbeth->axdev = ndev;
index 84640a0..de79844 100644 (file)
@@ -307,6 +307,14 @@ static netdev_tx_t x25_asy_xmit(struct sk_buff *skb,
                return NETDEV_TX_OK;
        }
 
+       /* There should be a pseudo header of 1 byte added by upper layers.
+        * Check to make sure it is there before reading it.
+        */
+       if (skb->len < 1) {
+               kfree_skb(skb);
+               return NETDEV_TX_OK;
+       }
+
        switch (skb->data[0]) {
        case X25_IFACE_DATA:
                break;
@@ -752,6 +760,12 @@ static void x25_asy_setup(struct net_device *dev)
        dev->type               = ARPHRD_X25;
        dev->tx_queue_len       = 10;
 
+       /* When transmitting data:
+        * first this driver removes a pseudo header of 1 byte,
+        * then the lapb module prepends an LAPB header of at most 3 bytes.
+        */
+       dev->needed_headroom    = 3 - 1;
+
        /* New-style flags. */
        dev->flags              = IFF_NOARP;
 }
index 7948a2d..2ff0080 100644 (file)
@@ -150,17 +150,17 @@ void rtl8180_write_phy(struct ieee80211_hw *dev, u8 addr, u32 data);
 void rtl8180_set_anaparam(struct rtl8180_priv *priv, u32 anaparam);
 void rtl8180_set_anaparam2(struct rtl8180_priv *priv, u32 anaparam2);
 
-static inline u8 rtl818x_ioread8(struct rtl8180_priv *priv, u8 __iomem *addr)
+static inline u8 rtl818x_ioread8(struct rtl8180_priv *priv, const u8 __iomem *addr)
 {
        return ioread8(addr);
 }
 
-static inline u16 rtl818x_ioread16(struct rtl8180_priv *priv, __le16 __iomem *addr)
+static inline u16 rtl818x_ioread16(struct rtl8180_priv *priv, const __le16 __iomem *addr)
 {
        return ioread16(addr);
 }
 
-static inline u32 rtl818x_ioread32(struct rtl8180_priv *priv, __le32 __iomem *addr)
+static inline u32 rtl818x_ioread32(struct rtl8180_priv *priv, const __le32 __iomem *addr)
 {
        return ioread32(addr);
 }
index 423f9b8..3185efe 100644 (file)
@@ -1205,7 +1205,7 @@ int intel_ntb_peer_spad_write(struct ntb_dev *ntb, int pidx, int sidx,
                               ndev->peer_reg->spad);
 }
 
-static u64 xeon_db_ioread(void __iomem *mmio)
+static u64 xeon_db_ioread(const void __iomem *mmio)
 {
        return (u64)ioread16(mmio);
 }
index 2bc5d83..dea9398 100644 (file)
@@ -91,7 +91,7 @@
 #define GEN3_DB_TOTAL_SHIFT            33
 #define GEN3_SPAD_COUNT                        16
 
-static inline u64 gen3_db_ioread(void __iomem *mmio)
+static inline u64 gen3_db_ioread(const void __iomem *mmio)
 {
        return ioread64(mmio);
 }
index d61fcd9..05e2335 100644 (file)
@@ -103,7 +103,7 @@ struct intel_ntb_dev;
 struct intel_ntb_reg {
        int (*poll_link)(struct intel_ntb_dev *ndev);
        int (*link_is_up)(struct intel_ntb_dev *ndev);
-       u64 (*db_ioread)(void __iomem *mmio);
+       u64 (*db_ioread)(const void __iomem *mmio);
        void (*db_iowrite)(u64 db_bits, void __iomem *mmio);
        unsigned long                   ntb_ctl;
        resource_size_t                 db_size;
index 412d21d..0ff610e 100644 (file)
@@ -1490,10 +1490,8 @@ static int btt_rw_page(struct block_device *bdev, sector_t sector,
 {
        struct btt *btt = bdev->bd_disk->private_data;
        int rc;
-       unsigned int len;
 
-       len = hpage_nr_pages(page) * PAGE_SIZE;
-       rc = btt_do_bvec(btt, NULL, page, len, 0, op, sector);
+       rc = btt_do_bvec(btt, NULL, page, thp_size(page), 0, op, sector);
        if (rc == 0)
                page_endio(page, op_is_write(op), 0);
 
index 09087c3..9552656 100644 (file)
@@ -1037,9 +1037,25 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
                dimm_name = "bus";
        }
 
+       /* Validate command family support against bus declared support */
        if (cmd == ND_CMD_CALL) {
+               unsigned long *mask;
+
                if (copy_from_user(&pkg, p, sizeof(pkg)))
                        return -EFAULT;
+
+               if (nvdimm) {
+                       if (pkg.nd_family > NVDIMM_FAMILY_MAX)
+                               return -EINVAL;
+                       mask = &nd_desc->dimm_family_mask;
+               } else {
+                       if (pkg.nd_family > NVDIMM_BUS_FAMILY_MAX)
+                               return -EINVAL;
+                       mask = &nd_desc->bus_family_mask;
+               }
+
+               if (!test_bit(pkg.nd_family, mask))
+                       return -EINVAL;
        }
 
        if (!desc ||
index fe9bd6f..c21ba06 100644 (file)
@@ -4,6 +4,7 @@
  */
 #include <linux/libnvdimm.h>
 #include <linux/badblocks.h>
+#include <linux/suspend.h>
 #include <linux/export.h>
 #include <linux/module.h>
 #include <linux/blkdev.h>
@@ -389,8 +390,156 @@ static const struct attribute_group nvdimm_bus_attribute_group = {
        .attrs = nvdimm_bus_attributes,
 };
 
+static ssize_t capability_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
+       struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
+       enum nvdimm_fwa_capability cap;
+
+       if (!nd_desc->fw_ops)
+               return -EOPNOTSUPP;
+
+       nvdimm_bus_lock(dev);
+       cap = nd_desc->fw_ops->capability(nd_desc);
+       nvdimm_bus_unlock(dev);
+
+       switch (cap) {
+       case NVDIMM_FWA_CAP_QUIESCE:
+               return sprintf(buf, "quiesce\n");
+       case NVDIMM_FWA_CAP_LIVE:
+               return sprintf(buf, "live\n");
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+static DEVICE_ATTR_RO(capability);
+
+static ssize_t activate_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
+       struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
+       enum nvdimm_fwa_capability cap;
+       enum nvdimm_fwa_state state;
+
+       if (!nd_desc->fw_ops)
+               return -EOPNOTSUPP;
+
+       nvdimm_bus_lock(dev);
+       cap = nd_desc->fw_ops->capability(nd_desc);
+       state = nd_desc->fw_ops->activate_state(nd_desc);
+       nvdimm_bus_unlock(dev);
+
+       if (cap < NVDIMM_FWA_CAP_QUIESCE)
+               return -EOPNOTSUPP;
+
+       switch (state) {
+       case NVDIMM_FWA_IDLE:
+               return sprintf(buf, "idle\n");
+       case NVDIMM_FWA_BUSY:
+               return sprintf(buf, "busy\n");
+       case NVDIMM_FWA_ARMED:
+               return sprintf(buf, "armed\n");
+       case NVDIMM_FWA_ARM_OVERFLOW:
+               return sprintf(buf, "overflow\n");
+       default:
+               return -ENXIO;
+       }
+}
+
+static int exec_firmware_activate(void *data)
+{
+       struct nvdimm_bus_descriptor *nd_desc = data;
+
+       return nd_desc->fw_ops->activate(nd_desc);
+}
+
+static ssize_t activate_store(struct device *dev,
+               struct device_attribute *attr, const char *buf, size_t len)
+{
+       struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
+       struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
+       enum nvdimm_fwa_state state;
+       bool quiesce;
+       ssize_t rc;
+
+       if (!nd_desc->fw_ops)
+               return -EOPNOTSUPP;
+
+       if (sysfs_streq(buf, "live"))
+               quiesce = false;
+       else if (sysfs_streq(buf, "quiesce"))
+               quiesce = true;
+       else
+               return -EINVAL;
+
+       nvdimm_bus_lock(dev);
+       state = nd_desc->fw_ops->activate_state(nd_desc);
+
+       switch (state) {
+       case NVDIMM_FWA_BUSY:
+               rc = -EBUSY;
+               break;
+       case NVDIMM_FWA_ARMED:
+       case NVDIMM_FWA_ARM_OVERFLOW:
+               if (quiesce)
+                       rc = hibernate_quiet_exec(exec_firmware_activate, nd_desc);
+               else
+                       rc = nd_desc->fw_ops->activate(nd_desc);
+               break;
+       case NVDIMM_FWA_IDLE:
+       default:
+               rc = -ENXIO;
+       }
+       nvdimm_bus_unlock(dev);
+
+       if (rc == 0)
+               rc = len;
+       return rc;
+}
+
+static DEVICE_ATTR_ADMIN_RW(activate);
+
+static umode_t nvdimm_bus_firmware_visible(struct kobject *kobj, struct attribute *a, int n)
+{
+       struct device *dev = container_of(kobj, typeof(*dev), kobj);
+       struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
+       struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
+       enum nvdimm_fwa_capability cap;
+
+       /*
+        * Both 'activate' and 'capability' disappear when no ops are
+        * detected, or a negative capability is indicated.
+        */
+       if (!nd_desc->fw_ops)
+               return 0;
+
+       nvdimm_bus_lock(dev);
+       cap = nd_desc->fw_ops->capability(nd_desc);
+       nvdimm_bus_unlock(dev);
+
+       if (cap < NVDIMM_FWA_CAP_QUIESCE)
+               return 0;
+
+       return a->mode;
+}
+static struct attribute *nvdimm_bus_firmware_attributes[] = {
+       &dev_attr_activate.attr,
+       &dev_attr_capability.attr,
+       NULL,
+};
+
+static const struct attribute_group nvdimm_bus_firmware_attribute_group = {
+       .name = "firmware",
+       .attrs = nvdimm_bus_firmware_attributes,
+       .is_visible = nvdimm_bus_firmware_visible,
+};
+
 const struct attribute_group *nvdimm_bus_attribute_groups[] = {
        &nvdimm_bus_attribute_group,
+       &nvdimm_bus_firmware_attribute_group,
        NULL,
 };
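Both new "firmware" attribute groups rely on the sysfs is_visible() contract: returning 0 from the callback hides the attribute entirely, while returning a->mode exposes it with its declared permissions. A minimal sketch of that contract, with a hypothetical capability check:

	/* Illustrative only: hide the whole group when the feature is absent. */
	static umode_t my_group_visible(struct kobject *kobj,
					struct attribute *a, int n)
	{
		struct device *dev = kobj_to_dev(kobj);

		if (!my_feature_supported(dev))	/* hypothetical probe */
			return 0;		/* attribute is not created */

		return a->mode;			/* keep declared permissions */
	}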
 
index b7b77e8..61374de 100644 (file)
@@ -363,14 +363,14 @@ __weak ssize_t security_show(struct device *dev,
 {
        struct nvdimm *nvdimm = to_nvdimm(dev);
 
+       if (test_bit(NVDIMM_SECURITY_OVERWRITE, &nvdimm->sec.flags))
+               return sprintf(buf, "overwrite\n");
        if (test_bit(NVDIMM_SECURITY_DISABLED, &nvdimm->sec.flags))
                return sprintf(buf, "disabled\n");
        if (test_bit(NVDIMM_SECURITY_UNLOCKED, &nvdimm->sec.flags))
                return sprintf(buf, "unlocked\n");
        if (test_bit(NVDIMM_SECURITY_LOCKED, &nvdimm->sec.flags))
                return sprintf(buf, "locked\n");
-       if (test_bit(NVDIMM_SECURITY_OVERWRITE, &nvdimm->sec.flags))
-               return sprintf(buf, "overwrite\n");
        return -ENOTTY;
 }
 
@@ -446,9 +446,124 @@ static const struct attribute_group nvdimm_attribute_group = {
        .is_visible = nvdimm_visible,
 };
 
+static ssize_t result_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       struct nvdimm *nvdimm = to_nvdimm(dev);
+       enum nvdimm_fwa_result result;
+
+       if (!nvdimm->fw_ops)
+               return -EOPNOTSUPP;
+
+       nvdimm_bus_lock(dev);
+       result = nvdimm->fw_ops->activate_result(nvdimm);
+       nvdimm_bus_unlock(dev);
+
+       switch (result) {
+       case NVDIMM_FWA_RESULT_NONE:
+               return sprintf(buf, "none\n");
+       case NVDIMM_FWA_RESULT_SUCCESS:
+               return sprintf(buf, "success\n");
+       case NVDIMM_FWA_RESULT_FAIL:
+               return sprintf(buf, "fail\n");
+       case NVDIMM_FWA_RESULT_NOTSTAGED:
+               return sprintf(buf, "not_staged\n");
+       case NVDIMM_FWA_RESULT_NEEDRESET:
+               return sprintf(buf, "need_reset\n");
+       default:
+               return -ENXIO;
+       }
+}
+static DEVICE_ATTR_ADMIN_RO(result);
+
+static ssize_t activate_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       struct nvdimm *nvdimm = to_nvdimm(dev);
+       enum nvdimm_fwa_state state;
+
+       if (!nvdimm->fw_ops)
+               return -EOPNOTSUPP;
+
+       nvdimm_bus_lock(dev);
+       state = nvdimm->fw_ops->activate_state(nvdimm);
+       nvdimm_bus_unlock(dev);
+
+       switch (state) {
+       case NVDIMM_FWA_IDLE:
+               return sprintf(buf, "idle\n");
+       case NVDIMM_FWA_BUSY:
+               return sprintf(buf, "busy\n");
+       case NVDIMM_FWA_ARMED:
+               return sprintf(buf, "armed\n");
+       default:
+               return -ENXIO;
+       }
+}
+
+static ssize_t activate_store(struct device *dev, struct device_attribute *attr,
+               const char *buf, size_t len)
+{
+       struct nvdimm *nvdimm = to_nvdimm(dev);
+       enum nvdimm_fwa_trigger arg;
+       int rc;
+
+       if (!nvdimm->fw_ops)
+               return -EOPNOTSUPP;
+
+       if (sysfs_streq(buf, "arm"))
+               arg = NVDIMM_FWA_ARM;
+       else if (sysfs_streq(buf, "disarm"))
+               arg = NVDIMM_FWA_DISARM;
+       else
+               return -EINVAL;
+
+       nvdimm_bus_lock(dev);
+       rc = nvdimm->fw_ops->arm(nvdimm, arg);
+       nvdimm_bus_unlock(dev);
+
+       if (rc < 0)
+               return rc;
+       return len;
+}
+static DEVICE_ATTR_ADMIN_RW(activate);
+
+static struct attribute *nvdimm_firmware_attributes[] = {
+       &dev_attr_activate.attr,
+       &dev_attr_result.attr,
+       NULL,
+};
+
+static umode_t nvdimm_firmware_visible(struct kobject *kobj, struct attribute *a, int n)
+{
+       struct device *dev = container_of(kobj, typeof(*dev), kobj);
+       struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
+       struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
+       struct nvdimm *nvdimm = to_nvdimm(dev);
+       enum nvdimm_fwa_capability cap;
+
+       if (!nd_desc->fw_ops)
+               return 0;
+       if (!nvdimm->fw_ops)
+               return 0;
+
+       nvdimm_bus_lock(dev);
+       cap = nd_desc->fw_ops->capability(nd_desc);
+       nvdimm_bus_unlock(dev);
+
+       if (cap < NVDIMM_FWA_CAP_QUIESCE)
+               return 0;
+
+       return a->mode;
+}
+
+static const struct attribute_group nvdimm_firmware_attribute_group = {
+       .name = "firmware",
+       .attrs = nvdimm_firmware_attributes,
+       .is_visible = nvdimm_firmware_visible,
+};
+
 static const struct attribute_group *nvdimm_attribute_groups[] = {
        &nd_device_attribute_group,
        &nvdimm_attribute_group,
+       &nvdimm_firmware_attribute_group,
        NULL,
 };
 
@@ -467,7 +582,8 @@ struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus,
                void *provider_data, const struct attribute_group **groups,
                unsigned long flags, unsigned long cmd_mask, int num_flush,
                struct resource *flush_wpq, const char *dimm_id,
-               const struct nvdimm_security_ops *sec_ops)
+               const struct nvdimm_security_ops *sec_ops,
+               const struct nvdimm_fw_ops *fw_ops)
 {
        struct nvdimm *nvdimm = kzalloc(sizeof(*nvdimm), GFP_KERNEL);
        struct device *dev;
@@ -497,6 +613,7 @@ struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus,
        dev->devt = MKDEV(nvdimm_major, nvdimm->id);
        dev->groups = groups;
        nvdimm->sec.ops = sec_ops;
+       nvdimm->fw_ops = fw_ops;
        nvdimm->sec.overwrite_tmo = 0;
        INIT_DELAYED_WORK(&nvdimm->dwork, nvdimm_security_overwrite_query);
        /*
index ae155e8..6da67f4 100644 (file)
@@ -1309,7 +1309,7 @@ static ssize_t resource_show(struct device *dev,
                return -ENXIO;
        return sprintf(buf, "%#llx\n", (unsigned long long) res->start);
 }
-static DEVICE_ATTR(resource, 0400, resource_show, NULL);
+static DEVICE_ATTR_ADMIN_RO(resource);
 
 static const unsigned long blk_lbasize_supported[] = { 512, 520, 528,
        4096, 4104, 4160, 4224, 0 };
index ddb9d97..564faa3 100644 (file)
@@ -45,6 +45,7 @@ struct nvdimm {
                struct kernfs_node *overwrite_state;
        } sec;
        struct delayed_work dwork;
+       const struct nvdimm_fw_ops *fw_ops;
 };
 
 static inline unsigned long nvdimm_security_flags(
index 34db557..3e11ef8 100644 (file)
@@ -218,7 +218,7 @@ static ssize_t resource_show(struct device *dev,
 
        return rc;
 }
-static DEVICE_ATTR(resource, 0400, resource_show, NULL);
+static DEVICE_ATTR_ADMIN_RO(resource);
 
 static ssize_t size_show(struct device *dev,
                struct device_attribute *attr, char *buf)
index 94790e6..fab29b5 100644 (file)
@@ -238,11 +238,9 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector,
        blk_status_t rc;
 
        if (op_is_write(op))
-               rc = pmem_do_write(pmem, page, 0, sector,
-                                  hpage_nr_pages(page) * PAGE_SIZE);
+               rc = pmem_do_write(pmem, page, 0, sector, thp_size(page));
        else
-               rc = pmem_do_read(pmem, page, 0, sector,
-                                  hpage_nr_pages(page) * PAGE_SIZE);
+               rc = pmem_do_read(pmem, page, 0, sector, thp_size(page));
        /*
         * The ->rw_page interface is subtle and tricky.  The core
         * retries on any error, so we can only invoke page_endio() in
index c3237c2..ef23119 100644 (file)
@@ -605,7 +605,7 @@ static ssize_t resource_show(struct device *dev,
 
        return sprintf(buf, "%#llx\n", nd_region->ndr_start);
 }
-static DEVICE_ATTR(resource, 0400, resource_show, NULL);
+static DEVICE_ATTR_ADMIN_RO(resource);
 
 static ssize_t persistence_domain_show(struct device *dev,
                struct device_attribute *attr, char *buf)
index 4cef69b..4b80150 100644 (file)
@@ -450,14 +450,19 @@ void __nvdimm_security_overwrite_query(struct nvdimm *nvdimm)
        else
                dev_dbg(&nvdimm->dev, "overwrite completed\n");
 
-       if (nvdimm->sec.overwrite_state)
-               sysfs_notify_dirent(nvdimm->sec.overwrite_state);
+       /*
+        * Mark the overwrite work done and update dimm security flags,
+        * then send a sysfs event notification to wake up userspace
+        * poll threads to pick up the changed state.
+        */
        nvdimm->sec.overwrite_tmo = 0;
        clear_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags);
        clear_bit(NDD_WORK_PENDING, &nvdimm->flags);
-       put_device(&nvdimm->dev);
        nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
-       nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_MASTER);
+       nvdimm->sec.ext_flags = nvdimm_security_flags(nvdimm, NVDIMM_MASTER);
+       if (nvdimm->sec.overwrite_state)
+               sysfs_notify_dirent(nvdimm->sec.overwrite_state);
+       put_device(&nvdimm->dev);
 }
 
 void nvdimm_security_overwrite_query(struct work_struct *work)
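
The reordering above ensures the security flags are refreshed before sysfs_notify_dirent() wakes any pollers, and that the device reference is dropped last. A minimal sketch of the userspace side this protects, polling the DIMM "security" attribute (the device path and attribute name here are assumptions for illustration):

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[64];
	ssize_t n;
	int fd = open("/sys/bus/nd/devices/nmem0/security", O_RDONLY);
	struct pollfd pfd = { .fd = fd, .events = POLLPRI | POLLERR };

	if (fd < 0)
		return 1;
	/* sysfs requires an initial read before poll() will block. */
	n = read(fd, buf, sizeof(buf) - 1);
	/* Sleeps until the driver calls sysfs_notify_dirent(). */
	poll(&pfd, 1, -1);
	lseek(fd, 0, SEEK_SET);
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		/* With the fix, this read observes the post-overwrite state. */
		printf("security: %s", buf);
	}
	close(fd);
	return 0;
}
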
index 5e3d07b..726c735 100644 (file)
@@ -58,9 +58,9 @@ static int virtio_pmem_probe(struct virtio_device *vdev)
                goto out_err;
        }
 
-       virtio_cread(vpmem->vdev, struct virtio_pmem_config,
+       virtio_cread_le(vpmem->vdev, struct virtio_pmem_config,
                        start, &vpmem->start);
-       virtio_cread(vpmem->vdev, struct virtio_pmem_config,
+       virtio_cread_le(vpmem->vdev, struct virtio_pmem_config,
                        size, &vpmem->size);
 
        res.start = vpmem->start;
index 590493e..da4f734 100644 (file)
@@ -128,15 +128,29 @@ static unsigned int of_bus_pci_get_flags(const __be32 *addr)
  * PCI bus specific translator
  */
 
+static bool of_node_is_pcie(struct device_node *np)
+{
+       bool is_pcie = of_node_name_eq(np, "pcie");
+
+       if (is_pcie)
+               pr_warn_once("%pOF: Missing device_type\n", np);
+
+       return is_pcie;
+}
+
 static int of_bus_pci_match(struct device_node *np)
 {
        /*
         * "pciex" is PCI Express
         * "vci" is for the /chaos bridge on 1st-gen PCI powermacs
         * "ht" is hypertransport
+        *
+        * If none of the device_type values match, and the node name is
+        * "pcie", accept the device as PCI (with a warning).
         */
        return of_node_is_type(np, "pci") || of_node_is_type(np, "pciex") ||
-               of_node_is_type(np, "vci") || of_node_is_type(np, "ht");
+               of_node_is_type(np, "vci") || of_node_is_type(np, "ht") ||
+               of_node_is_pcie(np);
 }
 
 static void of_bus_pci_count_cells(struct device_node *np,
@@ -985,6 +999,11 @@ int of_dma_get_range(struct device_node *np, u64 *dma_addr, u64 *paddr, u64 *siz
                        /* Don't error out as we'd break some existing DTs */
                        continue;
                }
+               if (range.cpu_addr == OF_BAD_ADDR) {
+                       pr_err("translation of DMA address(%llx) to CPU address failed node(%pOF)\n",
+                              range.bus_addr, node);
+                       continue;
+               }
                dma_offset = range.cpu_addr - range.bus_addr;
 
                /* Take lower and upper limits */
index 9d7fb45..9668ea0 100644 (file)
@@ -893,8 +893,10 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
                 * have OPP table for the device, while others don't and
                 * opp_set_rate() just needs to behave like clk_set_rate().
                 */
-               if (!_get_opp_count(opp_table))
-                       return 0;
+               if (!_get_opp_count(opp_table)) {
+                       ret = 0;
+                       goto put_opp_table;
+               }
 
                if (!opp_table->required_opp_tables && !opp_table->regulators &&
                    !opp_table->paths) {
@@ -905,7 +907,7 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
 
                ret = _set_opp_bw(opp_table, NULL, dev, true);
                if (ret)
-                       return ret;
+                       goto put_opp_table;
 
                if (opp_table->regulator_enabled) {
                        regulator_disable(opp_table->regulators[0]);
@@ -932,10 +934,13 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
 
        /* Return early if nothing to do */
        if (old_freq == freq) {
-               dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n",
-                       __func__, freq);
-               ret = 0;
-               goto put_opp_table;
+               if (!opp_table->required_opp_tables && !opp_table->regulators &&
+                   !opp_table->paths) {
+                       dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n",
+                               __func__, freq);
+                       ret = 0;
+                       goto put_opp_table;
+               }
        }
 
        /*
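
Both early returns removed above fired after a reference on opp_table had already been taken earlier in dev_pm_opp_set_rate(), so they leaked it; routing every exit through the existing put_opp_table label fixes that. A generic sketch of the rule being enforced (all helper names here are illustrative, not the OPP core's API):

static int set_rate_sketch(struct device *dev, unsigned long target_freq)
{
	struct opp_table *table = get_table_ref(dev);	/* takes a reference */
	int ret = 0;

	if (IS_ERR(table))
		return PTR_ERR(table);			/* nothing taken yet */

	if (nothing_to_do(table, target_freq))
		goto put_table;				/* never return with the ref held */

	ret = do_transition(table, target_freq);

put_table:
	put_table_ref(table);				/* balances get_table_ref() */
	return ret;
}
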
index 5368452..d4314fb 100644 (file)
@@ -1270,7 +1270,7 @@ sba_ioc_init_pluto(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
        ** (one that doesn't overlap memory or LMMIO space) in the
        ** IBASE and IMASK registers.
        */
-       ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE);
+       ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE) & ~0x1fffffULL;
        iova_space_size = ~(READ_REG(ioc->ioc_hpa + IOC_IMASK) & 0xFFFFFFFFUL) + 1;
 
        if ((ioc->ibase < 0xfed00000UL) && ((ioc->ibase + iova_space_size) > 0xfee00000UL)) {
index 64ebed1..f357f9a 100644 (file)
@@ -556,13 +556,14 @@ int pci_p2pdma_distance_many(struct pci_dev *provider, struct device **clients,
                return -1;
 
        for (i = 0; i < num_clients; i++) {
-               if (IS_ENABLED(CONFIG_DMA_VIRT_OPS) &&
-                   clients[i]->dma_ops == &dma_virt_ops) {
+#ifdef CONFIG_DMA_VIRT_OPS
+               if (clients[i]->dma_ops == &dma_virt_ops) {
                        if (verbose)
                                dev_warn(clients[i],
                                         "cannot be used for peer-to-peer DMA because the driver makes use of dma_virt_ops\n");
                        return -1;
                }
+#endif
 
                pci_client = find_parent_pci_dev(clients[i]);
                if (!pci_client) {
@@ -842,9 +843,10 @@ static int __pci_p2pdma_map_sg(struct pci_p2pdma_pagemap *p2p_pgmap,
         * this should never happen because it will be prevented
         * by the check in pci_p2pdma_distance_many()
         */
-       if (WARN_ON_ONCE(IS_ENABLED(CONFIG_DMA_VIRT_OPS) &&
-                        dev->dma_ops == &dma_virt_ops))
+#ifdef CONFIG_DMA_VIRT_OPS
+       if (WARN_ON_ONCE(dev->dma_ops == &dma_virt_ops))
                return 0;
+#endif
 
        for_each_sg(sg, s, nents, i) {
                paddr = sg_phys(s);
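
The switch from IS_ENABLED() to a hard #ifdef matters because IS_ENABLED() only lets the optimizer drop the branch; the dma_virt_ops reference still has to parse and link, which presumably fails once the symbol's declaration is itself guarded by CONFIG_DMA_VIRT_OPS. A generic kernel-style illustration of the difference, with made-up config and symbol names:

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* CONFIG_FOO_OPS and foo_ops are made up for this sketch. */
#ifdef CONFIG_FOO_OPS
extern const struct dma_map_ops foo_ops;
#endif

static bool uses_foo_ops(const struct device *dev)
{
#ifdef CONFIG_FOO_OPS
	/* The reference only exists when the symbol is declared and built. */
	return dev->dma_ops == &foo_ops;
#else
	return false;
#endif
}
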
index 5a0c8e8..7efdfb4 100644 (file)
  * @pctrldev: pinctrl handle
  * @chip: gpio chip
  * @lock: spinlock to protect registers
+ * @clk: clock control
  * @soc: reference to soc_data
  * @base: pinctrl register base address
+ * @irq_chip: IRQ chip information
+ * @num_irq: number of possible interrupts
+ * @irq: interrupt numbers
  */
 struct owl_pinctrl {
        struct device *dev;
index fa32c3e..7efe6db 100644 (file)
@@ -46,6 +46,7 @@
 #define SCU634         0x634 /* Disable GPIO Internal Pull-Down #5 */
 #define SCU638         0x638 /* Disable GPIO Internal Pull-Down #6 */
 #define SCU694         0x694 /* Multi-function Pin Control #25 */
+#define SCU69C         0x69C /* Multi-function Pin Control #27 */
 #define SCUC20         0xC20 /* PCIE configuration Setting Control */
 
 #define ASPEED_G6_NR_PINS 256
@@ -819,11 +820,13 @@ FUNC_DECL_2(PWM14, PWM14G0, PWM14G1);
 #define Y23 127
 SIG_EXPR_LIST_DECL_SEMG(Y23, PWM15, PWM15G1, PWM15, SIG_DESC_SET(SCU41C, 31));
 SIG_EXPR_LIST_DECL_SESG(Y23, THRUOUT3, THRU3, SIG_DESC_SET(SCU4BC, 31));
-PIN_DECL_2(Y23, GPIOP7, PWM15, THRUOUT3);
+SIG_EXPR_LIST_DECL_SESG(Y23, HEARTBEAT, HEARTBEAT, SIG_DESC_SET(SCU69C, 31));
+PIN_DECL_3(Y23, GPIOP7, PWM15, THRUOUT3, HEARTBEAT);
 GROUP_DECL(PWM15G1, Y23);
 FUNC_DECL_2(PWM15, PWM15G0, PWM15G1);
 
 FUNC_GROUP_DECL(THRU3, AB24, Y23);
+FUNC_GROUP_DECL(HEARTBEAT, Y23);
 
 #define AA25 128
 SSSF_PIN_DECL(AA25, GPIOQ0, TACH0, SIG_DESC_SET(SCU430, 0));
@@ -1920,6 +1923,7 @@ static const struct aspeed_pin_group aspeed_g6_groups[] = {
        ASPEED_PINCTRL_GROUP(GPIU5),
        ASPEED_PINCTRL_GROUP(GPIU6),
        ASPEED_PINCTRL_GROUP(GPIU7),
+       ASPEED_PINCTRL_GROUP(HEARTBEAT),
        ASPEED_PINCTRL_GROUP(HVI3C3),
        ASPEED_PINCTRL_GROUP(HVI3C4),
        ASPEED_PINCTRL_GROUP(I2C1),
@@ -2158,6 +2162,7 @@ static const struct aspeed_pin_function aspeed_g6_functions[] = {
        ASPEED_PINCTRL_FUNC(GPIU5),
        ASPEED_PINCTRL_FUNC(GPIU6),
        ASPEED_PINCTRL_FUNC(GPIU7),
+       ASPEED_PINCTRL_FUNC(HEARTBEAT),
        ASPEED_PINCTRL_FUNC(I2C1),
        ASPEED_PINCTRL_FUNC(I2C10),
        ASPEED_PINCTRL_FUNC(I2C11),
index b625a65..53f3f8a 100644 (file)
@@ -76,6 +76,9 @@ static int aspeed_sig_expr_enable(struct aspeed_pinmux_data *ctx,
 {
        int ret;
 
+       pr_debug("Enabling signal %s for %s\n", expr->signal,
+                expr->function);
+
        ret = aspeed_sig_expr_eval(ctx, expr, true);
        if (ret < 0)
                return ret;
@@ -91,6 +94,9 @@ static int aspeed_sig_expr_disable(struct aspeed_pinmux_data *ctx,
 {
        int ret;
 
+       pr_debug("Disabling signal %s for %s\n", expr->signal,
+                expr->function);
+
        ret = aspeed_sig_expr_eval(ctx, expr, true);
        if (ret < 0)
                return ret;
@@ -229,7 +235,7 @@ int aspeed_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned int function,
                const struct aspeed_sig_expr **funcs;
                const struct aspeed_sig_expr ***prios;
 
-               pr_debug("Muxing pin %d for %s\n", pin, pfunc->name);
+               pr_debug("Muxing pin %s for %s\n", pdesc->name, pfunc->name);
 
                if (!pdesc)
                        return -EINVAL;
@@ -269,6 +275,9 @@ int aspeed_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned int function,
                ret = aspeed_sig_expr_enable(&pdata->pinmux, expr);
                if (ret)
                        return ret;
+
+               pr_debug("Muxed pin %s as %s for %s\n", pdesc->name, expr->signal,
+                        expr->function);
        }
 
        return 0;
@@ -317,6 +326,8 @@ int aspeed_gpio_request_enable(struct pinctrl_dev *pctldev,
        if (!prios)
                return -ENXIO;
 
+       pr_debug("Muxing pin %s for GPIO\n", pdesc->name);
+
        /* Disable any functions of higher priority than GPIO */
        while ((funcs = *prios)) {
                if (aspeed_gpio_in_exprs(funcs))
@@ -346,14 +357,22 @@ int aspeed_gpio_request_enable(struct pinctrl_dev *pctldev,
         * lowest-priority signal type. As such it has no associated
         * expression.
         */
-       if (!expr)
+       if (!expr) {
+               pr_debug("Muxed pin %s as GPIO\n", pdesc->name);
                return 0;
+       }
 
        /*
         * If GPIO is not the lowest priority signal type, assume there is only
         * one expression defined to enable the GPIO function
         */
-       return aspeed_sig_expr_enable(&pdata->pinmux, expr);
+       ret = aspeed_sig_expr_enable(&pdata->pinmux, expr);
+       if (ret)
+               return ret;
+
+       pr_debug("Muxed pin %s as %s\n", pdesc->name, expr->signal);
+
+       return 0;
 }
 
 int aspeed_pinctrl_probe(struct platform_device *pdev,
index 71e6661..9ab1f42 100644 (file)
@@ -59,7 +59,7 @@
 #define BCM281XX_HDMI_PIN_REG_MODE_MASK                0x0010
 #define BCM281XX_HDMI_PIN_REG_MODE_SHIFT       4
 
-/**
+/*
  * bcm281xx_pin_type - types of pin register
  */
 enum bcm281xx_pin_type {
@@ -73,7 +73,7 @@ static enum bcm281xx_pin_type std_pin = BCM281XX_PIN_TYPE_STD;
 static enum bcm281xx_pin_type i2c_pin = BCM281XX_PIN_TYPE_I2C;
 static enum bcm281xx_pin_type hdmi_pin = BCM281XX_PIN_TYPE_HDMI;
 
-/**
+/*
  * bcm281xx_pin_function- define pin function
  */
 struct bcm281xx_pin_function {
@@ -82,7 +82,7 @@ struct bcm281xx_pin_function {
        const unsigned ngroups;
 };
 
-/**
+/*
  * bcm281xx_pinctrl_data - Broadcom-specific pinctrl data
  * @reg_base - base of pinctrl registers
  */
index a38f0d5..e2bd2dc 100644 (file)
@@ -131,7 +131,7 @@ static inline unsigned iproc_pin_to_gpio(unsigned pin)
  *  iproc_set_bit - set or clear one bit (corresponding to the GPIO pin) in a
  *  Iproc GPIO register
  *
- *  @iproc_gpio: Iproc GPIO device
+ *  @chip: Iproc GPIO device
  *  @reg: register offset
  *  @gpio: GPIO pin
  *  @set: set or clear
index bed0124..a00a42a 100644 (file)
@@ -154,15 +154,9 @@ static irqreturn_t nsp_gpio_irq_handler(int irq, void *data)
                level &= readl(chip->base + NSP_GPIO_INT_MASK);
                int_bits = level | event;
 
-               for_each_set_bit(bit, &int_bits, gc->ngpio) {
-                       /*
-                        * Clear the interrupt before invoking the
-                        * handler, so we do not leave any window
-                        */
-                       writel(BIT(bit), chip->base + NSP_GPIO_EVENT);
+               for_each_set_bit(bit, &int_bits, gc->ngpio)
                        generic_handle_irq(
                                irq_linear_revmap(gc->irq.domain, bit));
-               }
        }
 
        return  int_bits ? IRQ_HANDLED : IRQ_NONE;
@@ -178,7 +172,7 @@ static void nsp_gpio_irq_ack(struct irq_data *d)
 
        trigger_type = irq_get_trigger_type(d->irq);
        if (trigger_type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
-               nsp_set_bit(chip, REG, NSP_GPIO_EVENT, gpio, val);
+               writel(val, chip->base + NSP_GPIO_EVENT);
 }
 
 /*
@@ -262,6 +256,12 @@ static int nsp_gpio_irq_set_type(struct irq_data *d, unsigned int type)
 
        nsp_set_bit(chip, REG, NSP_GPIO_EVENT_INT_POLARITY, gpio, falling);
        nsp_set_bit(chip, REG, NSP_GPIO_INT_POLARITY, gpio, level_low);
+
+       if (type & IRQ_TYPE_EDGE_BOTH)
+               irq_set_handler_locked(d, handle_edge_irq);
+       else
+               irq_set_handler_locked(d, handle_level_irq);
+
        raw_spin_unlock_irqrestore(&chip->lock, flags);
 
        dev_dbg(chip->dev, "gpio:%u level_low:%s falling:%s\n", gpio,
@@ -691,7 +691,7 @@ static int nsp_gpio_probe(struct platform_device *pdev)
                girq->num_parents = 0;
                girq->parents = NULL;
                girq->default_type = IRQ_TYPE_NONE;
-               girq->handler = handle_simple_irq;
+               girq->handler = handle_bad_irq;
        }
 
        ret = devm_gpiochip_add_data(dev, gc, chip);
index 821242b..3663d87 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/pinctrl/machine.h>
 
 #ifdef CONFIG_GPIOLIB
+#include "../gpio/gpiolib.h"
 #include <asm-generic/gpio.h>
 #endif
 
@@ -161,7 +162,7 @@ int pin_get_from_name(struct pinctrl_dev *pctldev, const char *name)
 /**
  * pin_get_name_from_id() - look up a pin name from a pin id
  * @pctldev: the pin control device to lookup the pin on
- * @name: the name of the pin to look up
+ * @pin: pin number/id to look up
  */
 const char *pin_get_name(struct pinctrl_dev *pctldev, const unsigned pin)
 {
@@ -577,7 +578,7 @@ EXPORT_SYMBOL_GPL(pinctrl_generic_get_group_pins);
 /**
  * pinctrl_generic_get_group() - returns a pin group based on the number
  * @pctldev: pin controller device
- * @gselector: group number
+ * @selector: group number
  */
 struct group_desc *pinctrl_generic_get_group(struct pinctrl_dev *pctldev,
                                             unsigned int selector)
@@ -1329,7 +1330,7 @@ static void devm_pinctrl_release(struct device *dev, void *res)
 }
 
 /**
- * struct devm_pinctrl_get() - Resource managed pinctrl_get()
+ * devm_pinctrl_get() - Resource managed pinctrl_get()
  * @dev: the device to obtain the handle for
  *
  * If there is a need to explicitly destroy the returned struct pinctrl,
@@ -1451,7 +1452,7 @@ EXPORT_SYMBOL_GPL(pinctrl_register_mappings);
 
 /**
  * pinctrl_unregister_mappings() - unregister a set of pin controller mappings
- * @maps: the pincontrol mappings table passed to pinctrl_register_mappings()
+ * @map: the pincontrol mappings table passed to pinctrl_register_mappings()
  *     when registering the mappings.
  */
 void pinctrl_unregister_mappings(const struct pinctrl_map *map)
@@ -1601,6 +1602,9 @@ static int pinctrl_pins_show(struct seq_file *s, void *what)
        struct pinctrl_dev *pctldev = s->private;
        const struct pinctrl_ops *ops = pctldev->desc->pctlops;
        unsigned i, pin;
+       struct pinctrl_gpio_range *range;
+       unsigned int gpio_num;
+       struct gpio_chip *chip;
 
        seq_printf(s, "registered pins: %d\n", pctldev->desc->npins);
 
@@ -1618,6 +1622,23 @@ static int pinctrl_pins_show(struct seq_file *s, void *what)
 
                seq_printf(s, "pin %d (%s) ", pin, desc->name);
 
+#ifdef CONFIG_GPIOLIB
+               gpio_num = 0;
+               list_for_each_entry(range, &pctldev->gpio_ranges, node) {
+                       if ((pin >= range->pin_base) &&
+                           (pin < (range->pin_base + range->npins))) {
+                               gpio_num = range->base + (pin - range->pin_base);
+                               break;
+                       }
+               }
+               chip = gpio_to_chip(gpio_num);
+               if (chip && chip->gpiodev && chip->gpiodev->base)
+                       seq_printf(s, "%u:%s ", gpio_num -
+                               chip->gpiodev->base, chip->label);
+               else
+                       seq_puts(s, "0:? ");
+#endif
+
                /* Driver-specific info per pin */
                if (ops->pin_dbg_show)
                        ops->pin_dbg_show(pctldev, s, pin);
@@ -2226,9 +2247,9 @@ EXPORT_SYMBOL_GPL(devm_pinctrl_register);
  * @dev: parent device for this pin controller
  * @pctldesc: descriptor for this pin controller
  * @driver_data: private pin controller data for this pin controller
+ * @pctldev: pin controller device
  *
- * Returns an error pointer if pincontrol register failed. Otherwise
- * it returns valid pinctrl handle.
+ * Returns zero on success or an error number on failure.
  *
  * The pinctrl device will be automatically released when the device is unbound.
  */
index c6fe7d6..5eff8c2 100644 (file)
@@ -17,7 +17,8 @@
  * struct pinctrl_dt_map - mapping table chunk parsed from device tree
  * @node: list node for struct pinctrl's @dt_maps field
  * @pctldev: the pin controller that allocated this struct, and will free it
- * @maps: the mapping table entries
+ * @map: the mapping table entries
+ * @num_maps: number of mapping table entries
  */
 struct pinctrl_dt_map {
        struct list_head node;
@@ -397,7 +398,7 @@ static int pinctrl_copy_args(const struct device_node *np,
  * @np: pointer to device node with the property
  * @list_name: property that contains the list
  * @index: index within the list
- * @out_arts: entries in the list pointed by index
+ * @out_args: entries in the list pointed by index
  *
  * Finds the selected element in a pinctrl array consisting of an index
  * within the controller and a number of u32 entries specified for each
index 4ca44dd..08fcf5c 100644 (file)
@@ -124,49 +124,49 @@ config PINCTRL_IMX7ULP
          Say Y here to enable the imx7ulp pinctrl driver
 
 config PINCTRL_IMX8MM
-       bool "IMX8MM pinctrl driver"
+       tristate "IMX8MM pinctrl driver"
        depends on ARCH_MXC
        select PINCTRL_IMX
        help
          Say Y here to enable the imx8mm pinctrl driver
 
 config PINCTRL_IMX8MN
-       bool "IMX8MN pinctrl driver"
+       tristate "IMX8MN pinctrl driver"
        depends on ARCH_MXC
        select PINCTRL_IMX
        help
          Say Y here to enable the imx8mn pinctrl driver
 
 config PINCTRL_IMX8MP
-       bool "IMX8MP pinctrl driver"
+       tristate "IMX8MP pinctrl driver"
        depends on ARCH_MXC
        select PINCTRL_IMX
        help
          Say Y here to enable the imx8mp pinctrl driver
 
 config PINCTRL_IMX8MQ
-       bool "IMX8MQ pinctrl driver"
+       tristate "IMX8MQ pinctrl driver"
        depends on ARCH_MXC
        select PINCTRL_IMX
        help
          Say Y here to enable the imx8mq pinctrl driver
 
 config PINCTRL_IMX8QM
-       bool "IMX8QM pinctrl driver"
+       tristate "IMX8QM pinctrl driver"
        depends on IMX_SCU && ARCH_MXC && ARM64
        select PINCTRL_IMX_SCU
        help
          Say Y here to enable the imx8qm pinctrl driver
 
 config PINCTRL_IMX8QXP
-       bool "IMX8QXP pinctrl driver"
+       tristate "IMX8QXP pinctrl driver"
        depends on IMX_SCU && ARCH_MXC && ARM64
        select PINCTRL_IMX_SCU
        help
          Say Y here to enable the imx8qxp pinctrl driver
 
 config PINCTRL_IMX8DXL
-       bool "IMX8DXL pinctrl driver"
+       tristate "IMX8DXL pinctrl driver"
        depends on IMX_SCU && ARCH_MXC && ARM64
        select PINCTRL_IMX_SCU
        help
index 1f81569..507e4af 100644 (file)
@@ -877,6 +877,7 @@ int imx_pinctrl_probe(struct platform_device *pdev,
 
        return pinctrl_enable(ipctl->pctl);
 }
+EXPORT_SYMBOL_GPL(imx_pinctrl_probe);
 
 static int __maybe_unused imx_pinctrl_suspend(struct device *dev)
 {
@@ -896,3 +897,4 @@ const struct dev_pm_ops imx_pinctrl_pm_ops = {
        SET_LATE_SYSTEM_SLEEP_PM_OPS(imx_pinctrl_suspend,
                                        imx_pinctrl_resume)
 };
+EXPORT_SYMBOL_GPL(imx_pinctrl_pm_ops);
index 7f32e57..12b97da 100644 (file)
@@ -165,6 +165,7 @@ static const struct of_device_id imx8dxl_pinctrl_of_match[] = {
        { .compatible = "fsl,imx8dxl-iomuxc", },
        { /* sentinel */ }
 };
+MODULE_DEVICE_TABLE(of, imx8dxl_pinctrl_of_match);
 
 static int imx8dxl_pinctrl_probe(struct platform_device *pdev)
 {
@@ -191,3 +192,7 @@ static int __init imx8dxl_pinctrl_init(void)
        return platform_driver_register(&imx8dxl_pinctrl_driver);
 }
 arch_initcall(imx8dxl_pinctrl_init);
+
+MODULE_AUTHOR("Anson Huang <Anson.Huang@nxp.com>");
+MODULE_DESCRIPTION("NXP i.MX8DXL pinctrl driver");
+MODULE_LICENSE("GPL v2");
index 6d1038a..31c5d88 100644 (file)
@@ -5,6 +5,7 @@
 
 #include <linux/err.h>
 #include <linux/init.h>
+#include <linux/module.h>
 #include <linux/of_device.h>
 #include <linux/pinctrl/pinctrl.h>
 #include <linux/platform_device.h>
@@ -326,6 +327,7 @@ static const struct of_device_id imx8mm_pinctrl_of_match[] = {
        { .compatible = "fsl,imx8mm-iomuxc", .data = &imx8mm_pinctrl_info, },
        { /* sentinel */ }
 };
+MODULE_DEVICE_TABLE(of, imx8mm_pinctrl_of_match);
 
 static int imx8mm_pinctrl_probe(struct platform_device *pdev)
 {
@@ -346,3 +348,7 @@ static int __init imx8mm_pinctrl_init(void)
        return platform_driver_register(&imx8mm_pinctrl_driver);
 }
 arch_initcall(imx8mm_pinctrl_init);
+
+MODULE_AUTHOR("Bai Ping <ping.bai@nxp.com>");
+MODULE_DESCRIPTION("NXP i.MX8MM pinctrl driver");
+MODULE_LICENSE("GPL v2");
index 100ed8c..14c9deb 100644 (file)
@@ -5,6 +5,7 @@
 
 #include <linux/err.h>
 #include <linux/init.h>
+#include <linux/module.h>
 #include <linux/of.h>
 #include <linux/pinctrl/pinctrl.h>
 #include <linux/platform_device.h>
@@ -326,6 +327,7 @@ static const struct of_device_id imx8mn_pinctrl_of_match[] = {
        { .compatible = "fsl,imx8mn-iomuxc", .data = &imx8mn_pinctrl_info, },
        { /* sentinel */ }
 };
+MODULE_DEVICE_TABLE(of, imx8mn_pinctrl_of_match);
 
 static int imx8mn_pinctrl_probe(struct platform_device *pdev)
 {
@@ -346,3 +348,7 @@ static int __init imx8mn_pinctrl_init(void)
        return platform_driver_register(&imx8mn_pinctrl_driver);
 }
 arch_initcall(imx8mn_pinctrl_init);
+
+MODULE_AUTHOR("Anson Huang <Anson.Huang@nxp.com>");
+MODULE_DESCRIPTION("NXP i.MX8MN pinctrl driver");
+MODULE_LICENSE("GPL v2");
index e3f644c..bf4bbb5 100644 (file)
@@ -5,6 +5,7 @@
 
 #include <linux/err.h>
 #include <linux/init.h>
+#include <linux/module.h>
 #include <linux/of.h>
 #include <linux/pinctrl/pinctrl.h>
 #include <linux/platform_device.h>
@@ -324,6 +325,7 @@ static const struct of_device_id imx8mp_pinctrl_of_match[] = {
        { .compatible = "fsl,imx8mp-iomuxc", .data = &imx8mp_pinctrl_info, },
        { /* sentinel */ }
 };
+MODULE_DEVICE_TABLE(of, imx8mp_pinctrl_of_match);
 
 static int imx8mp_pinctrl_probe(struct platform_device *pdev)
 {
@@ -343,3 +345,7 @@ static int __init imx8mp_pinctrl_init(void)
        return platform_driver_register(&imx8mp_pinctrl_driver);
 }
 arch_initcall(imx8mp_pinctrl_init);
+
+MODULE_AUTHOR("Anson Huang <Anson.Huang@nxp.com>");
+MODULE_DESCRIPTION("NXP i.MX8MP pinctrl driver");
+MODULE_LICENSE("GPL v2");
index 50aa1c0..ae3ea5b 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/io.h>
+#include <linux/module.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/pinctrl/pinctrl.h>
@@ -329,6 +330,7 @@ static const struct of_device_id imx8mq_pinctrl_of_match[] = {
        { .compatible = "fsl,imx8mq-iomuxc", .data = &imx8mq_pinctrl_info, },
        { /* sentinel */ }
 };
+MODULE_DEVICE_TABLE(of, imx8mq_pinctrl_of_match);
 
 static int imx8mq_pinctrl_probe(struct platform_device *pdev)
 {
@@ -350,3 +352,7 @@ static int __init imx8mq_pinctrl_init(void)
        return platform_driver_register(&imx8mq_pinctrl_driver);
 }
 arch_initcall(imx8mq_pinctrl_init);
+
+MODULE_AUTHOR("Lucas Stach <l.stach@pengutronix.de>");
+MODULE_DESCRIPTION("NXP i.MX8MQ pinctrl driver");
+MODULE_LICENSE("GPL v2");
index 0b6029b..095acf4 100644 (file)
@@ -298,6 +298,7 @@ static const struct of_device_id imx8qm_pinctrl_of_match[] = {
        { .compatible = "fsl,imx8qm-iomuxc", },
        { /* sentinel */ }
 };
+MODULE_DEVICE_TABLE(of, imx8qm_pinctrl_of_match);
 
 static int imx8qm_pinctrl_probe(struct platform_device *pdev)
 {
@@ -324,3 +325,7 @@ static int __init imx8qm_pinctrl_init(void)
        return platform_driver_register(&imx8qm_pinctrl_driver);
 }
 arch_initcall(imx8qm_pinctrl_init);
+
+MODULE_AUTHOR("Aisheng Dong <aisheng.dong@nxp.com>");
+MODULE_DESCRIPTION("NXP i.MX8QM pinctrl driver");
+MODULE_LICENSE("GPL v2");
index 1131dc3..81ebd4c 100644 (file)
@@ -204,6 +204,7 @@ static const struct of_device_id imx8qxp_pinctrl_of_match[] = {
        { .compatible = "fsl,imx8qxp-iomuxc", },
        { /* sentinel */ }
 };
+MODULE_DEVICE_TABLE(of, imx8qxp_pinctrl_of_match);
 
 static int imx8qxp_pinctrl_probe(struct platform_device *pdev)
 {
@@ -230,3 +231,7 @@ static int __init imx8qxp_pinctrl_init(void)
        return platform_driver_register(&imx8qxp_pinctrl_driver);
 }
 arch_initcall(imx8qxp_pinctrl_init);
+
+MODULE_AUTHOR("Aisheng Dong <aisheng.dong@nxp.com>");
+MODULE_DESCRIPTION("NXP i.MX8QXP pinctrl driver");
+MODULE_LICENSE("GPL v2");
index 23cf04b..9df45d3 100644 (file)
@@ -41,6 +41,7 @@ int imx_pinctrl_sc_ipc_init(struct platform_device *pdev)
 {
        return imx_scu_get_handle(&pinctrl_ipc_handle);
 }
+EXPORT_SYMBOL_GPL(imx_pinctrl_sc_ipc_init);
 
 int imx_pinconf_get_scu(struct pinctrl_dev *pctldev, unsigned pin_id,
                        unsigned long *config)
@@ -66,6 +67,7 @@ int imx_pinconf_get_scu(struct pinctrl_dev *pctldev, unsigned pin_id,
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(imx_pinconf_get_scu);
 
 int imx_pinconf_set_scu(struct pinctrl_dev *pctldev, unsigned pin_id,
                        unsigned long *configs, unsigned num_configs)
@@ -101,6 +103,7 @@ int imx_pinconf_set_scu(struct pinctrl_dev *pctldev, unsigned pin_id,
 
        return ret;
 }
+EXPORT_SYMBOL_GPL(imx_pinconf_set_scu);
 
 void imx_pinctrl_parse_pin_scu(struct imx_pinctrl *ipctl,
                               unsigned int *pin_id, struct imx_pin *pin,
@@ -119,3 +122,4 @@ void imx_pinctrl_parse_pin_scu(struct imx_pinctrl *ipctl,
        dev_dbg(ipctl->dev, "%s: 0x%x 0x%08lx", info->pins[pin->pin].name,
                pin_scu->mux_mode, pin_scu->config);
 }
+EXPORT_SYMBOL_GPL(imx_pinctrl_parse_pin_scu);
index 787833e..b3e6060 100644 (file)
@@ -95,6 +95,14 @@ config PINCTRL_DENVERTON
          This pinctrl driver provides an interface that allows configuring
          of Intel Denverton SoC pins and using them as GPIOs.
 
+config PINCTRL_EMMITSBURG
+       tristate "Intel Emmitsburg pinctrl and GPIO driver"
+       depends on ACPI
+       select PINCTRL_INTEL
+       help
+         This pinctrl driver provides an interface that allows configuring
+         of Intel Emmitsburg pins and using them as GPIOs.
+
 config PINCTRL_GEMINILAKE
        tristate "Intel Gemini Lake SoC pinctrl and GPIO driver"
        depends on ACPI
index f6f63eb..1c1c316 100644 (file)
@@ -10,6 +10,7 @@ obj-$(CONFIG_PINCTRL_BROXTON)         += pinctrl-broxton.o
 obj-$(CONFIG_PINCTRL_CANNONLAKE)       += pinctrl-cannonlake.o
 obj-$(CONFIG_PINCTRL_CEDARFORK)                += pinctrl-cedarfork.o
 obj-$(CONFIG_PINCTRL_DENVERTON)                += pinctrl-denverton.o
+obj-$(CONFIG_PINCTRL_EMMITSBURG)       += pinctrl-emmitsburg.o
 obj-$(CONFIG_PINCTRL_GEMINILAKE)       += pinctrl-geminilake.o
 obj-$(CONFIG_PINCTRL_ICELAKE)          += pinctrl-icelake.o
 obj-$(CONFIG_PINCTRL_JASPERLAKE)       += pinctrl-jasperlake.o
index 615174a..d6e35cb 100644 (file)
@@ -1372,13 +1372,13 @@ static void byt_irq_unmask(struct irq_data *d)
        switch (irqd_get_trigger_type(d)) {
        case IRQ_TYPE_LEVEL_HIGH:
                value |= BYT_TRIG_LVL;
-               /* fall through */
+               fallthrough;
        case IRQ_TYPE_EDGE_RISING:
                value |= BYT_TRIG_POS;
                break;
        case IRQ_TYPE_LEVEL_LOW:
                value |= BYT_TRIG_LVL;
-               /* fall through */
+               fallthrough;
        case IRQ_TYPE_EDGE_FALLING:
                value |= BYT_TRIG_NEG;
                break;
@@ -1796,9 +1796,8 @@ static struct platform_driver byt_gpio_driver = {
        .driver         = {
                .name                   = "byt_gpio",
                .pm                     = &byt_gpio_pm_ops,
+               .acpi_match_table       = byt_gpio_acpi_match,
                .suppress_bind_attrs    = true,
-
-               .acpi_match_table = ACPI_PTR(byt_gpio_acpi_match),
        },
 };
 
index 8e3953a..9ef2461 100644 (file)
@@ -2,7 +2,7 @@
 /*
  * Cherryview/Braswell pinctrl driver
  *
- * Copyright (C) 2014, Intel Corporation
+ * Copyright (C) 2014, 2020 Intel Corporation
  * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
  *
  * This driver is based on the original Cherryview GPIO driver by
 #define CHV_PADCTRL1_INTWAKECFG_BOTH   3
 #define CHV_PADCTRL1_INTWAKECFG_LEVEL  4
 
-/**
- * struct chv_community - A community specific configuration
- * @uid: ACPI _UID used to match the community
- * @pins: All pins in this community
- * @npins: Number of pins
- * @groups: All groups in this community
- * @ngroups: Number of groups
- * @functions: All functions in this community
- * @nfunctions: Number of functions
- * @gpps: Pad groups
- * @ngpps: Number of pad groups in this community
- * @nirqs: Total number of IRQs this community can generate
- * @acpi_space_id: An address space ID for ACPI OpRegion handler
- */
-struct chv_community {
-       const char *uid;
-       const struct pinctrl_pin_desc *pins;
-       size_t npins;
-       const struct intel_pingroup *groups;
-       size_t ngroups;
-       const struct intel_function *functions;
-       size_t nfunctions;
-       const struct intel_padgroup *gpps;
-       size_t ngpps;
-       size_t nirqs;
-       acpi_adr_space_type acpi_space_id;
-};
-
-struct chv_pin_context {
+struct intel_pad_context {
        u32 padctrl0;
        u32 padctrl1;
 };
@@ -107,13 +79,13 @@ struct chv_pin_context {
  * @pctldev: Pointer to the pin controller device
  * @chip: GPIO chip in this pin controller
  * @irqchip: IRQ chip in this pin controller
- * @regs: MMIO registers
+ * @soc: Community specific pin configuration data
+ * @communities: All communities in this pin controller
+ * @ncommunities: Number of communities in this pin controller
+ * @context: Configuration saved over system sleep
  * @irq: Our parent irq
- * @intr_lines: Stores mapping between 16 HW interrupt wires and GPIO
- *             offset (in GPIO number space)
- * @community: Community this pinctrl instance represents
+ * @intr_lines: Mapping between 16 HW interrupt wires and GPIO offset (in GPIO number space)
  * @saved_intmask: Interrupt mask saved for system sleep
- * @saved_pin_context: Pointer to a context of the pins saved for system sleep
  *
  * The first group in @groups is expected to contain all pins that can be
  * used as GPIOs.
@@ -124,24 +96,34 @@ struct chv_pinctrl {
        struct pinctrl_dev *pctldev;
        struct gpio_chip chip;
        struct irq_chip irqchip;
-       void __iomem *regs;
-       unsigned int irq;
+       const struct intel_pinctrl_soc_data *soc;
+       struct intel_community *communities;
+       size_t ncommunities;
+       struct intel_pinctrl_context context;
+       int irq;
+
        unsigned int intr_lines[16];
-       const struct chv_community *community;
        u32 saved_intmask;
-       struct chv_pin_context *saved_pin_context;
 };
 
 #define        PINMODE_INVERT_OE       BIT(15)
 
 #define PINMODE(m, i)          ((m) | ((i) * PINMODE_INVERT_OE))
 
-#define CHV_GPP(start, end)            \
+#define CHV_GPP(start, end)                    \
        {                                       \
                .base = (start),                \
                .size = (end) - (start) + 1,    \
        }
 
+#define CHV_COMMUNITY(g, i, a)                 \
+       {                                       \
+               .gpps = (g),                    \
+               .ngpps = ARRAY_SIZE(g),         \
+               .nirqs = (i),                   \
+               .acpi_space_id = (a),           \
+       }
+
 static const struct pinctrl_pin_desc southwest_pins[] = {
        PINCTRL_PIN(0, "FST_SPI_D2"),
        PINCTRL_PIN(1, "FST_SPI_D0"),
@@ -303,7 +285,15 @@ static const struct intel_padgroup southwest_gpps[] = {
        CHV_GPP(90, 97),
 };
 
-static const struct chv_community southwest_community = {
+/*
+ * Southwest community can generate GPIO interrupts only for the first 8
+ * interrupts. The upper half (8-15) can only be used to trigger GPEs.
+ */
+static const struct intel_community southwest_communities[] = {
+       CHV_COMMUNITY(southwest_gpps, 8, 0x91),
+};
+
+static const struct intel_pinctrl_soc_data southwest_soc_data = {
        .uid = "1",
        .pins = southwest_pins,
        .npins = ARRAY_SIZE(southwest_pins),
@@ -311,15 +301,8 @@ static const struct chv_community southwest_community = {
        .ngroups = ARRAY_SIZE(southwest_groups),
        .functions = southwest_functions,
        .nfunctions = ARRAY_SIZE(southwest_functions),
-       .gpps = southwest_gpps,
-       .ngpps = ARRAY_SIZE(southwest_gpps),
-       /*
-        * Southwest community can generate GPIO interrupts only for the
-        * first 8 interrupts. The upper half (8-15) can only be used to
-        * trigger GPEs.
-        */
-       .nirqs = 8,
-       .acpi_space_id = 0x91,
+       .communities = southwest_communities,
+       .ncommunities = ARRAY_SIZE(southwest_communities),
 };
 
 static const struct pinctrl_pin_desc north_pins[] = {
@@ -396,19 +379,20 @@ static const struct intel_padgroup north_gpps[] = {
        CHV_GPP(60, 72),
 };
 
-static const struct chv_community north_community = {
+/*
+ * North community can generate GPIO interrupts only for the first 8
+ * interrupts. The upper half (8-15) can only be used to trigger GPEs.
+ */
+static const struct intel_community north_communities[] = {
+       CHV_COMMUNITY(north_gpps, 8, 0x92),
+};
+
+static const struct intel_pinctrl_soc_data north_soc_data = {
        .uid = "2",
        .pins = north_pins,
        .npins = ARRAY_SIZE(north_pins),
-       .gpps = north_gpps,
-       .ngpps = ARRAY_SIZE(north_gpps),
-       /*
-        * North community can generate GPIO interrupts only for the first
-        * 8 interrupts. The upper half (8-15) can only be used to trigger
-        * GPEs.
-        */
-       .nirqs = 8,
-       .acpi_space_id = 0x92,
+       .communities = north_communities,
+       .ncommunities = ARRAY_SIZE(north_communities),
 };
 
 static const struct pinctrl_pin_desc east_pins[] = {
@@ -444,14 +428,16 @@ static const struct intel_padgroup east_gpps[] = {
        CHV_GPP(15, 26),
 };
 
-static const struct chv_community east_community = {
+static const struct intel_community east_communities[] = {
+       CHV_COMMUNITY(east_gpps, 16, 0x93),
+};
+
+static const struct intel_pinctrl_soc_data east_soc_data = {
        .uid = "3",
        .pins = east_pins,
        .npins = ARRAY_SIZE(east_pins),
-       .gpps = east_gpps,
-       .ngpps = ARRAY_SIZE(east_gpps),
-       .nirqs = 16,
-       .acpi_space_id = 0x93,
+       .communities = east_communities,
+       .ncommunities = ARRAY_SIZE(east_communities),
 };
 
 static const struct pinctrl_pin_desc southeast_pins[] = {
@@ -566,7 +552,11 @@ static const struct intel_padgroup southeast_gpps[] = {
        CHV_GPP(75, 85),
 };
 
-static const struct chv_community southeast_community = {
+static const struct intel_community southeast_communities[] = {
+       CHV_COMMUNITY(southeast_gpps, 16, 0x94),
+};
+
+static const struct intel_pinctrl_soc_data southeast_soc_data = {
        .uid = "4",
        .pins = southeast_pins,
        .npins = ARRAY_SIZE(southeast_pins),
@@ -574,17 +564,16 @@ static const struct chv_community southeast_community = {
        .ngroups = ARRAY_SIZE(southeast_groups),
        .functions = southeast_functions,
        .nfunctions = ARRAY_SIZE(southeast_functions),
-       .gpps = southeast_gpps,
-       .ngpps = ARRAY_SIZE(southeast_gpps),
-       .nirqs = 16,
-       .acpi_space_id = 0x94,
+       .communities = southeast_communities,
+       .ncommunities = ARRAY_SIZE(southeast_communities),
 };
 
-static const struct chv_community *chv_communities[] = {
-       &southwest_community,
-       &north_community,
-       &east_community,
-       &southeast_community,
+static const struct intel_pinctrl_soc_data *chv_soc_data[] = {
+       &southwest_soc_data,
+       &north_soc_data,
+       &east_soc_data,
+       &southeast_soc_data,
+       NULL
 };
 
 /*
@@ -598,39 +587,60 @@ static const struct chv_community *chv_communities[] = {
  */
 static DEFINE_RAW_SPINLOCK(chv_lock);
 
+static u32 chv_pctrl_readl(struct chv_pinctrl *pctrl, unsigned int offset)
+{
+       const struct intel_community *community = &pctrl->communities[0];
+
+       return readl(community->regs + offset);
+}
+
+static void chv_pctrl_writel(struct chv_pinctrl *pctrl, unsigned int offset, u32 value)
+{
+       const struct intel_community *community = &pctrl->communities[0];
+       void __iomem *reg = community->regs + offset;
+
+       /* Write, then read back to confirm the bus transfer has completed */
+       writel(value, reg);
+       readl(reg);
+}
+
 static void __iomem *chv_padreg(struct chv_pinctrl *pctrl, unsigned int offset,
                                unsigned int reg)
 {
+       const struct intel_community *community = &pctrl->communities[0];
        unsigned int family_no = offset / MAX_FAMILY_PAD_GPIO_NO;
        unsigned int pad_no = offset % MAX_FAMILY_PAD_GPIO_NO;
 
-       offset = FAMILY_PAD_REGS_OFF + FAMILY_PAD_REGS_SIZE * family_no +
-                GPIO_REGS_SIZE * pad_no;
+       offset = FAMILY_PAD_REGS_SIZE * family_no + GPIO_REGS_SIZE * pad_no;
 
-       return pctrl->regs + offset + reg;
+       return community->pad_regs + offset + reg;
 }
 
-static void chv_writel(u32 value, void __iomem *reg)
+static u32 chv_readl(struct chv_pinctrl *pctrl, unsigned int pin, unsigned int offset)
 {
+       return readl(chv_padreg(pctrl, pin, offset));
+}
+
+static void chv_writel(struct chv_pinctrl *pctrl, unsigned int pin, unsigned int offset, u32 value)
+{
+       void __iomem *reg = chv_padreg(pctrl, pin, offset);
+
+       /* Write, then read back to confirm the bus transfer has completed */
        writel(value, reg);
-       /* simple readback to confirm the bus transferring done */
        readl(reg);
 }
 
 /* When Pad Cfg is locked, driver can only change GPIOTXState or GPIORXState */
 static bool chv_pad_locked(struct chv_pinctrl *pctrl, unsigned int offset)
 {
-       void __iomem *reg;
-
-       reg = chv_padreg(pctrl, offset, CHV_PADCTRL1);
-       return readl(reg) & CHV_PADCTRL1_CFGLOCK;
+       return chv_readl(pctrl, offset, CHV_PADCTRL1) & CHV_PADCTRL1_CFGLOCK;
 }
 
 static int chv_get_groups_count(struct pinctrl_dev *pctldev)
 {
        struct chv_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
 
-       return pctrl->community->ngroups;
+       return pctrl->soc->ngroups;
 }
 
 static const char *chv_get_group_name(struct pinctrl_dev *pctldev,
@@ -638,7 +648,7 @@ static const char *chv_get_group_name(struct pinctrl_dev *pctldev,
 {
        struct chv_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
 
-       return pctrl->community->groups[group].name;
+       return pctrl->soc->groups[group].name;
 }
 
 static int chv_get_group_pins(struct pinctrl_dev *pctldev, unsigned int group,
@@ -646,8 +656,8 @@ static int chv_get_group_pins(struct pinctrl_dev *pctldev, unsigned int group,
 {
        struct chv_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
 
-       *pins = pctrl->community->groups[group].pins;
-       *npins = pctrl->community->groups[group].npins;
+       *pins = pctrl->soc->groups[group].pins;
+       *npins = pctrl->soc->groups[group].npins;
        return 0;
 }
 
@@ -661,8 +671,8 @@ static void chv_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
 
        raw_spin_lock_irqsave(&chv_lock, flags);
 
-       ctrl0 = readl(chv_padreg(pctrl, offset, CHV_PADCTRL0));
-       ctrl1 = readl(chv_padreg(pctrl, offset, CHV_PADCTRL1));
+       ctrl0 = chv_readl(pctrl, offset, CHV_PADCTRL0);
+       ctrl1 = chv_readl(pctrl, offset, CHV_PADCTRL1);
        locked = chv_pad_locked(pctrl, offset);
 
        raw_spin_unlock_irqrestore(&chv_lock, flags);
@@ -695,7 +705,7 @@ static int chv_get_functions_count(struct pinctrl_dev *pctldev)
 {
        struct chv_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
 
-       return pctrl->community->nfunctions;
+       return pctrl->soc->nfunctions;
 }
 
 static const char *chv_get_function_name(struct pinctrl_dev *pctldev,
@@ -703,7 +713,7 @@ static const char *chv_get_function_name(struct pinctrl_dev *pctldev,
 {
        struct chv_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
 
-       return pctrl->community->functions[function].name;
+       return pctrl->soc->functions[function].name;
 }
 
 static int chv_get_function_groups(struct pinctrl_dev *pctldev,
@@ -713,8 +723,8 @@ static int chv_get_function_groups(struct pinctrl_dev *pctldev,
 {
        struct chv_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
 
-       *groups = pctrl->community->functions[function].groups;
-       *ngroups = pctrl->community->functions[function].ngroups;
+       *groups = pctrl->soc->functions[function].groups;
+       *ngroups = pctrl->soc->functions[function].ngroups;
        return 0;
 }
 
@@ -726,7 +736,7 @@ static int chv_pinmux_set_mux(struct pinctrl_dev *pctldev,
        unsigned long flags;
        int i;
 
-       grp = &pctrl->community->groups[group];
+       grp = &pctrl->soc->groups[group];
 
        raw_spin_lock_irqsave(&chv_lock, flags);
 
@@ -742,7 +752,6 @@ static int chv_pinmux_set_mux(struct pinctrl_dev *pctldev,
 
        for (i = 0; i < grp->npins; i++) {
                int pin = grp->pins[i];
-               void __iomem *reg;
                unsigned int mode;
                bool invert_oe;
                u32 value;
@@ -757,21 +766,19 @@ static int chv_pinmux_set_mux(struct pinctrl_dev *pctldev,
                invert_oe = mode & PINMODE_INVERT_OE;
                mode &= ~PINMODE_INVERT_OE;
 
-               reg = chv_padreg(pctrl, pin, CHV_PADCTRL0);
-               value = readl(reg);
+               value = chv_readl(pctrl, pin, CHV_PADCTRL0);
                /* Disable GPIO mode */
                value &= ~CHV_PADCTRL0_GPIOEN;
                /* Set to desired mode */
                value &= ~CHV_PADCTRL0_PMODE_MASK;
                value |= mode << CHV_PADCTRL0_PMODE_SHIFT;
-               chv_writel(value, reg);
+               chv_writel(pctrl, pin, CHV_PADCTRL0, value);
 
                /* Update for invert_oe */
-               reg = chv_padreg(pctrl, pin, CHV_PADCTRL1);
-               value = readl(reg) & ~CHV_PADCTRL1_INVRXTX_MASK;
+               value = chv_readl(pctrl, pin, CHV_PADCTRL1) & ~CHV_PADCTRL1_INVRXTX_MASK;
                if (invert_oe)
                        value |= CHV_PADCTRL1_INVRXTX_TXENABLE;
-               chv_writel(value, reg);
+               chv_writel(pctrl, pin, CHV_PADCTRL1, value);
 
                dev_dbg(pctrl->dev, "configured pin %u mode %u OE %sinverted\n",
                        pin, mode, invert_oe ? "" : "not ");
@@ -785,14 +792,12 @@ static int chv_pinmux_set_mux(struct pinctrl_dev *pctldev,
 static void chv_gpio_clear_triggering(struct chv_pinctrl *pctrl,
                                      unsigned int offset)
 {
-       void __iomem *reg;
        u32 value;
 
-       reg = chv_padreg(pctrl, offset, CHV_PADCTRL1);
-       value = readl(reg);
+       value = chv_readl(pctrl, offset, CHV_PADCTRL1);
        value &= ~CHV_PADCTRL1_INTWAKECFG_MASK;
        value &= ~CHV_PADCTRL1_INVRXTX_MASK;
-       chv_writel(value, reg);
+       chv_writel(pctrl, offset, CHV_PADCTRL1, value);
 }
 
 static int chv_gpio_request_enable(struct pinctrl_dev *pctldev,
@@ -801,13 +806,12 @@ static int chv_gpio_request_enable(struct pinctrl_dev *pctldev,
 {
        struct chv_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
        unsigned long flags;
-       void __iomem *reg;
        u32 value;
 
        raw_spin_lock_irqsave(&chv_lock, flags);
 
        if (chv_pad_locked(pctrl, offset)) {
-               value = readl(chv_padreg(pctrl, offset, CHV_PADCTRL0));
+               value = chv_readl(pctrl, offset, CHV_PADCTRL0);
                if (!(value & CHV_PADCTRL0_GPIOEN)) {
                        /* Locked so cannot enable */
                        raw_spin_unlock_irqrestore(&chv_lock, flags);
@@ -827,8 +831,7 @@ static int chv_gpio_request_enable(struct pinctrl_dev *pctldev,
                /* Disable interrupt generation */
                chv_gpio_clear_triggering(pctrl, offset);
 
-               reg = chv_padreg(pctrl, offset, CHV_PADCTRL0);
-               value = readl(reg);
+               value = chv_readl(pctrl, offset, CHV_PADCTRL0);
 
                /*
                 * If the pin is in HiZ mode (both TX and RX buffers are
@@ -837,13 +840,12 @@ static int chv_gpio_request_enable(struct pinctrl_dev *pctldev,
                if ((value & CHV_PADCTRL0_GPIOCFG_MASK) ==
                     (CHV_PADCTRL0_GPIOCFG_HIZ << CHV_PADCTRL0_GPIOCFG_SHIFT)) {
                        value &= ~CHV_PADCTRL0_GPIOCFG_MASK;
-                       value |= CHV_PADCTRL0_GPIOCFG_GPI <<
-                               CHV_PADCTRL0_GPIOCFG_SHIFT;
+                       value |= CHV_PADCTRL0_GPIOCFG_GPI << CHV_PADCTRL0_GPIOCFG_SHIFT;
                }
 
                /* Switch to a GPIO mode */
                value |= CHV_PADCTRL0_GPIOEN;
-               chv_writel(value, reg);
+               chv_writel(pctrl, offset, CHV_PADCTRL0, value);
        }
 
        raw_spin_unlock_irqrestore(&chv_lock, flags);
@@ -871,18 +873,17 @@ static int chv_gpio_set_direction(struct pinctrl_dev *pctldev,
                                  unsigned int offset, bool input)
 {
        struct chv_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
-       void __iomem *reg = chv_padreg(pctrl, offset, CHV_PADCTRL0);
        unsigned long flags;
        u32 ctrl0;
 
        raw_spin_lock_irqsave(&chv_lock, flags);
 
-       ctrl0 = readl(reg) & ~CHV_PADCTRL0_GPIOCFG_MASK;
+       ctrl0 = chv_readl(pctrl, offset, CHV_PADCTRL0) & ~CHV_PADCTRL0_GPIOCFG_MASK;
        if (input)
                ctrl0 |= CHV_PADCTRL0_GPIOCFG_GPI << CHV_PADCTRL0_GPIOCFG_SHIFT;
        else
                ctrl0 |= CHV_PADCTRL0_GPIOCFG_GPO << CHV_PADCTRL0_GPIOCFG_SHIFT;
-       chv_writel(ctrl0, reg);
+       chv_writel(pctrl, offset, CHV_PADCTRL0, ctrl0);
 
        raw_spin_unlock_irqrestore(&chv_lock, flags);
 
@@ -910,8 +911,8 @@ static int chv_config_get(struct pinctrl_dev *pctldev, unsigned int pin,
        u32 term;
 
        raw_spin_lock_irqsave(&chv_lock, flags);
-       ctrl0 = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
-       ctrl1 = readl(chv_padreg(pctrl, pin, CHV_PADCTRL1));
+       ctrl0 = chv_readl(pctrl, pin, CHV_PADCTRL0);
+       ctrl1 = chv_readl(pctrl, pin, CHV_PADCTRL1);
        raw_spin_unlock_irqrestore(&chv_lock, flags);
 
        term = (ctrl0 & CHV_PADCTRL0_TERM_MASK) >> CHV_PADCTRL0_TERM_SHIFT;
@@ -982,12 +983,11 @@ static int chv_config_get(struct pinctrl_dev *pctldev, unsigned int pin,
 static int chv_config_set_pull(struct chv_pinctrl *pctrl, unsigned int pin,
                               enum pin_config_param param, u32 arg)
 {
-       void __iomem *reg = chv_padreg(pctrl, pin, CHV_PADCTRL0);
        unsigned long flags;
        u32 ctrl0, pull;
 
        raw_spin_lock_irqsave(&chv_lock, flags);
-       ctrl0 = readl(reg);
+       ctrl0 = chv_readl(pctrl, pin, CHV_PADCTRL0);
 
        switch (param) {
        case PIN_CONFIG_BIAS_DISABLE:
@@ -1039,7 +1039,7 @@ static int chv_config_set_pull(struct chv_pinctrl *pctrl, unsigned int pin,
                return -EINVAL;
        }
 
-       chv_writel(ctrl0, reg);
+       chv_writel(pctrl, pin, CHV_PADCTRL0, ctrl0);
        raw_spin_unlock_irqrestore(&chv_lock, flags);
 
        return 0;
@@ -1048,19 +1048,18 @@ static int chv_config_set_pull(struct chv_pinctrl *pctrl, unsigned int pin,
 static int chv_config_set_oden(struct chv_pinctrl *pctrl, unsigned int pin,
                               bool enable)
 {
-       void __iomem *reg = chv_padreg(pctrl, pin, CHV_PADCTRL1);
        unsigned long flags;
        u32 ctrl1;
 
        raw_spin_lock_irqsave(&chv_lock, flags);
-       ctrl1 = readl(reg);
+       ctrl1 = chv_readl(pctrl, pin, CHV_PADCTRL1);
 
        if (enable)
                ctrl1 |= CHV_PADCTRL1_ODEN;
        else
                ctrl1 &= ~CHV_PADCTRL1_ODEN;
 
-       chv_writel(ctrl1, reg);
+       chv_writel(pctrl, pin, CHV_PADCTRL1, ctrl1);
        raw_spin_unlock_irqrestore(&chv_lock, flags);
 
        return 0;
@@ -1175,7 +1174,7 @@ static int chv_gpio_get(struct gpio_chip *chip, unsigned int offset)
        u32 ctrl0, cfg;
 
        raw_spin_lock_irqsave(&chv_lock, flags);
-       ctrl0 = readl(chv_padreg(pctrl, offset, CHV_PADCTRL0));
+       ctrl0 = chv_readl(pctrl, offset, CHV_PADCTRL0);
        raw_spin_unlock_irqrestore(&chv_lock, flags);
 
        cfg = ctrl0 & CHV_PADCTRL0_GPIOCFG_MASK;
@@ -1190,20 +1189,18 @@ static void chv_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
 {
        struct chv_pinctrl *pctrl = gpiochip_get_data(chip);
        unsigned long flags;
-       void __iomem *reg;
        u32 ctrl0;
 
        raw_spin_lock_irqsave(&chv_lock, flags);
 
-       reg = chv_padreg(pctrl, offset, CHV_PADCTRL0);
-       ctrl0 = readl(reg);
+       ctrl0 = chv_readl(pctrl, offset, CHV_PADCTRL0);
 
        if (value)
                ctrl0 |= CHV_PADCTRL0_GPIOTXSTATE;
        else
                ctrl0 &= ~CHV_PADCTRL0_GPIOTXSTATE;
 
-       chv_writel(ctrl0, reg);
+       chv_writel(pctrl, offset, CHV_PADCTRL0, ctrl0);
 
        raw_spin_unlock_irqrestore(&chv_lock, flags);
 }
@@ -1215,7 +1212,7 @@ static int chv_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
        unsigned long flags;
 
        raw_spin_lock_irqsave(&chv_lock, flags);
-       ctrl0 = readl(chv_padreg(pctrl, offset, CHV_PADCTRL0));
+       ctrl0 = chv_readl(pctrl, offset, CHV_PADCTRL0);
        raw_spin_unlock_irqrestore(&chv_lock, flags);
 
        direction = ctrl0 & CHV_PADCTRL0_GPIOCFG_MASK;
@@ -1259,10 +1256,10 @@ static void chv_gpio_irq_ack(struct irq_data *d)
 
        raw_spin_lock(&chv_lock);
 
-       intr_line = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
+       intr_line = chv_readl(pctrl, pin, CHV_PADCTRL0);
        intr_line &= CHV_PADCTRL0_INTSEL_MASK;
        intr_line >>= CHV_PADCTRL0_INTSEL_SHIFT;
-       chv_writel(BIT(intr_line), pctrl->regs + CHV_INTSTAT);
+       chv_pctrl_writel(pctrl, CHV_INTSTAT, BIT(intr_line));
 
        raw_spin_unlock(&chv_lock);
 }
@@ -1277,16 +1274,16 @@ static void chv_gpio_irq_mask_unmask(struct irq_data *d, bool mask)
 
        raw_spin_lock_irqsave(&chv_lock, flags);
 
-       intr_line = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
+       intr_line = chv_readl(pctrl, pin, CHV_PADCTRL0);
        intr_line &= CHV_PADCTRL0_INTSEL_MASK;
        intr_line >>= CHV_PADCTRL0_INTSEL_SHIFT;
 
-       value = readl(pctrl->regs + CHV_INTMASK);
+       value = chv_pctrl_readl(pctrl, CHV_INTMASK);
        if (mask)
                value &= ~BIT(intr_line);
        else
                value |= BIT(intr_line);
-       chv_writel(value, pctrl->regs + CHV_INTMASK);
+       chv_pctrl_writel(pctrl, CHV_INTMASK, value);
 
        raw_spin_unlock_irqrestore(&chv_lock, flags);
 }
@@ -1322,11 +1319,11 @@ static unsigned chv_gpio_irq_startup(struct irq_data *d)
                u32 intsel, value;
 
                raw_spin_lock_irqsave(&chv_lock, flags);
-               intsel = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
+               intsel = chv_readl(pctrl, pin, CHV_PADCTRL0);
                intsel &= CHV_PADCTRL0_INTSEL_MASK;
                intsel >>= CHV_PADCTRL0_INTSEL_SHIFT;
 
-               value = readl(chv_padreg(pctrl, pin, CHV_PADCTRL1));
+               value = chv_readl(pctrl, pin, CHV_PADCTRL1);
                if (value & CHV_PADCTRL1_INTWAKECFG_LEVEL)
                        handler = handle_level_irq;
                else
@@ -1367,9 +1364,7 @@ static int chv_gpio_irq_type(struct irq_data *d, unsigned int type)
         *      Driver programs the IntWakeCfg bits and saves the mapping.
         */
        if (!chv_pad_locked(pctrl, pin)) {
-               void __iomem *reg = chv_padreg(pctrl, pin, CHV_PADCTRL1);
-
-               value = readl(reg);
+               value = chv_readl(pctrl, pin, CHV_PADCTRL1);
                value &= ~CHV_PADCTRL1_INTWAKECFG_MASK;
                value &= ~CHV_PADCTRL1_INVRXTX_MASK;
 
@@ -1386,10 +1381,10 @@ static int chv_gpio_irq_type(struct irq_data *d, unsigned int type)
                                value |= CHV_PADCTRL1_INVRXTX_RXDATA;
                }
 
-               chv_writel(value, reg);
+               chv_writel(pctrl, pin, CHV_PADCTRL1, value);
        }
 
-       value = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
+       value = chv_readl(pctrl, pin, CHV_PADCTRL0);
        value &= CHV_PADCTRL0_INTSEL_MASK;
        value >>= CHV_PADCTRL0_INTSEL_SHIFT;
 
@@ -1409,6 +1404,7 @@ static void chv_gpio_irq_handler(struct irq_desc *desc)
 {
        struct gpio_chip *gc = irq_desc_get_handler_data(desc);
        struct chv_pinctrl *pctrl = gpiochip_get_data(gc);
+       const struct intel_community *community = &pctrl->communities[0];
        struct irq_chip *chip = irq_desc_get_chip(desc);
        unsigned long pending;
        unsigned long flags;
@@ -1417,10 +1413,10 @@ static void chv_gpio_irq_handler(struct irq_desc *desc)
        chained_irq_enter(chip, desc);
 
        raw_spin_lock_irqsave(&chv_lock, flags);
-       pending = readl(pctrl->regs + CHV_INTSTAT);
+       pending = chv_pctrl_readl(pctrl, CHV_INTSTAT);
        raw_spin_unlock_irqrestore(&chv_lock, flags);
 
-       for_each_set_bit(intr_line, &pending, pctrl->community->nirqs) {
+       for_each_set_bit(intr_line, &pending, community->nirqs) {
                unsigned int irq, offset;
 
                offset = pctrl->intr_lines[intr_line];
@@ -1477,17 +1473,17 @@ static void chv_init_irq_valid_mask(struct gpio_chip *chip,
                                    unsigned int ngpios)
 {
        struct chv_pinctrl *pctrl = gpiochip_get_data(chip);
-       const struct chv_community *community = pctrl->community;
+       const struct intel_community *community = &pctrl->communities[0];
        int i;
 
        /* Do not add GPIOs that can only generate GPEs to the IRQ domain */
-       for (i = 0; i < community->npins; i++) {
+       for (i = 0; i < pctrl->soc->npins; i++) {
                const struct pinctrl_pin_desc *desc;
                u32 intsel;
 
-               desc = &community->pins[i];
+               desc = &pctrl->soc->pins[i];
 
-               intsel = readl(chv_padreg(pctrl, desc->number, CHV_PADCTRL0));
+               intsel = chv_readl(pctrl, desc->number, CHV_PADCTRL0);
                intsel &= CHV_PADCTRL0_INTSEL_MASK;
                intsel >>= CHV_PADCTRL0_INTSEL_SHIFT;
 
@@ -1499,6 +1495,7 @@ static void chv_init_irq_valid_mask(struct gpio_chip *chip,
 static int chv_gpio_irq_init_hw(struct gpio_chip *chip)
 {
        struct chv_pinctrl *pctrl = gpiochip_get_data(chip);
+       const struct intel_community *community = &pctrl->communities[0];
 
        /*
         * The same set of machines in chv_no_valid_mask[] have incorrectly
@@ -1512,12 +1509,11 @@ static int chv_gpio_irq_init_hw(struct gpio_chip *chip)
                 * Mask all interrupts the community is able to generate
                 * but leave the ones that can only generate GPEs unmasked.
                 */
-               chv_writel(GENMASK(31, pctrl->community->nirqs),
-                          pctrl->regs + CHV_INTMASK);
+               chv_pctrl_writel(pctrl, CHV_INTMASK, GENMASK(31, community->nirqs));
        }
 
        /* Clear all interrupts */
-       chv_writel(0xffff, pctrl->regs + CHV_INTSTAT);
+       chv_pctrl_writel(pctrl, CHV_INTSTAT, 0xffff);
 
        return 0;
 }
@@ -1525,7 +1521,7 @@ static int chv_gpio_irq_init_hw(struct gpio_chip *chip)
 static int chv_gpio_add_pin_ranges(struct gpio_chip *chip)
 {
        struct chv_pinctrl *pctrl = gpiochip_get_data(chip);
-       const struct chv_community *community = pctrl->community;
+       const struct intel_community *community = &pctrl->communities[0];
        const struct intel_padgroup *gpp;
        int ret, i;
 
@@ -1545,15 +1541,15 @@ static int chv_gpio_add_pin_ranges(struct gpio_chip *chip)
 
 static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
 {
+       const struct intel_community *community = &pctrl->communities[0];
        const struct intel_padgroup *gpp;
        struct gpio_chip *chip = &pctrl->chip;
        bool need_valid_mask = !dmi_check_system(chv_no_valid_mask);
-       const struct chv_community *community = pctrl->community;
        int ret, i, irq_base;
 
        *chip = chv_gpio_chip;
 
-       chip->ngpio = community->pins[community->npins - 1].number + 1;
+       chip->ngpio = pctrl->soc->pins[pctrl->soc->npins - 1].number + 1;
        chip->label = dev_name(pctrl->dev);
        chip->add_pin_ranges = chv_gpio_add_pin_ranges;
        chip->parent = pctrl->dev;
@@ -1579,7 +1575,7 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
                chip->irq.init_valid_mask = chv_init_irq_valid_mask;
        } else {
                irq_base = devm_irq_alloc_descs(pctrl->dev, -1, 0,
-                                               community->npins, NUMA_NO_NODE);
+                                               pctrl->soc->npins, NUMA_NO_NODE);
                if (irq_base < 0) {
                        dev_err(pctrl->dev, "Failed to allocate IRQ numbers\n");
                        return irq_base;
@@ -1616,9 +1612,9 @@ static acpi_status chv_pinctrl_mmio_access_handler(u32 function,
        raw_spin_lock_irqsave(&chv_lock, flags);
 
        if (function == ACPI_WRITE)
-               chv_writel((u32)(*value), pctrl->regs + (u32)address);
+               chv_pctrl_writel(pctrl, address, *value);
        else if (function == ACPI_READ)
-               *value = readl(pctrl->regs + (u32)address);
+               *value = chv_pctrl_readl(pctrl, address);
        else
                ret = AE_BAD_PARAMETER;
 
@@ -1629,6 +1625,10 @@ static acpi_status chv_pinctrl_mmio_access_handler(u32 function,
 
 static int chv_pinctrl_probe(struct platform_device *pdev)
 {
+       const struct intel_pinctrl_soc_data *soc_data = NULL;
+       const struct intel_pinctrl_soc_data **soc_table;
+       struct intel_community *community;
+       struct device *dev = &pdev->dev;
        struct chv_pinctrl *pctrl;
        struct acpi_device *adev;
        acpi_status status;
@@ -1638,40 +1638,53 @@ static int chv_pinctrl_probe(struct platform_device *pdev)
        if (!adev)
                return -ENODEV;
 
-       pctrl = devm_kzalloc(&pdev->dev, sizeof(*pctrl), GFP_KERNEL);
-       if (!pctrl)
-               return -ENOMEM;
-
-       for (i = 0; i < ARRAY_SIZE(chv_communities); i++)
-               if (!strcmp(adev->pnp.unique_id, chv_communities[i]->uid)) {
-                       pctrl->community = chv_communities[i];
+       soc_table = (const struct intel_pinctrl_soc_data **)device_get_match_data(dev);
+       for (i = 0; soc_table[i]; i++) {
+               if (!strcmp(adev->pnp.unique_id, soc_table[i]->uid)) {
+                       soc_data = soc_table[i];
                        break;
                }
-       if (i == ARRAY_SIZE(chv_communities))
+       }
+       if (!soc_data)
                return -ENODEV;
 
+       pctrl = devm_kzalloc(dev, sizeof(*pctrl), GFP_KERNEL);
+       if (!pctrl)
+               return -ENOMEM;
+
        pctrl->dev = &pdev->dev;
+       pctrl->soc = soc_data;
+
+       pctrl->ncommunities = pctrl->soc->ncommunities;
+       pctrl->communities = devm_kmemdup(dev, pctrl->soc->communities,
+                                         pctrl->ncommunities * sizeof(*pctrl->communities),
+                                         GFP_KERNEL);
+       if (!pctrl->communities)
+               return -ENOMEM;
+
+       community = &pctrl->communities[0];
+       community->regs = devm_platform_ioremap_resource(pdev, 0);
+       if (IS_ERR(community->regs))
+               return PTR_ERR(community->regs);
+
+       community->pad_regs = community->regs + FAMILY_PAD_REGS_OFF;
 
 #ifdef CONFIG_PM_SLEEP
-       pctrl->saved_pin_context = devm_kcalloc(pctrl->dev,
-               pctrl->community->npins, sizeof(*pctrl->saved_pin_context),
-               GFP_KERNEL);
-       if (!pctrl->saved_pin_context)
+       pctrl->context.pads = devm_kcalloc(dev, pctrl->soc->npins,
+                                          sizeof(*pctrl->context.pads),
+                                          GFP_KERNEL);
+       if (!pctrl->context.pads)
                return -ENOMEM;
 #endif
 
-       pctrl->regs = devm_platform_ioremap_resource(pdev, 0);
-       if (IS_ERR(pctrl->regs))
-               return PTR_ERR(pctrl->regs);
-
        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;
 
        pctrl->pctldesc = chv_pinctrl_desc;
        pctrl->pctldesc.name = dev_name(&pdev->dev);
-       pctrl->pctldesc.pins = pctrl->community->pins;
-       pctrl->pctldesc.npins = pctrl->community->npins;
+       pctrl->pctldesc.pins = pctrl->soc->pins;
+       pctrl->pctldesc.npins = pctrl->soc->npins;
 
        pctrl->pctldev = devm_pinctrl_register(&pdev->dev, &pctrl->pctldesc,
                                               pctrl);
@@ -1685,7 +1698,7 @@ static int chv_pinctrl_probe(struct platform_device *pdev)
                return ret;
 
        status = acpi_install_address_space_handler(adev->handle,
-                                       pctrl->community->acpi_space_id,
+                                       community->acpi_space_id,
                                        chv_pinctrl_mmio_access_handler,
                                        NULL, pctrl);
        if (ACPI_FAILURE(status))
@@ -1699,9 +1712,10 @@ static int chv_pinctrl_probe(struct platform_device *pdev)
 static int chv_pinctrl_remove(struct platform_device *pdev)
 {
        struct chv_pinctrl *pctrl = platform_get_drvdata(pdev);
+       const struct intel_community *community = &pctrl->communities[0];
 
        acpi_remove_address_space_handler(ACPI_COMPANION(&pdev->dev),
-                                         pctrl->community->acpi_space_id,
+                                         community->acpi_space_id,
                                          chv_pinctrl_mmio_access_handler);
 
        return 0;
@@ -1716,24 +1730,20 @@ static int chv_pinctrl_suspend_noirq(struct device *dev)
 
        raw_spin_lock_irqsave(&chv_lock, flags);
 
-       pctrl->saved_intmask = readl(pctrl->regs + CHV_INTMASK);
+       pctrl->saved_intmask = chv_pctrl_readl(pctrl, CHV_INTMASK);
 
-       for (i = 0; i < pctrl->community->npins; i++) {
+       for (i = 0; i < pctrl->soc->npins; i++) {
                const struct pinctrl_pin_desc *desc;
-               struct chv_pin_context *ctx;
-               void __iomem *reg;
+               struct intel_pad_context *ctx = &pctrl->context.pads[i];
 
-               desc = &pctrl->community->pins[i];
+               desc = &pctrl->soc->pins[i];
                if (chv_pad_locked(pctrl, desc->number))
                        continue;
 
-               ctx = &pctrl->saved_pin_context[i];
-
-               reg = chv_padreg(pctrl, desc->number, CHV_PADCTRL0);
-               ctx->padctrl0 = readl(reg) & ~CHV_PADCTRL0_GPIORXSTATE;
+               ctx->padctrl0 = chv_readl(pctrl, desc->number, CHV_PADCTRL0);
+               ctx->padctrl0 &= ~CHV_PADCTRL0_GPIORXSTATE;
 
-               reg = chv_padreg(pctrl, desc->number, CHV_PADCTRL1);
-               ctx->padctrl1 = readl(reg);
+               ctx->padctrl1 = chv_readl(pctrl, desc->number, CHV_PADCTRL1);
        }
 
        raw_spin_unlock_irqrestore(&chv_lock, flags);
@@ -1754,35 +1764,31 @@ static int chv_pinctrl_resume_noirq(struct device *dev)
         * registers because we don't know in which state BIOS left them
         * upon exiting suspend.
         */
-       chv_writel(0, pctrl->regs + CHV_INTMASK);
+       chv_pctrl_writel(pctrl, CHV_INTMASK, 0x0000);
 
-       for (i = 0; i < pctrl->community->npins; i++) {
+       for (i = 0; i < pctrl->soc->npins; i++) {
                const struct pinctrl_pin_desc *desc;
-               const struct chv_pin_context *ctx;
-               void __iomem *reg;
+               struct intel_pad_context *ctx = &pctrl->context.pads[i];
                u32 val;
 
-               desc = &pctrl->community->pins[i];
+               desc = &pctrl->soc->pins[i];
                if (chv_pad_locked(pctrl, desc->number))
                        continue;
 
-               ctx = &pctrl->saved_pin_context[i];
-
                /* Only restore if our saved state differs from the current */
-               reg = chv_padreg(pctrl, desc->number, CHV_PADCTRL0);
-               val = readl(reg) & ~CHV_PADCTRL0_GPIORXSTATE;
+               val = chv_readl(pctrl, desc->number, CHV_PADCTRL0);
+               val &= ~CHV_PADCTRL0_GPIORXSTATE;
                if (ctx->padctrl0 != val) {
-                       chv_writel(ctx->padctrl0, reg);
+                       chv_writel(pctrl, desc->number, CHV_PADCTRL0, ctx->padctrl0);
                        dev_dbg(pctrl->dev, "restored pin %2u ctrl0 0x%08x\n",
-                               desc->number, readl(reg));
+                               desc->number, chv_readl(pctrl, desc->number, CHV_PADCTRL0));
                }
 
-               reg = chv_padreg(pctrl, desc->number, CHV_PADCTRL1);
-               val = readl(reg);
+               val = chv_readl(pctrl, desc->number, CHV_PADCTRL1);
                if (ctx->padctrl1 != val) {
-                       chv_writel(ctx->padctrl1, reg);
+                       chv_writel(pctrl, desc->number, CHV_PADCTRL1, ctx->padctrl1);
                        dev_dbg(pctrl->dev, "restored pin %2u ctrl1 0x%08x\n",
-                               desc->number, readl(reg));
+                               desc->number, chv_readl(pctrl, desc->number, CHV_PADCTRL1));
                }
        }
 
@@ -1790,8 +1796,8 @@ static int chv_pinctrl_resume_noirq(struct device *dev)
         * Now that all pins are restored to known state, we can restore
         * the interrupt mask register as well.
         */
-       chv_writel(0xffff, pctrl->regs + CHV_INTSTAT);
-       chv_writel(pctrl->saved_intmask, pctrl->regs + CHV_INTMASK);
+       chv_pctrl_writel(pctrl, CHV_INTSTAT, 0xffff);
+       chv_pctrl_writel(pctrl, CHV_INTMASK, pctrl->saved_intmask);
 
        raw_spin_unlock_irqrestore(&chv_lock, flags);
 
@@ -1805,7 +1811,7 @@ static const struct dev_pm_ops chv_pinctrl_pm_ops = {
 };
 
 static const struct acpi_device_id chv_pinctrl_acpi_match[] = {
-       { "INT33FF" },
+       { "INT33FF", (kernel_ulong_t)chv_soc_data },
        { }
 };
 MODULE_DEVICE_TABLE(acpi, chv_pinctrl_acpi_match);
diff --git a/drivers/pinctrl/intel/pinctrl-emmitsburg.c b/drivers/pinctrl/intel/pinctrl-emmitsburg.c
new file mode 100644 (file)
index 0000000..f6114db
--- /dev/null
@@ -0,0 +1,387 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Emmitsburg PCH pinctrl/GPIO driver
+ *
+ * Copyright (C) 2020, Intel Corporation
+ * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ */
+
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-intel.h"
+
+#define EBG_PAD_OWN    0x0a0
+#define EBG_PADCFGLOCK 0x100
+#define EBG_HOSTSW_OWN 0x130
+#define EBG_GPI_IS     0x200
+#define EBG_GPI_IE     0x210
+
+#define EBG_GPP(r, s, e)                               \
+       {                                               \
+               .reg_num = (r),                         \
+               .base = (s),                            \
+               .size = ((e) - (s) + 1),                \
+       }
+
+#define EBG_COMMUNITY(b, s, e, g)                      \
+       {                                               \
+               .barno = (b),                           \
+               .padown_offset = EBG_PAD_OWN,           \
+               .padcfglock_offset = EBG_PADCFGLOCK,    \
+               .hostown_offset = EBG_HOSTSW_OWN,       \
+               .is_offset = EBG_GPI_IS,                \
+               .ie_offset = EBG_GPI_IE,                \
+               .pin_base = (s),                        \
+               .npins = ((e) - (s) + 1),               \
+               .gpps = (g),                            \
+               .ngpps = ARRAY_SIZE(g),                 \
+       }
+
+/* Emmitsburg */
+static const struct pinctrl_pin_desc ebg_pins[] = {
+       /* GPP_A */
+       PINCTRL_PIN(0, "ESPI_ALERT0B"),
+       PINCTRL_PIN(1, "ESPI_ALERT1B"),
+       PINCTRL_PIN(2, "ESPI_IO_0"),
+       PINCTRL_PIN(3, "ESPI_IO_1"),
+       PINCTRL_PIN(4, "ESPI_IO_2"),
+       PINCTRL_PIN(5, "ESPI_IO_3"),
+       PINCTRL_PIN(6, "ESPI_CS0B"),
+       PINCTRL_PIN(7, "ESPI_CS1B"),
+       PINCTRL_PIN(8, "ESPI_RESETB"),
+       PINCTRL_PIN(9, "ESPI_CLK"),
+       PINCTRL_PIN(10, "SRCCLKREQB_0"),
+       PINCTRL_PIN(11, "SRCCLKREQB_1"),
+       PINCTRL_PIN(12, "SRCCLKREQB_2"),
+       PINCTRL_PIN(13, "SRCCLKREQB_3"),
+       PINCTRL_PIN(14, "SRCCLKREQB_4"),
+       PINCTRL_PIN(15, "SRCCLKREQB_5"),
+       PINCTRL_PIN(16, "SRCCLKREQB_6"),
+       PINCTRL_PIN(17, "SRCCLKREQB_7"),
+       PINCTRL_PIN(18, "SRCCLKREQB_8"),
+       PINCTRL_PIN(19, "SRCCLKREQB_9"),
+       PINCTRL_PIN(20, "ESPI_CLK_LOOPBK"),
+       /* GPP_B */
+       PINCTRL_PIN(21, "GSXDOUT"),
+       PINCTRL_PIN(22, "GSXSLOAD"),
+       PINCTRL_PIN(23, "GSXDIN"),
+       PINCTRL_PIN(24, "GSXSRESETB"),
+       PINCTRL_PIN(25, "GSXCLK"),
+       PINCTRL_PIN(26, "USB2_OCB_0"),
+       PINCTRL_PIN(27, "USB2_OCB_1"),
+       PINCTRL_PIN(28, "USB2_OCB_2"),
+       PINCTRL_PIN(29, "USB2_OCB_3"),
+       PINCTRL_PIN(30, "USB2_OCB_4"),
+       PINCTRL_PIN(31, "USB2_OCB_5"),
+       PINCTRL_PIN(32, "USB2_OCB_6"),
+       PINCTRL_PIN(33, "HS_UART0_RXD"),
+       PINCTRL_PIN(34, "HS_UART0_TXD"),
+       PINCTRL_PIN(35, "HS_UART0_RTSB"),
+       PINCTRL_PIN(36, "HS_UART0_CTSB"),
+       PINCTRL_PIN(37, "HS_UART1_RXD"),
+       PINCTRL_PIN(38, "HS_UART1_TXD"),
+       PINCTRL_PIN(39, "HS_UART1_RTSB"),
+       PINCTRL_PIN(40, "HS_UART1_CTSB"),
+       PINCTRL_PIN(41, "GPPC_B_20"),
+       PINCTRL_PIN(42, "GPPC_B_21"),
+       PINCTRL_PIN(43, "GPPC_B_22"),
+       PINCTRL_PIN(44, "PS_ONB"),
+       /* SPI */
+       PINCTRL_PIN(45, "SPI0_IO_2"),
+       PINCTRL_PIN(46, "SPI0_IO_3"),
+       PINCTRL_PIN(47, "SPI0_MOSI_IO_0"),
+       PINCTRL_PIN(48, "SPI0_MISO_IO_1"),
+       PINCTRL_PIN(49, "SPI0_TPM_CSB"),
+       PINCTRL_PIN(50, "SPI0_FLASH_0_CSB"),
+       PINCTRL_PIN(51, "SPI0_FLASH_1_CSB"),
+       PINCTRL_PIN(52, "SPI0_CLK"),
+       PINCTRL_PIN(53, "TIME_SYNC_0"),
+       PINCTRL_PIN(54, "SPKR"),
+       PINCTRL_PIN(55, "CPU_GP_0"),
+       PINCTRL_PIN(56, "CPU_GP_1"),
+       PINCTRL_PIN(57, "CPU_GP_2"),
+       PINCTRL_PIN(58, "CPU_GP_3"),
+       PINCTRL_PIN(59, "SUSWARNB_SUSPWRDNACK"),
+       PINCTRL_PIN(60, "SUSACKB"),
+       PINCTRL_PIN(61, "NMIB"),
+       PINCTRL_PIN(62, "SMIB"),
+       PINCTRL_PIN(63, "GPPC_S_10"),
+       PINCTRL_PIN(64, "GPPC_S_11"),
+       PINCTRL_PIN(65, "SPI_CLK_LOOPBK"),
+       /* GPP_C */
+       PINCTRL_PIN(66, "ME_SML0CLK"),
+       PINCTRL_PIN(67, "ME_SML0DATA"),
+       PINCTRL_PIN(68, "ME_SML0ALERTB"),
+       PINCTRL_PIN(69, "ME_SML0BDATA"),
+       PINCTRL_PIN(70, "ME_SML0BCLK"),
+       PINCTRL_PIN(71, "ME_SML0BALERTB"),
+       PINCTRL_PIN(72, "ME_SML1CLK"),
+       PINCTRL_PIN(73, "ME_SML1DATA"),
+       PINCTRL_PIN(74, "ME_SML1ALERTB"),
+       PINCTRL_PIN(75, "ME_SML2CLK"),
+       PINCTRL_PIN(76, "ME_SML2DATA"),
+       PINCTRL_PIN(77, "ME_SML2ALERTB"),
+       PINCTRL_PIN(78, "ME_SML3CLK"),
+       PINCTRL_PIN(79, "ME_SML3DATA"),
+       PINCTRL_PIN(80, "ME_SML3ALERTB"),
+       PINCTRL_PIN(81, "ME_SML4CLK"),
+       PINCTRL_PIN(82, "ME_SML4DATA"),
+       PINCTRL_PIN(83, "ME_SML4ALERTB"),
+       PINCTRL_PIN(84, "GPPC_C_18"),
+       PINCTRL_PIN(85, "MC_SMBCLK"),
+       PINCTRL_PIN(86, "MC_SMBDATA"),
+       PINCTRL_PIN(87, "MC_SMBALERTB"),
+       /* GPP_D */
+       PINCTRL_PIN(88, "HS_SMBCLK"),
+       PINCTRL_PIN(89, "HS_SMBDATA"),
+       PINCTRL_PIN(90, "HS_SMBALERTB"),
+       PINCTRL_PIN(91, "GBE_SMB_ALRT_N"),
+       PINCTRL_PIN(92, "GBE_SMB_CLK"),
+       PINCTRL_PIN(93, "GBE_SMB_DATA"),
+       PINCTRL_PIN(94, "GBE_GPIO10"),
+       PINCTRL_PIN(95, "GBE_GPIO11"),
+       PINCTRL_PIN(96, "CRASHLOG_TRIG_N"),
+       PINCTRL_PIN(97, "PMEB"),
+       PINCTRL_PIN(98, "BM_BUSYB"),
+       PINCTRL_PIN(99, "PLTRSTB"),
+       PINCTRL_PIN(100, "PCHHOTB"),
+       PINCTRL_PIN(101, "ADR_COMPLETE"),
+       PINCTRL_PIN(102, "ADR_TRIGGER_N"),
+       PINCTRL_PIN(103, "VRALERTB"),
+       PINCTRL_PIN(104, "ADR_ACK"),
+       PINCTRL_PIN(105, "THERMTRIP_N"),
+       PINCTRL_PIN(106, "MEMTRIP_N"),
+       PINCTRL_PIN(107, "MSMI_N"),
+       PINCTRL_PIN(108, "CATERR_N"),
+       PINCTRL_PIN(109, "GLB_RST_WARN_B"),
+       PINCTRL_PIN(110, "USB2_OCB_7"),
+       PINCTRL_PIN(111, "GPP_D_23"),
+       /* GPP_E */
+       PINCTRL_PIN(112, "SATA1_XPCIE_0"),
+       PINCTRL_PIN(113, "SATA1_XPCIE_1"),
+       PINCTRL_PIN(114, "SATA1_XPCIE_2"),
+       PINCTRL_PIN(115, "SATA1_XPCIE_3"),
+       PINCTRL_PIN(116, "SATA0_XPCIE_2"),
+       PINCTRL_PIN(117, "SATA0_XPCIE_3"),
+       PINCTRL_PIN(118, "SATA0_USB3_XPCIE_0"),
+       PINCTRL_PIN(119, "SATA0_USB3_XPCIE_1"),
+       PINCTRL_PIN(120, "SATA0_SCLOCK"),
+       PINCTRL_PIN(121, "SATA0_SLOAD"),
+       PINCTRL_PIN(122, "SATA0_SDATAOUT"),
+       PINCTRL_PIN(123, "SATA1_SCLOCK"),
+       PINCTRL_PIN(124, "SATA1_SLOAD"),
+       PINCTRL_PIN(125, "SATA1_SDATAOUT"),
+       PINCTRL_PIN(126, "SATA2_SCLOCK"),
+       PINCTRL_PIN(127, "SATA2_SLOAD"),
+       PINCTRL_PIN(128, "SATA2_SDATAOUT"),
+       PINCTRL_PIN(129, "ERR0_N"),
+       PINCTRL_PIN(130, "ERR1_N"),
+       PINCTRL_PIN(131, "ERR2_N"),
+       PINCTRL_PIN(132, "GBE_UART_RXD"),
+       PINCTRL_PIN(133, "GBE_UART_TXD"),
+       PINCTRL_PIN(134, "GBE_UART_RTSB"),
+       PINCTRL_PIN(135, "GBE_UART_CTSB"),
+       /* JTAG */
+       PINCTRL_PIN(136, "JTAG_TDO"),
+       PINCTRL_PIN(137, "JTAG_TDI"),
+       PINCTRL_PIN(138, "JTAG_TCK"),
+       PINCTRL_PIN(139, "JTAG_TMS"),
+       PINCTRL_PIN(140, "JTAGX"),
+       PINCTRL_PIN(141, "PRDYB"),
+       PINCTRL_PIN(142, "PREQB"),
+       PINCTRL_PIN(143, "GLB_PC_DISABLE"),
+       PINCTRL_PIN(144, "DBG_PMODE"),
+       PINCTRL_PIN(145, "GLB_EXT_ACC_DISABLE"),
+       /* GPP_H */
+       PINCTRL_PIN(146, "GBE_GPIO12"),
+       PINCTRL_PIN(147, "GBE_GPIO13"),
+       PINCTRL_PIN(148, "GBE_SDP_TIMESYNC0_S2N"),
+       PINCTRL_PIN(149, "GBE_SDP_TIMESYNC1_S2N"),
+       PINCTRL_PIN(150, "GBE_SDP_TIMESYNC2_S2N"),
+       PINCTRL_PIN(151, "GBE_SDP_TIMESYNC3_S2N"),
+       PINCTRL_PIN(152, "GPPC_H_6"),
+       PINCTRL_PIN(153, "GPPC_H_7"),
+       PINCTRL_PIN(154, "NCSI_CLK_IN"),
+       PINCTRL_PIN(155, "NCSI_CRS_DV"),
+       PINCTRL_PIN(156, "NCSI_RXD0"),
+       PINCTRL_PIN(157, "NCSI_RXD1"),
+       PINCTRL_PIN(158, "NCSI_TX_EN"),
+       PINCTRL_PIN(159, "NCSI_TXD0"),
+       PINCTRL_PIN(160, "NCSI_TXD1"),
+       PINCTRL_PIN(161, "NAC_NCSI_CLK_OUT_0"),
+       PINCTRL_PIN(162, "NAC_NCSI_CLK_OUT_1"),
+       PINCTRL_PIN(163, "NAC_NCSI_CLK_OUT_2"),
+       PINCTRL_PIN(164, "PMCALERTB"),
+       PINCTRL_PIN(165, "GPPC_H_19"),
+       /* GPP_J */
+       PINCTRL_PIN(166, "CPUPWRGD"),
+       PINCTRL_PIN(167, "CPU_THRMTRIP_N"),
+       PINCTRL_PIN(168, "PLTRST_CPUB"),
+       PINCTRL_PIN(169, "TRIGGER0_N"),
+       PINCTRL_PIN(170, "TRIGGER1_N"),
+       PINCTRL_PIN(171, "CPU_PWR_DEBUG_N"),
+       PINCTRL_PIN(172, "CPU_MEMTRIP_N"),
+       PINCTRL_PIN(173, "CPU_MSMI_N"),
+       PINCTRL_PIN(174, "ME_PECI"),
+       PINCTRL_PIN(175, "NAC_SPARE0"),
+       PINCTRL_PIN(176, "NAC_SPARE1"),
+       PINCTRL_PIN(177, "NAC_SPARE2"),
+       PINCTRL_PIN(178, "CPU_ERR0_N"),
+       PINCTRL_PIN(179, "CPU_CATERR_N"),
+       PINCTRL_PIN(180, "CPU_ERR1_N"),
+       PINCTRL_PIN(181, "CPU_ERR2_N"),
+       PINCTRL_PIN(182, "GPP_J_16"),
+       PINCTRL_PIN(183, "GPP_J_17"),
+       /* GPP_I */
+       PINCTRL_PIN(184, "GBE_GPIO4"),
+       PINCTRL_PIN(185, "GBE_GPIO5"),
+       PINCTRL_PIN(186, "GBE_GPIO6"),
+       PINCTRL_PIN(187, "GBE_GPIO7"),
+       PINCTRL_PIN(188, "GBE1_LED1"),
+       PINCTRL_PIN(189, "GBE1_LED2"),
+       PINCTRL_PIN(190, "GBE2_LED0"),
+       PINCTRL_PIN(191, "GBE2_LED1"),
+       PINCTRL_PIN(192, "GBE2_LED2"),
+       PINCTRL_PIN(193, "GBE3_LED0"),
+       PINCTRL_PIN(194, "GBE3_LED1"),
+       PINCTRL_PIN(195, "GBE3_LED2"),
+       PINCTRL_PIN(196, "GBE0_I2C_CLK"),
+       PINCTRL_PIN(197, "GBE0_I2C_DATA"),
+       PINCTRL_PIN(198, "GBE1_I2C_CLK"),
+       PINCTRL_PIN(199, "GBE1_I2C_DATA"),
+       PINCTRL_PIN(200, "GBE2_I2C_CLK"),
+       PINCTRL_PIN(201, "GBE2_I2C_DATA"),
+       PINCTRL_PIN(202, "GBE3_I2C_CLK"),
+       PINCTRL_PIN(203, "GBE3_I2C_DATA"),
+       PINCTRL_PIN(204, "GBE4_I2C_CLK"),
+       PINCTRL_PIN(205, "GBE4_I2C_DATA"),
+       PINCTRL_PIN(206, "GBE_GPIO8"),
+       PINCTRL_PIN(207, "GBE_GPIO9"),
+       /* GPP_L */
+       PINCTRL_PIN(208, "PM_SYNC_0"),
+       PINCTRL_PIN(209, "PM_DOWN_0"),
+       PINCTRL_PIN(210, "PM_SYNC_CLK_0"),
+       PINCTRL_PIN(211, "GPP_L_3"),
+       PINCTRL_PIN(212, "GPP_L_4"),
+       PINCTRL_PIN(213, "GPP_L_5"),
+       PINCTRL_PIN(214, "GPP_L_6"),
+       PINCTRL_PIN(215, "GPP_L_7"),
+       PINCTRL_PIN(216, "GPP_L_8"),
+       PINCTRL_PIN(217, "NAC_GBE_GPIO0_S2N"),
+       PINCTRL_PIN(218, "NAC_GBE_GPIO1_S2N"),
+       PINCTRL_PIN(219, "NAC_GBE_GPIO2_S2N"),
+       PINCTRL_PIN(220, "NAC_GBE_GPIO3_S2N"),
+       PINCTRL_PIN(221, "NAC_GBE_SMB_DATA_IN"),
+       PINCTRL_PIN(222, "NAC_GBE_SMB_DATA_OUT"),
+       PINCTRL_PIN(223, "NAC_GBE_SMB_ALRT_N"),
+       PINCTRL_PIN(224, "NAC_GBE_SMB_CLK_IN"),
+       PINCTRL_PIN(225, "NAC_GBE_SMB_CLK_OUT"),
+       /* GPP_M */
+       PINCTRL_PIN(226, "GPP_M_0"),
+       PINCTRL_PIN(227, "GPP_M_1"),
+       PINCTRL_PIN(228, "GPP_M_2"),
+       PINCTRL_PIN(229, "GPP_M_3"),
+       PINCTRL_PIN(230, "NAC_WAKE_N"),
+       PINCTRL_PIN(231, "GPP_M_5"),
+       PINCTRL_PIN(232, "GPP_M_6"),
+       PINCTRL_PIN(233, "GPP_M_7"),
+       PINCTRL_PIN(234, "GPP_M_8"),
+       PINCTRL_PIN(235, "NAC_SBLINK_S2N"),
+       PINCTRL_PIN(236, "NAC_SBLINK_N2S"),
+       PINCTRL_PIN(237, "NAC_SBLINK_CLK_N2S"),
+       PINCTRL_PIN(238, "NAC_SBLINK_CLK_S2N"),
+       PINCTRL_PIN(239, "NAC_XTAL_VALID"),
+       PINCTRL_PIN(240, "NAC_RESET_NAC_N"),
+       PINCTRL_PIN(241, "GPP_M_15"),
+       PINCTRL_PIN(242, "GPP_M_16"),
+       PINCTRL_PIN(243, "GPP_M_17"),
+       /* GPP_N */
+       PINCTRL_PIN(244, "GPP_N_0"),
+       PINCTRL_PIN(245, "NAC_NCSI_TXD0"),
+       PINCTRL_PIN(246, "GPP_N_2"),
+       PINCTRL_PIN(247, "GPP_N_3"),
+       PINCTRL_PIN(248, "NAC_NCSI_REFCLK_IN"),
+       PINCTRL_PIN(249, "GPP_N_5"),
+       PINCTRL_PIN(250, "GPP_N_6"),
+       PINCTRL_PIN(251, "GPP_N_7"),
+       PINCTRL_PIN(252, "NAC_NCSI_RXD0"),
+       PINCTRL_PIN(253, "NAC_NCSI_RXD1"),
+       PINCTRL_PIN(254, "NAC_NCSI_CRS_DV"),
+       PINCTRL_PIN(255, "NAC_NCSI_CLK_IN"),
+       PINCTRL_PIN(256, "NAC_NCSI_REFCLK_OUT"),
+       PINCTRL_PIN(257, "NAC_NCSI_TX_EN"),
+       PINCTRL_PIN(258, "NAC_NCSI_TXD1"),
+       PINCTRL_PIN(259, "NAC_NCSI_OE_N"),
+       PINCTRL_PIN(260, "NAC_GR_N"),
+       PINCTRL_PIN(261, "NAC_INIT_SX_WAKE_N"),
+};
+
+static const struct intel_padgroup ebg_community0_gpps[] = {
+       EBG_GPP(0, 0, 20),      /* GPP_A */
+       EBG_GPP(1, 21, 44),     /* GPP_B */
+       EBG_GPP(2, 45, 65),     /* SPI */
+};
+
+static const struct intel_padgroup ebg_community1_gpps[] = {
+       EBG_GPP(0, 66, 87),     /* GPP_C */
+       EBG_GPP(1, 88, 111),    /* GPP_D */
+};
+
+static const struct intel_padgroup ebg_community3_gpps[] = {
+       EBG_GPP(0, 112, 135),   /* GPP_E */
+       EBG_GPP(1, 136, 145),   /* JTAG */
+};
+
+static const struct intel_padgroup ebg_community4_gpps[] = {
+       EBG_GPP(0, 146, 165),   /* GPP_H */
+       EBG_GPP(1, 166, 183),   /* GPP_J */
+};
+
+static const struct intel_padgroup ebg_community5_gpps[] = {
+       EBG_GPP(0, 184, 207),   /* GPP_I */
+       EBG_GPP(1, 208, 225),   /* GPP_L */
+       EBG_GPP(2, 226, 243),   /* GPP_M */
+       EBG_GPP(3, 244, 261),   /* GPP_N */
+};
+
+static const struct intel_community ebg_communities[] = {
+       EBG_COMMUNITY(0, 0, 65, ebg_community0_gpps),
+       EBG_COMMUNITY(1, 66, 111, ebg_community1_gpps),
+       EBG_COMMUNITY(2, 112, 145, ebg_community3_gpps),
+       EBG_COMMUNITY(3, 146, 183, ebg_community4_gpps),
+       EBG_COMMUNITY(4, 184, 261, ebg_community5_gpps),
+};
+
+static const struct intel_pinctrl_soc_data ebg_soc_data = {
+       .pins = ebg_pins,
+       .npins = ARRAY_SIZE(ebg_pins),
+       .communities = ebg_communities,
+       .ncommunities = ARRAY_SIZE(ebg_communities),
+};
+
+static const struct acpi_device_id ebg_pinctrl_acpi_match[] = {
+       { "INTC1071", (kernel_ulong_t)&ebg_soc_data },
+       { }
+};
+MODULE_DEVICE_TABLE(acpi, ebg_pinctrl_acpi_match);
+
+static INTEL_PINCTRL_PM_OPS(ebg_pinctrl_pm_ops);
+
+static struct platform_driver ebg_pinctrl_driver = {
+       .probe = intel_pinctrl_probe_by_hid,
+       .driver = {
+               .name = "emmitsburg-pinctrl",
+               .acpi_match_table = ebg_pinctrl_acpi_match,
+               .pm = &ebg_pinctrl_pm_ops,
+       },
+};
+
+module_platform_driver(ebg_pinctrl_driver);
+
+MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
+MODULE_DESCRIPTION("Intel Emmitsburg PCH pinctrl/GPIO driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index 6a274e2..b64997b 100644 (file)
@@ -435,11 +435,20 @@ static void intel_gpio_set_gpio_mode(void __iomem *padcfg0)
 {
        u32 value;
 
+       value = readl(padcfg0);
+
        /* Put the pad into GPIO mode */
-       value = readl(padcfg0) & ~PADCFG0_PMODE_MASK;
+       value &= ~PADCFG0_PMODE_MASK;
+       value |= PADCFG0_PMODE_GPIO;
+
+       /* Disable input and output buffers */
+       value &= ~PADCFG0_GPIORXDIS;
+       value &= ~PADCFG0_GPIOTXDIS;
+
        /* Disable SCI/SMI/NMI generation */
        value &= ~(PADCFG0_GPIROUTIOXAPIC | PADCFG0_GPIROUTSCI);
        value &= ~(PADCFG0_GPIROUTSMI | PADCFG0_GPIROUTNMI);
+
        writel(value, padcfg0);
 }
 
@@ -451,6 +460,8 @@ static int intel_gpio_request_enable(struct pinctrl_dev *pctldev,
        void __iomem *padcfg0;
        unsigned long flags;
 
+       padcfg0 = intel_get_padcfg(pctrl, pin, PADCFG0);
+
        raw_spin_lock_irqsave(&pctrl->lock, flags);
 
        if (!intel_pad_owned_by_host(pctrl, pin)) {
@@ -463,8 +474,6 @@ static int intel_gpio_request_enable(struct pinctrl_dev *pctldev,
                return 0;
        }
 
-       padcfg0 = intel_get_padcfg(pctrl, pin, PADCFG0);
-
        /*
         * If pin is already configured in GPIO mode, we assume that
         * firmware provides correct settings. In such case we avoid
@@ -494,11 +503,10 @@ static int intel_gpio_set_direction(struct pinctrl_dev *pctldev,
        void __iomem *padcfg0;
        unsigned long flags;
 
-       raw_spin_lock_irqsave(&pctrl->lock, flags);
-
        padcfg0 = intel_get_padcfg(pctrl, pin, PADCFG0);
-       __intel_gpio_set_direction(padcfg0, input);
 
+       raw_spin_lock_irqsave(&pctrl->lock, flags);
+       __intel_gpio_set_direction(padcfg0, input);
        raw_spin_unlock_irqrestore(&pctrl->lock, flags);
 
        return 0;
@@ -513,20 +521,21 @@ static const struct pinmux_ops intel_pinmux_ops = {
        .gpio_set_direction = intel_gpio_set_direction,
 };
 
-static int intel_config_get(struct pinctrl_dev *pctldev, unsigned int pin,
-                           unsigned long *config)
+static int intel_config_get_pull(struct intel_pinctrl *pctrl, unsigned int pin,
+                                enum pin_config_param param, u32 *arg)
 {
-       struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
-       enum pin_config_param param = pinconf_to_config_param(*config);
        const struct intel_community *community;
+       void __iomem *padcfg1;
+       unsigned long flags;
        u32 value, term;
-       u32 arg = 0;
-
-       if (!intel_pad_owned_by_host(pctrl, pin))
-               return -ENOTSUPP;
 
        community = intel_get_community(pctrl, pin);
-       value = readl(intel_get_padcfg(pctrl, pin, PADCFG1));
+       padcfg1 = intel_get_padcfg(pctrl, pin, PADCFG1);
+
+       raw_spin_lock_irqsave(&pctrl->lock, flags);
+       value = readl(padcfg1);
+       raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+
        term = (value & PADCFG1_TERM_MASK) >> PADCFG1_TERM_SHIFT;
 
        switch (param) {
@@ -541,16 +550,16 @@ static int intel_config_get(struct pinctrl_dev *pctldev, unsigned int pin,
 
                switch (term) {
                case PADCFG1_TERM_1K:
-                       arg = 1000;
+                       *arg = 1000;
                        break;
                case PADCFG1_TERM_2K:
-                       arg = 2000;
+                       *arg = 2000;
                        break;
                case PADCFG1_TERM_5K:
-                       arg = 5000;
+                       *arg = 5000;
                        break;
                case PADCFG1_TERM_20K:
-                       arg = 20000;
+                       *arg = 20000;
                        break;
                }
 
@@ -564,35 +573,74 @@ static int intel_config_get(struct pinctrl_dev *pctldev, unsigned int pin,
                case PADCFG1_TERM_1K:
                        if (!(community->features & PINCTRL_FEATURE_1K_PD))
                                return -EINVAL;
-                       arg = 1000;
+                       *arg = 1000;
                        break;
                case PADCFG1_TERM_5K:
-                       arg = 5000;
+                       *arg = 5000;
                        break;
                case PADCFG1_TERM_20K:
-                       arg = 20000;
+                       *arg = 20000;
                        break;
                }
 
                break;
 
-       case PIN_CONFIG_INPUT_DEBOUNCE: {
-               void __iomem *padcfg2;
-               u32 v;
+       default:
+               return -EINVAL;
+       }
 
-               padcfg2 = intel_get_padcfg(pctrl, pin, PADCFG2);
-               if (!padcfg2)
-                       return -ENOTSUPP;
+       return 0;
+}
 
-               v = readl(padcfg2);
-               if (!(v & PADCFG2_DEBEN))
-                       return -EINVAL;
+static int intel_config_get_debounce(struct intel_pinctrl *pctrl, unsigned int pin,
+                                    enum pin_config_param param, u32 *arg)
+{
+       void __iomem *padcfg2;
+       unsigned long flags;
+       unsigned long v;
+       u32 value2;
+
+       padcfg2 = intel_get_padcfg(pctrl, pin, PADCFG2);
+       if (!padcfg2)
+               return -ENOTSUPP;
+
+       raw_spin_lock_irqsave(&pctrl->lock, flags);
+       value2 = readl(padcfg2);
+       raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+       if (!(value2 & PADCFG2_DEBEN))
+               return -EINVAL;
+
+       v = (value2 & PADCFG2_DEBOUNCE_MASK) >> PADCFG2_DEBOUNCE_SHIFT;
+       *arg = BIT(v) * DEBOUNCE_PERIOD_NSEC / NSEC_PER_USEC;
+
+       return 0;
+}
+
+static int intel_config_get(struct pinctrl_dev *pctldev, unsigned int pin,
+                           unsigned long *config)
+{
+       struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+       enum pin_config_param param = pinconf_to_config_param(*config);
+       u32 arg = 0;
+       int ret;
 
-               v = (v & PADCFG2_DEBOUNCE_MASK) >> PADCFG2_DEBOUNCE_SHIFT;
-               arg = BIT(v) * DEBOUNCE_PERIOD_NSEC / NSEC_PER_USEC;
+       if (!intel_pad_owned_by_host(pctrl, pin))
+               return -ENOTSUPP;
 
+       switch (param) {
+       case PIN_CONFIG_BIAS_DISABLE:
+       case PIN_CONFIG_BIAS_PULL_UP:
+       case PIN_CONFIG_BIAS_PULL_DOWN:
+               ret = intel_config_get_pull(pctrl, pin, param, &arg);
+               if (ret)
+                       return ret;
+               break;
+
+       case PIN_CONFIG_INPUT_DEBOUNCE:
+               ret = intel_config_get_debounce(pctrl, pin, param, &arg);
+               if (ret)
+                       return ret;
                break;
-       }
 
        default:
                return -ENOTSUPP;
@@ -613,10 +661,11 @@ static int intel_config_set_pull(struct intel_pinctrl *pctrl, unsigned int pin,
        int ret = 0;
        u32 value;
 
-       raw_spin_lock_irqsave(&pctrl->lock, flags);
-
        community = intel_get_community(pctrl, pin);
        padcfg1 = intel_get_padcfg(pctrl, pin, PADCFG1);
+
+       raw_spin_lock_irqsave(&pctrl->lock, flags);
+
        value = readl(padcfg1);
 
        switch (param) {
@@ -686,7 +735,6 @@ static int intel_config_set_debounce(struct intel_pinctrl *pctrl,
        void __iomem *padcfg0, *padcfg2;
        unsigned long flags;
        u32 value0, value2;
-       int ret = 0;
 
        padcfg2 = intel_get_padcfg(pctrl, pin, PADCFG2);
        if (!padcfg2)
@@ -708,23 +756,22 @@ static int intel_config_set_debounce(struct intel_pinctrl *pctrl,
 
                v = order_base_2(debounce * NSEC_PER_USEC / DEBOUNCE_PERIOD_NSEC);
                if (v < 3 || v > 15) {
-                       ret = -EINVAL;
-                       goto exit_unlock;
-               } else {
-                       /* Enable glitch filter and debouncer */
-                       value0 |= PADCFG0_PREGFRXSEL;
-                       value2 |= v << PADCFG2_DEBOUNCE_SHIFT;
-                       value2 |= PADCFG2_DEBEN;
+                       raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+                       return -EINVAL;
                }
+
+               /* Enable glitch filter and debouncer */
+               value0 |= PADCFG0_PREGFRXSEL;
+               value2 |= v << PADCFG2_DEBOUNCE_SHIFT;
+               value2 |= PADCFG2_DEBEN;
        }
 
        writel(value0, padcfg0);
        writel(value2, padcfg2);
 
-exit_unlock:
        raw_spin_unlock_irqrestore(&pctrl->lock, flags);
 
-       return ret;
+       return 0;
 }
 
 static int intel_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
@@ -894,6 +941,7 @@ static void intel_gpio_set(struct gpio_chip *chip, unsigned int offset,
 static int intel_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
 {
        struct intel_pinctrl *pctrl = gpiochip_get_data(chip);
+       unsigned long flags;
        void __iomem *reg;
        u32 padcfg0;
        int pin;
@@ -906,8 +954,9 @@ static int intel_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
        if (!reg)
                return -EINVAL;
 
+       raw_spin_lock_irqsave(&pctrl->lock, flags);
        padcfg0 = readl(reg);
-
+       raw_spin_unlock_irqrestore(&pctrl->lock, flags);
        if (padcfg0 & PADCFG0_PMODE_MASK)
                return -EINVAL;
 
@@ -1036,6 +1085,9 @@ static int intel_gpio_irq_type(struct irq_data *d, unsigned int type)
 
        intel_gpio_set_gpio_mode(reg);
 
+       /* Disable TX buffer and enable RX (this will be input) */
+       __intel_gpio_set_direction(reg, true);
+
        value = readl(reg);
 
        value &= ~(PADCFG0_RXEVCFG_MASK | PADCFG0_RXINV);
@@ -1081,22 +1133,27 @@ static int intel_gpio_irq_wake(struct irq_data *d, unsigned int on)
        return 0;
 }
 
-static irqreturn_t intel_gpio_community_irq_handler(struct intel_pinctrl *pctrl,
-       const struct intel_community *community)
+static int intel_gpio_community_irq_handler(struct intel_pinctrl *pctrl,
+                                           const struct intel_community *community)
 {
        struct gpio_chip *gc = &pctrl->chip;
-       irqreturn_t ret = IRQ_NONE;
-       int gpp;
+       unsigned int gpp;
+       int ret = 0;
 
        for (gpp = 0; gpp < community->ngpps; gpp++) {
                const struct intel_padgroup *padgrp = &community->gpps[gpp];
                unsigned long pending, enabled, gpp_offset;
+               unsigned long flags;
+
+               raw_spin_lock_irqsave(&pctrl->lock, flags);
 
                pending = readl(community->regs + community->is_offset +
                                padgrp->reg_num * 4);
                enabled = readl(community->regs + community->ie_offset +
                                padgrp->reg_num * 4);
 
+               raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+
                /* Only interrupts that are enabled */
                pending &= enabled;
 
@@ -1106,9 +1163,9 @@ static irqreturn_t intel_gpio_community_irq_handler(struct intel_pinctrl *pctrl,
                        irq = irq_find_mapping(gc->irq.domain,
                                               padgrp->gpio_base + gpp_offset);
                        generic_handle_irq(irq);
-
-                       ret |= IRQ_HANDLED;
                }
+
+               ret += pending ? 1 : 0;
        }
 
        return ret;
@@ -1118,16 +1175,16 @@ static irqreturn_t intel_gpio_irq(int irq, void *data)
 {
        const struct intel_community *community;
        struct intel_pinctrl *pctrl = data;
-       irqreturn_t ret = IRQ_NONE;
-       int i;
+       unsigned int i;
+       int ret = 0;
 
        /* Need to check all communities for pending interrupts */
        for (i = 0; i < pctrl->ncommunities; i++) {
                community = &pctrl->communities[i];
-               ret |= intel_gpio_community_irq_handler(pctrl, community);
+               ret += intel_gpio_community_irq_handler(pctrl, community);
        }
 
-       return ret;
+       return IRQ_RETVAL(ret);
 }
 
 static int intel_gpio_add_community_ranges(struct intel_pinctrl *pctrl,
@@ -1571,19 +1628,6 @@ static void intel_gpio_irq_init(struct intel_pinctrl *pctrl)
        }
 }
 
-static u32
-intel_gpio_is_requested(struct gpio_chip *chip, int base, unsigned int size)
-{
-       u32 requested = 0;
-       unsigned int i;
-
-       for (i = 0; i < size; i++)
-               if (gpiochip_is_requested(chip, base + i))
-                       requested |= BIT(i);
-
-       return requested;
-}
-
 static bool intel_gpio_update_reg(void __iomem *reg, u32 mask, u32 value)
 {
        u32 curr, updated;
@@ -1604,12 +1648,16 @@ static void intel_restore_hostown(struct intel_pinctrl *pctrl, unsigned int c,
        const struct intel_community *community = &pctrl->communities[c];
        const struct intel_padgroup *padgrp = &community->gpps[gpp];
        struct device *dev = pctrl->dev;
-       u32 requested;
+       const char *dummy;
+       u32 requested = 0;
+       unsigned int i;
 
        if (padgrp->gpio_base == INTEL_GPIO_BASE_NOMAP)
                return;
 
-       requested = intel_gpio_is_requested(&pctrl->chip, padgrp->gpio_base, padgrp->size);
+       for_each_requested_gpio_in_range(&pctrl->chip, i, padgrp->gpio_base, padgrp->size, dummy)
+               requested |= BIT(i);
+
        if (!intel_gpio_update_reg(base + gpp * 4, requested, saved))
                return;
 
diff --git a/drivers/pinctrl/intel/pinctrl-intel.h b/drivers/pinctrl/intel/pinctrl-intel.h
index cc78c48..4e17308 100644 (file)
@@ -103,6 +103,8 @@ enum {
  * @gpps: Pad groups if the controller has variable size pad groups
  * @ngpps: Number of pad groups in this community
  * @pad_map: Optional non-linear mapping of the pads
+ * @nirqs: Optional total number of IRQs this community can generate
+ * @acpi_space_id: Optional address space ID for ACPI OpRegion handler
  * @regs: Community specific common registers (reserved for core driver)
  * @pad_regs: Community specific pad registers (reserved for core driver)
  *
@@ -127,6 +129,8 @@ struct intel_community {
        const struct intel_padgroup *gpps;
        size_t ngpps;
        const unsigned int *pad_map;
+       unsigned short nirqs;
+       unsigned short acpi_space_id;
 
        /* Reserved for the core driver */
        void __iomem *regs;
diff --git a/drivers/pinctrl/intel/pinctrl-lynxpoint.c b/drivers/pinctrl/intel/pinctrl-lynxpoint.c
index a45b8f2..96589d0 100644 (file)
@@ -386,6 +386,16 @@ static int lp_pinmux_set_mux(struct pinctrl_dev *pctldev,
        return 0;
 }
 
+static void lp_gpio_enable_input(void __iomem *reg)
+{
+       iowrite32(ioread32(reg) & ~GPINDIS_BIT, reg);
+}
+
+static void lp_gpio_disable_input(void __iomem *reg)
+{
+       iowrite32(ioread32(reg) | GPINDIS_BIT, reg);
+}
+
 static int lp_gpio_request_enable(struct pinctrl_dev *pctldev,
                                  struct pinctrl_gpio_range *range,
                                  unsigned int pin)
@@ -411,7 +421,7 @@ static int lp_gpio_request_enable(struct pinctrl_dev *pctldev,
        }
 
        /* Enable input sensing */
-       iowrite32(ioread32(conf2) & ~GPINDIS_BIT, conf2);
+       lp_gpio_enable_input(conf2);
 
        raw_spin_unlock_irqrestore(&lg->lock, flags);
 
@@ -429,7 +439,7 @@ static void lp_gpio_disable_free(struct pinctrl_dev *pctldev,
        raw_spin_lock_irqsave(&lg->lock, flags);
 
        /* Disable input sensing */
-       iowrite32(ioread32(conf2) | GPINDIS_BIT, conf2);
+       lp_gpio_disable_input(conf2);
 
        raw_spin_unlock_irqrestore(&lg->lock, flags);
 
@@ -919,16 +929,14 @@ static int lp_gpio_runtime_resume(struct device *dev)
 static int lp_gpio_resume(struct device *dev)
 {
        struct intel_pinctrl *lg = dev_get_drvdata(dev);
-       void __iomem *reg;
+       struct gpio_chip *chip = &lg->chip;
+       const char *dummy;
        int i;
 
        /* on some hardware suspend clears input sensing, re-enable it here */
-       for (i = 0; i < lg->chip.ngpio; i++) {
-               if (gpiochip_is_requested(&lg->chip, i) != NULL) {
-                       reg = lp_gpio_reg(&lg->chip, i, LP_CONFIG2);
-                       iowrite32(ioread32(reg) & ~GPINDIS_BIT, reg);
-               }
-       }
+       for_each_requested_gpio(chip, i, dummy)
+               lp_gpio_enable_input(lp_gpio_reg(chip, i, LP_CONFIG2));
+
        return 0;
 }
 
@@ -951,7 +959,7 @@ static struct platform_driver lp_gpio_driver = {
        .driver         = {
                .name   = "lp_gpio",
                .pm     = &lp_gpio_pm_ops,
-               .acpi_match_table = ACPI_PTR(lynxpoint_gpio_acpi_match),
+               .acpi_match_table = lynxpoint_gpio_acpi_match,
        },
 };
 
diff --git a/drivers/pinctrl/intel/pinctrl-merrifield.c b/drivers/pinctrl/intel/pinctrl-merrifield.c
index 04ca8ae..e4ff8da 100644 (file)
@@ -135,7 +135,7 @@ static const struct pinctrl_pin_desc mrfld_pins[] = {
        PINCTRL_PIN(43, "GP83_SD_D3"),
        PINCTRL_PIN(44, "GP84_SD_LS_CLK_FB"),
        PINCTRL_PIN(45, "GP85_SD_LS_CMD_DIR"),
-       PINCTRL_PIN(46, "GP86_SD_LVL_D_DIR"),
+       PINCTRL_PIN(46, "GP86_SD_LS_D_DIR"),
        PINCTRL_PIN(47, "GP88_SD_LS_SEL"),
        PINCTRL_PIN(48, "GP87_SD_PD"),
        PINCTRL_PIN(49, "GP89_SD_WP"),
@@ -171,28 +171,28 @@ static const struct pinctrl_pin_desc mrfld_pins[] = {
        PINCTRL_PIN(77, "GP42_I2S_2_RXD"),
        PINCTRL_PIN(78, "GP43_I2S_2_TXD"),
        /* Family 6: GP SSP (22 pins) */
-       PINCTRL_PIN(79, "GP120_SPI_3_CLK"),
-       PINCTRL_PIN(80, "GP121_SPI_3_SS"),
-       PINCTRL_PIN(81, "GP122_SPI_3_RXD"),
-       PINCTRL_PIN(82, "GP123_SPI_3_TXD"),
-       PINCTRL_PIN(83, "GP102_SPI_4_CLK"),
-       PINCTRL_PIN(84, "GP103_SPI_4_SS_0"),
-       PINCTRL_PIN(85, "GP104_SPI_4_SS_1"),
-       PINCTRL_PIN(86, "GP105_SPI_4_SS_2"),
-       PINCTRL_PIN(87, "GP106_SPI_4_SS_3"),
-       PINCTRL_PIN(88, "GP107_SPI_4_RXD"),
-       PINCTRL_PIN(89, "GP108_SPI_4_TXD"),
-       PINCTRL_PIN(90, "GP109_SPI_5_CLK"),
-       PINCTRL_PIN(91, "GP110_SPI_5_SS_0"),
-       PINCTRL_PIN(92, "GP111_SPI_5_SS_1"),
-       PINCTRL_PIN(93, "GP112_SPI_5_SS_2"),
-       PINCTRL_PIN(94, "GP113_SPI_5_SS_3"),
-       PINCTRL_PIN(95, "GP114_SPI_5_RXD"),
-       PINCTRL_PIN(96, "GP115_SPI_5_TXD"),
-       PINCTRL_PIN(97, "GP116_SPI_6_CLK"),
-       PINCTRL_PIN(98, "GP117_SPI_6_SS"),
-       PINCTRL_PIN(99, "GP118_SPI_6_RXD"),
-       PINCTRL_PIN(100, "GP119_SPI_6_TXD"),
+       PINCTRL_PIN(79, "GP120_SPI_0_CLK"),
+       PINCTRL_PIN(80, "GP121_SPI_0_SS"),
+       PINCTRL_PIN(81, "GP122_SPI_0_RXD"),
+       PINCTRL_PIN(82, "GP123_SPI_0_TXD"),
+       PINCTRL_PIN(83, "GP102_SPI_1_CLK"),
+       PINCTRL_PIN(84, "GP103_SPI_1_SS0"),
+       PINCTRL_PIN(85, "GP104_SPI_1_SS1"),
+       PINCTRL_PIN(86, "GP105_SPI_1_SS2"),
+       PINCTRL_PIN(87, "GP106_SPI_1_SS3"),
+       PINCTRL_PIN(88, "GP107_SPI_1_RXD"),
+       PINCTRL_PIN(89, "GP108_SPI_1_TXD"),
+       PINCTRL_PIN(90, "GP109_SPI_2_CLK"),
+       PINCTRL_PIN(91, "GP110_SPI_2_SS0"),
+       PINCTRL_PIN(92, "GP111_SPI_2_SS1"),
+       PINCTRL_PIN(93, "GP112_SPI_2_SS2"),
+       PINCTRL_PIN(94, "GP113_SPI_2_SS3"),
+       PINCTRL_PIN(95, "GP114_SPI_2_RXD"),
+       PINCTRL_PIN(96, "GP115_SPI_2_TXD"),
+       PINCTRL_PIN(97, "GP116_SPI_3_CLK"),
+       PINCTRL_PIN(98, "GP117_SPI_3_SS"),
+       PINCTRL_PIN(99, "GP118_SPI_3_RXD"),
+       PINCTRL_PIN(100, "GP119_SPI_3_TXD"),
        /* Family 7: I2C (14 pins) */
        PINCTRL_PIN(101, "GP19_I2C_1_SCL"),
        PINCTRL_PIN(102, "GP20_I2C_1_SDA"),
@@ -340,6 +340,7 @@ static const struct pinctrl_pin_desc mrfld_pins[] = {
 };
 
 static const unsigned int mrfld_sdio_pins[] = { 50, 51, 52, 53, 54, 55, 56 };
+static const unsigned int mrfld_i2s2_pins[] = { 75, 76, 77, 78 };
 static const unsigned int mrfld_spi5_pins[] = { 90, 91, 92, 93, 94, 95, 96 };
 static const unsigned int mrfld_uart0_pins[] = { 115, 116, 117, 118 };
 static const unsigned int mrfld_uart1_pins[] = { 119, 120, 121, 122 };
@@ -351,6 +352,7 @@ static const unsigned int mrfld_pwm3_pins[] = { 133 };
 
 static const struct intel_pingroup mrfld_groups[] = {
        PIN_GROUP("sdio_grp", mrfld_sdio_pins, 1),
+       PIN_GROUP("i2s2_grp", mrfld_i2s2_pins, 1),
        PIN_GROUP("spi5_grp", mrfld_spi5_pins, 1),
        PIN_GROUP("uart0_grp", mrfld_uart0_pins, 1),
        PIN_GROUP("uart1_grp", mrfld_uart1_pins, 1),
@@ -362,6 +364,7 @@ static const struct intel_pingroup mrfld_groups[] = {
 };
 
 static const char * const mrfld_sdio_groups[] = { "sdio_grp" };
+static const char * const mrfld_i2s2_groups[] = { "i2s2_grp" };
 static const char * const mrfld_spi5_groups[] = { "spi5_grp" };
 static const char * const mrfld_uart0_groups[] = { "uart0_grp" };
 static const char * const mrfld_uart1_groups[] = { "uart1_grp" };
@@ -373,6 +376,7 @@ static const char * const mrfld_pwm3_groups[] = { "pwm3_grp" };
 
 static const struct intel_function mrfld_functions[] = {
        FUNCTION("sdio", mrfld_sdio_groups),
+       FUNCTION("i2s2", mrfld_i2s2_groups),
        FUNCTION("spi5", mrfld_spi5_groups),
        FUNCTION("uart0", mrfld_uart0_groups),
        FUNCTION("uart1", mrfld_uart1_groups),
diff --git a/drivers/pinctrl/intel/pinctrl-tigerlake.c b/drivers/pinctrl/intel/pinctrl-tigerlake.c
index bcfd754..8c162dd 100644 (file)
@@ -380,8 +380,366 @@ static const struct intel_pinctrl_soc_data tgllp_soc_data = {
        .ncommunities = ARRAY_SIZE(tgllp_communities),
 };
 
+/* Tiger Lake-H */
+static const struct pinctrl_pin_desc tglh_pins[] = {
+       /* GPP_A */
+       PINCTRL_PIN(0, "SPI0_IO_2"),
+       PINCTRL_PIN(1, "SPI0_IO_3"),
+       PINCTRL_PIN(2, "SPI0_MOSI_IO_0"),
+       PINCTRL_PIN(3, "SPI0_MISO_IO_1"),
+       PINCTRL_PIN(4, "SPI0_TPM_CSB"),
+       PINCTRL_PIN(5, "SPI0_FLASH_0_CSB"),
+       PINCTRL_PIN(6, "SPI0_FLASH_1_CSB"),
+       PINCTRL_PIN(7, "SPI0_CLK"),
+       PINCTRL_PIN(8, "ESPI_IO_0"),
+       PINCTRL_PIN(9, "ESPI_IO_1"),
+       PINCTRL_PIN(10, "ESPI_IO_2"),
+       PINCTRL_PIN(11, "ESPI_IO_3"),
+       PINCTRL_PIN(12, "ESPI_CS0B"),
+       PINCTRL_PIN(13, "ESPI_CLK"),
+       PINCTRL_PIN(14, "ESPI_RESETB"),
+       PINCTRL_PIN(15, "ESPI_CS1B"),
+       PINCTRL_PIN(16, "ESPI_CS2B"),
+       PINCTRL_PIN(17, "ESPI_CS3B"),
+       PINCTRL_PIN(18, "ESPI_ALERT0B"),
+       PINCTRL_PIN(19, "ESPI_ALERT1B"),
+       PINCTRL_PIN(20, "ESPI_ALERT2B"),
+       PINCTRL_PIN(21, "ESPI_ALERT3B"),
+       PINCTRL_PIN(22, "GPPC_A_14"),
+       PINCTRL_PIN(23, "SPI0_CLK_LOOPBK"),
+       PINCTRL_PIN(24, "ESPI_CLK_LOOPBK"),
+       /* GPP_R */
+       PINCTRL_PIN(25, "HDA_BCLK"),
+       PINCTRL_PIN(26, "HDA_SYNC"),
+       PINCTRL_PIN(27, "HDA_SDO"),
+       PINCTRL_PIN(28, "HDA_SDI_0"),
+       PINCTRL_PIN(29, "HDA_RSTB"),
+       PINCTRL_PIN(30, "HDA_SDI_1"),
+       PINCTRL_PIN(31, "GPP_R_6"),
+       PINCTRL_PIN(32, "GPP_R_7"),
+       PINCTRL_PIN(33, "GPP_R_8"),
+       PINCTRL_PIN(34, "PCIE_LNK_DOWN"),
+       PINCTRL_PIN(35, "ISH_UART0_RTSB"),
+       PINCTRL_PIN(36, "SX_EXIT_HOLDOFFB"),
+       PINCTRL_PIN(37, "CLKOUT_48"),
+       PINCTRL_PIN(38, "ISH_GP_7"),
+       PINCTRL_PIN(39, "ISH_GP_0"),
+       PINCTRL_PIN(40, "ISH_GP_1"),
+       PINCTRL_PIN(41, "ISH_GP_2"),
+       PINCTRL_PIN(42, "ISH_GP_3"),
+       PINCTRL_PIN(43, "ISH_GP_4"),
+       PINCTRL_PIN(44, "ISH_GP_5"),
+       /* GPP_B */
+       PINCTRL_PIN(45, "GSPI0_CS1B"),
+       PINCTRL_PIN(46, "GSPI1_CS1B"),
+       PINCTRL_PIN(47, "VRALERTB"),
+       PINCTRL_PIN(48, "CPU_GP_2"),
+       PINCTRL_PIN(49, "CPU_GP_3"),
+       PINCTRL_PIN(50, "SRCCLKREQB_0"),
+       PINCTRL_PIN(51, "SRCCLKREQB_1"),
+       PINCTRL_PIN(52, "SRCCLKREQB_2"),
+       PINCTRL_PIN(53, "SRCCLKREQB_3"),
+       PINCTRL_PIN(54, "SRCCLKREQB_4"),
+       PINCTRL_PIN(55, "SRCCLKREQB_5"),
+       PINCTRL_PIN(56, "I2S_MCLK"),
+       PINCTRL_PIN(57, "SLP_S0B"),
+       PINCTRL_PIN(58, "PLTRSTB"),
+       PINCTRL_PIN(59, "SPKR"),
+       PINCTRL_PIN(60, "GSPI0_CS0B"),
+       PINCTRL_PIN(61, "GSPI0_CLK"),
+       PINCTRL_PIN(62, "GSPI0_MISO"),
+       PINCTRL_PIN(63, "GSPI0_MOSI"),
+       PINCTRL_PIN(64, "GSPI1_CS0B"),
+       PINCTRL_PIN(65, "GSPI1_CLK"),
+       PINCTRL_PIN(66, "GSPI1_MISO"),
+       PINCTRL_PIN(67, "GSPI1_MOSI"),
+       PINCTRL_PIN(68, "SML1ALERTB"),
+       PINCTRL_PIN(69, "GSPI0_CLK_LOOPBK"),
+       PINCTRL_PIN(70, "GSPI1_CLK_LOOPBK"),
+       /* vGPIO_0 */
+       PINCTRL_PIN(71, "ESPI_USB_OCB_0"),
+       PINCTRL_PIN(72, "ESPI_USB_OCB_1"),
+       PINCTRL_PIN(73, "ESPI_USB_OCB_2"),
+       PINCTRL_PIN(74, "ESPI_USB_OCB_3"),
+       PINCTRL_PIN(75, "USB_CPU_OCB_0"),
+       PINCTRL_PIN(76, "USB_CPU_OCB_1"),
+       PINCTRL_PIN(77, "USB_CPU_OCB_2"),
+       PINCTRL_PIN(78, "USB_CPU_OCB_3"),
+       /* GPP_D */
+       PINCTRL_PIN(79, "SPI1_CSB"),
+       PINCTRL_PIN(80, "SPI1_CLK"),
+       PINCTRL_PIN(81, "SPI1_MISO_IO_1"),
+       PINCTRL_PIN(82, "SPI1_MOSI_IO_0"),
+       PINCTRL_PIN(83, "SML1CLK"),
+       PINCTRL_PIN(84, "I2S2_SFRM"),
+       PINCTRL_PIN(85, "I2S2_TXD"),
+       PINCTRL_PIN(86, "I2S2_RXD"),
+       PINCTRL_PIN(87, "I2S2_SCLK"),
+       PINCTRL_PIN(88, "SML0CLK"),
+       PINCTRL_PIN(89, "SML0DATA"),
+       PINCTRL_PIN(90, "GPP_D_11"),
+       PINCTRL_PIN(91, "ISH_UART0_CTSB"),
+       PINCTRL_PIN(92, "SPI1_IO_2"),
+       PINCTRL_PIN(93, "SPI1_IO_3"),
+       PINCTRL_PIN(94, "SML1DATA"),
+       PINCTRL_PIN(95, "GSPI3_CS0B"),
+       PINCTRL_PIN(96, "GSPI3_CLK"),
+       PINCTRL_PIN(97, "GSPI3_MISO"),
+       PINCTRL_PIN(98, "GSPI3_MOSI"),
+       PINCTRL_PIN(99, "UART3_RXD"),
+       PINCTRL_PIN(100, "UART3_TXD"),
+       PINCTRL_PIN(101, "UART3_RTSB"),
+       PINCTRL_PIN(102, "UART3_CTSB"),
+       PINCTRL_PIN(103, "SPI1_CLK_LOOPBK"),
+       PINCTRL_PIN(104, "GSPI3_CLK_LOOPBK"),
+       /* GPP_C */
+       PINCTRL_PIN(105, "SMBCLK"),
+       PINCTRL_PIN(106, "SMBDATA"),
+       PINCTRL_PIN(107, "SMBALERTB"),
+       PINCTRL_PIN(108, "ISH_UART0_RXD"),
+       PINCTRL_PIN(109, "ISH_UART0_TXD"),
+       PINCTRL_PIN(110, "SML0ALERTB"),
+       PINCTRL_PIN(111, "ISH_I2C2_SDA"),
+       PINCTRL_PIN(112, "ISH_I2C2_SCL"),
+       PINCTRL_PIN(113, "UART0_RXD"),
+       PINCTRL_PIN(114, "UART0_TXD"),
+       PINCTRL_PIN(115, "UART0_RTSB"),
+       PINCTRL_PIN(116, "UART0_CTSB"),
+       PINCTRL_PIN(117, "UART1_RXD"),
+       PINCTRL_PIN(118, "UART1_TXD"),
+       PINCTRL_PIN(119, "UART1_RTSB"),
+       PINCTRL_PIN(120, "UART1_CTSB"),
+       PINCTRL_PIN(121, "I2C0_SDA"),
+       PINCTRL_PIN(122, "I2C0_SCL"),
+       PINCTRL_PIN(123, "I2C1_SDA"),
+       PINCTRL_PIN(124, "I2C1_SCL"),
+       PINCTRL_PIN(125, "UART2_RXD"),
+       PINCTRL_PIN(126, "UART2_TXD"),
+       PINCTRL_PIN(127, "UART2_RTSB"),
+       PINCTRL_PIN(128, "UART2_CTSB"),
+       /* GPP_S */
+       PINCTRL_PIN(129, "SNDW1_CLK"),
+       PINCTRL_PIN(130, "SNDW1_DATA"),
+       PINCTRL_PIN(131, "SNDW2_CLK"),
+       PINCTRL_PIN(132, "SNDW2_DATA"),
+       PINCTRL_PIN(133, "SNDW3_CLK"),
+       PINCTRL_PIN(134, "SNDW3_DATA"),
+       PINCTRL_PIN(135, "SNDW4_CLK"),
+       PINCTRL_PIN(136, "SNDW4_DATA"),
+       /* GPP_G */
+       PINCTRL_PIN(137, "DDPA_CTRLCLK"),
+       PINCTRL_PIN(138, "DDPA_CTRLDATA"),
+       PINCTRL_PIN(139, "DNX_FORCE_RELOAD"),
+       PINCTRL_PIN(140, "GMII_MDC_0"),
+       PINCTRL_PIN(141, "GMII_MDIO_0"),
+       PINCTRL_PIN(142, "SLP_DRAMB"),
+       PINCTRL_PIN(143, "GPPC_G_6"),
+       PINCTRL_PIN(144, "GPPC_G_7"),
+       PINCTRL_PIN(145, "ISH_SPI_CSB"),
+       PINCTRL_PIN(146, "ISH_SPI_CLK"),
+       PINCTRL_PIN(147, "ISH_SPI_MISO"),
+       PINCTRL_PIN(148, "ISH_SPI_MOSI"),
+       PINCTRL_PIN(149, "DDP1_CTRLCLK"),
+       PINCTRL_PIN(150, "DDP1_CTRLDATA"),
+       PINCTRL_PIN(151, "DDP2_CTRLCLK"),
+       PINCTRL_PIN(152, "DDP2_CTRLDATA"),
+       PINCTRL_PIN(153, "GSPI2_CLK_LOOPBK"),
+       /* vGPIO */
+       PINCTRL_PIN(154, "CNV_BTEN"),
+       PINCTRL_PIN(155, "CNV_BT_HOST_WAKEB"),
+       PINCTRL_PIN(156, "CNV_BT_IF_SELECT"),
+       PINCTRL_PIN(157, "vCNV_BT_UART_TXD"),
+       PINCTRL_PIN(158, "vCNV_BT_UART_RXD"),
+       PINCTRL_PIN(159, "vCNV_BT_UART_CTS_B"),
+       PINCTRL_PIN(160, "vCNV_BT_UART_RTS_B"),
+       PINCTRL_PIN(161, "vCNV_MFUART1_TXD"),
+       PINCTRL_PIN(162, "vCNV_MFUART1_RXD"),
+       PINCTRL_PIN(163, "vCNV_MFUART1_CTS_B"),
+       PINCTRL_PIN(164, "vCNV_MFUART1_RTS_B"),
+       PINCTRL_PIN(165, "vUART0_TXD"),
+       PINCTRL_PIN(166, "vUART0_RXD"),
+       PINCTRL_PIN(167, "vUART0_CTS_B"),
+       PINCTRL_PIN(168, "vUART0_RTS_B"),
+       PINCTRL_PIN(169, "vISH_UART0_TXD"),
+       PINCTRL_PIN(170, "vISH_UART0_RXD"),
+       PINCTRL_PIN(171, "vISH_UART0_CTS_B"),
+       PINCTRL_PIN(172, "vISH_UART0_RTS_B"),
+       PINCTRL_PIN(173, "vCNV_BT_I2S_BCLK"),
+       PINCTRL_PIN(174, "vCNV_BT_I2S_WS_SYNC"),
+       PINCTRL_PIN(175, "vCNV_BT_I2S_SDO"),
+       PINCTRL_PIN(176, "vCNV_BT_I2S_SDI"),
+       PINCTRL_PIN(177, "vI2S2_SCLK"),
+       PINCTRL_PIN(178, "vI2S2_SFRM"),
+       PINCTRL_PIN(179, "vI2S2_TXD"),
+       PINCTRL_PIN(180, "vI2S2_RXD"),
+       /* GPP_E */
+       PINCTRL_PIN(181, "SATAXPCIE_0"),
+       PINCTRL_PIN(182, "SATAXPCIE_1"),
+       PINCTRL_PIN(183, "SATAXPCIE_2"),
+       PINCTRL_PIN(184, "CPU_GP_0"),
+       PINCTRL_PIN(185, "SATA_DEVSLP_0"),
+       PINCTRL_PIN(186, "SATA_DEVSLP_1"),
+       PINCTRL_PIN(187, "SATA_DEVSLP_2"),
+       PINCTRL_PIN(188, "CPU_GP_1"),
+       PINCTRL_PIN(189, "SATA_LEDB"),
+       PINCTRL_PIN(190, "USB2_OCB_0"),
+       PINCTRL_PIN(191, "USB2_OCB_1"),
+       PINCTRL_PIN(192, "USB2_OCB_2"),
+       PINCTRL_PIN(193, "USB2_OCB_3"),
+       /* GPP_F */
+       PINCTRL_PIN(194, "SATAXPCIE_3"),
+       PINCTRL_PIN(195, "SATAXPCIE_4"),
+       PINCTRL_PIN(196, "SATAXPCIE_5"),
+       PINCTRL_PIN(197, "SATAXPCIE_6"),
+       PINCTRL_PIN(198, "SATAXPCIE_7"),
+       PINCTRL_PIN(199, "SATA_DEVSLP_3"),
+       PINCTRL_PIN(200, "SATA_DEVSLP_4"),
+       PINCTRL_PIN(201, "SATA_DEVSLP_5"),
+       PINCTRL_PIN(202, "SATA_DEVSLP_6"),
+       PINCTRL_PIN(203, "SATA_DEVSLP_7"),
+       PINCTRL_PIN(204, "SATA_SCLOCK"),
+       PINCTRL_PIN(205, "SATA_SLOAD"),
+       PINCTRL_PIN(206, "SATA_SDATAOUT1"),
+       PINCTRL_PIN(207, "SATA_SDATAOUT0"),
+       PINCTRL_PIN(208, "PS_ONB"),
+       PINCTRL_PIN(209, "M2_SKT2_CFG_0"),
+       PINCTRL_PIN(210, "M2_SKT2_CFG_1"),
+       PINCTRL_PIN(211, "M2_SKT2_CFG_2"),
+       PINCTRL_PIN(212, "M2_SKT2_CFG_3"),
+       PINCTRL_PIN(213, "L_VDDEN"),
+       PINCTRL_PIN(214, "L_BKLTEN"),
+       PINCTRL_PIN(215, "L_BKLTCTL"),
+       PINCTRL_PIN(216, "VNN_CTRL"),
+       PINCTRL_PIN(217, "GPP_F_23"),
+       /* GPP_H */
+       PINCTRL_PIN(218, "SRCCLKREQB_6"),
+       PINCTRL_PIN(219, "SRCCLKREQB_7"),
+       PINCTRL_PIN(220, "SRCCLKREQB_8"),
+       PINCTRL_PIN(221, "SRCCLKREQB_9"),
+       PINCTRL_PIN(222, "SRCCLKREQB_10"),
+       PINCTRL_PIN(223, "SRCCLKREQB_11"),
+       PINCTRL_PIN(224, "SRCCLKREQB_12"),
+       PINCTRL_PIN(225, "SRCCLKREQB_13"),
+       PINCTRL_PIN(226, "SRCCLKREQB_14"),
+       PINCTRL_PIN(227, "SRCCLKREQB_15"),
+       PINCTRL_PIN(228, "SML2CLK"),
+       PINCTRL_PIN(229, "SML2DATA"),
+       PINCTRL_PIN(230, "SML2ALERTB"),
+       PINCTRL_PIN(231, "SML3CLK"),
+       PINCTRL_PIN(232, "SML3DATA"),
+       PINCTRL_PIN(233, "SML3ALERTB"),
+       PINCTRL_PIN(234, "SML4CLK"),
+       PINCTRL_PIN(235, "SML4DATA"),
+       PINCTRL_PIN(236, "SML4ALERTB"),
+       PINCTRL_PIN(237, "ISH_I2C0_SDA"),
+       PINCTRL_PIN(238, "ISH_I2C0_SCL"),
+       PINCTRL_PIN(239, "ISH_I2C1_SDA"),
+       PINCTRL_PIN(240, "ISH_I2C1_SCL"),
+       PINCTRL_PIN(241, "TIME_SYNC_0"),
+       /* GPP_J */
+       PINCTRL_PIN(242, "CNV_PA_BLANKING"),
+       PINCTRL_PIN(243, "CPU_C10_GATEB"),
+       PINCTRL_PIN(244, "CNV_BRI_DT"),
+       PINCTRL_PIN(245, "CNV_BRI_RSP"),
+       PINCTRL_PIN(246, "CNV_RGI_DT"),
+       PINCTRL_PIN(247, "CNV_RGI_RSP"),
+       PINCTRL_PIN(248, "CNV_MFUART2_RXD"),
+       PINCTRL_PIN(249, "CNV_MFUART2_TXD"),
+       PINCTRL_PIN(250, "GPP_J_8"),
+       PINCTRL_PIN(251, "GPP_J_9"),
+       /* GPP_K */
+       PINCTRL_PIN(252, "GSXDOUT"),
+       PINCTRL_PIN(253, "GSXSLOAD"),
+       PINCTRL_PIN(254, "GSXDIN"),
+       PINCTRL_PIN(255, "GSXSRESETB"),
+       PINCTRL_PIN(256, "GSXCLK"),
+       PINCTRL_PIN(257, "ADR_COMPLETE"),
+       PINCTRL_PIN(258, "DDSP_HPD_A"),
+       PINCTRL_PIN(259, "DDSP_HPD_B"),
+       PINCTRL_PIN(260, "CORE_VID_0"),
+       PINCTRL_PIN(261, "CORE_VID_1"),
+       PINCTRL_PIN(262, "DDSP_HPD_C"),
+       PINCTRL_PIN(263, "GPP_K_11"),
+       PINCTRL_PIN(264, "SYS_PWROK"),
+       PINCTRL_PIN(265, "SYS_RESETB"),
+       PINCTRL_PIN(266, "MLK_RSTB"),
+       /* GPP_I */
+       PINCTRL_PIN(267, "PMCALERTB"),
+       PINCTRL_PIN(268, "DDSP_HPD_1"),
+       PINCTRL_PIN(269, "DDSP_HPD_2"),
+       PINCTRL_PIN(270, "DDSP_HPD_3"),
+       PINCTRL_PIN(271, "DDSP_HPD_4"),
+       PINCTRL_PIN(272, "DDPB_CTRLCLK"),
+       PINCTRL_PIN(273, "DDPB_CTRLDATA"),
+       PINCTRL_PIN(274, "DDPC_CTRLCLK"),
+       PINCTRL_PIN(275, "DDPC_CTRLDATA"),
+       PINCTRL_PIN(276, "FUSA_DIAGTEST_EN"),
+       PINCTRL_PIN(277, "FUSA_DIAGTEST_MODE"),
+       PINCTRL_PIN(278, "USB2_OCB_4"),
+       PINCTRL_PIN(279, "USB2_OCB_5"),
+       PINCTRL_PIN(280, "USB2_OCB_6"),
+       PINCTRL_PIN(281, "USB2_OCB_7"),
+       /* JTAG */
+       PINCTRL_PIN(282, "JTAG_TDO"),
+       PINCTRL_PIN(283, "JTAGX"),
+       PINCTRL_PIN(284, "PRDYB"),
+       PINCTRL_PIN(285, "PREQB"),
+       PINCTRL_PIN(286, "JTAG_TDI"),
+       PINCTRL_PIN(287, "JTAG_TMS"),
+       PINCTRL_PIN(288, "JTAG_TCK"),
+       PINCTRL_PIN(289, "DBG_PMODE"),
+       PINCTRL_PIN(290, "CPU_TRSTB"),
+};
+
+static const struct intel_padgroup tglh_community0_gpps[] = {
+       TGL_GPP(0, 0, 24, 0),                           /* GPP_A */
+       TGL_GPP(1, 25, 44, 128),                        /* GPP_R */
+       TGL_GPP(2, 45, 70, 32),                         /* GPP_B */
+       TGL_GPP(3, 71, 78, INTEL_GPIO_BASE_NOMAP),      /* vGPIO_0 */
+};
+
+static const struct intel_padgroup tglh_community1_gpps[] = {
+       TGL_GPP(0, 79, 104, 96),                        /* GPP_D */
+       TGL_GPP(1, 105, 128, 64),                       /* GPP_C */
+       TGL_GPP(2, 129, 136, 160),                      /* GPP_S */
+       TGL_GPP(3, 137, 153, 192),                      /* GPP_G */
+       TGL_GPP(4, 154, 180, 224),                      /* vGPIO */
+};
+
+static const struct intel_padgroup tglh_community3_gpps[] = {
+       TGL_GPP(0, 181, 193, 256),                      /* GPP_E */
+       TGL_GPP(1, 194, 217, 288),                      /* GPP_F */
+};
+
+static const struct intel_padgroup tglh_community4_gpps[] = {
+       TGL_GPP(0, 218, 241, 320),                      /* GPP_H */
+       TGL_GPP(1, 242, 251, 384),                      /* GPP_J */
+       TGL_GPP(2, 252, 266, 352),                      /* GPP_K */
+};
+
+static const struct intel_padgroup tglh_community5_gpps[] = {
+       TGL_GPP(0, 267, 281, 416),                      /* GPP_I */
+       TGL_GPP(1, 282, 290, INTEL_GPIO_BASE_NOMAP),    /* JTAG */
+};
+
+static const struct intel_community tglh_communities[] = {
+       TGL_COMMUNITY(0, 0, 78, tglh_community0_gpps),
+       TGL_COMMUNITY(1, 79, 180, tglh_community1_gpps),
+       TGL_COMMUNITY(2, 181, 217, tglh_community3_gpps),
+       TGL_COMMUNITY(3, 218, 266, tglh_community4_gpps),
+       TGL_COMMUNITY(4, 267, 290, tglh_community5_gpps),
+};
+
+static const struct intel_pinctrl_soc_data tglh_soc_data = {
+       .pins = tglh_pins,
+       .npins = ARRAY_SIZE(tglh_pins),
+       .communities = tglh_communities,
+       .ncommunities = ARRAY_SIZE(tglh_communities),
+};
+
 static const struct acpi_device_id tgl_pinctrl_acpi_match[] = {
        { "INT34C5", (kernel_ulong_t)&tgllp_soc_data },
+       { "INT34C6", (kernel_ulong_t)&tglh_soc_data },
        { }
 };
 MODULE_DEVICE_TABLE(acpi, tgl_pinctrl_acpi_match);
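The pad-group tables above carry the pin-to-GPIO translation for each community: every TGL_GPP() entry names the first and last hardware pin of a group and the Linux GPIO base at which that group is exposed, with INTEL_GPIO_BASE_NOMAP marking groups (vGPIO_0, JTAG) that are not exported as GPIOs. The sketch below is only an illustration of the translation this data implies; the struct layout and helper name are simplified stand-ins, not the driver's actual interface.

/*
 * Illustrative only: a simplified view of how a hardware pin number maps
 * to a Linux GPIO offset using the pad-group data above. The struct and
 * helper are assumptions for the example, not the driver's real API;
 * EXAMPLE_GPIO_BASE_NOMAP stands in for INTEL_GPIO_BASE_NOMAP.
 */
#include <stddef.h>

#define EXAMPLE_GPIO_BASE_NOMAP  (-1)

struct example_padgroup {
	unsigned int first_pin;   /* first hardware pin in the group */
	unsigned int last_pin;    /* last hardware pin in the group */
	int gpio_base;            /* GPIO offset of first_pin, or NOMAP */
};

/* GPP_D, GPP_C, GPP_S, GPP_G, vGPIO -- mirrors tglh_community1_gpps[] */
static const struct example_padgroup community1[] = {
	{  79, 104,  96 },
	{ 105, 128,  64 },
	{ 129, 136, 160 },
	{ 137, 153, 192 },
	{ 154, 180, 224 },
};

static int example_pin_to_gpio(unsigned int pin)
{
	size_t i;

	for (i = 0; i < sizeof(community1) / sizeof(community1[0]); i++) {
		const struct example_padgroup *gpp = &community1[i];

		if (pin < gpp->first_pin || pin > gpp->last_pin)
			continue;
		if (gpp->gpio_base == EXAMPLE_GPIO_BASE_NOMAP)
			return -1;	/* group not exposed as GPIO */
		return gpp->gpio_base + (pin - gpp->first_pin);
	}
	return -1;	/* pin is not in this community */
}

Under this scheme, pin 105 (SMBCLK, the first pin of GPP_C) would land at GPIO offset 64, matching the table above.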
index f32d364..1cedc5f 100644 (file)
@@ -93,6 +93,18 @@ config PINCTRL_MT6765
        default ARM64 && ARCH_MEDIATEK
        select PINCTRL_MTK_PARIS
 
+config PINCTRL_MT6779
+       tristate "Mediatek MT6779 pin control"
+       depends on OF
+       depends on ARM64 || COMPILE_TEST
+       default ARM64 && ARCH_MEDIATEK
+       select PINCTRL_MTK_PARIS
+       help
+         Say yes here to support the pin controller and GPIO driver
+         on the MediaTek MT6779 SoC.
+         On MTK platforms, virtual GPIOs are supported and used to
+         map dedicated EINTs that have no real GPIO pin behind them.
+
 config PINCTRL_MT6797
        bool "Mediatek MT6797 pin control"
        depends on OF
index 4b71328..b0b07c5 100644 (file)
@@ -12,6 +12,7 @@ obj-$(CONFIG_PINCTRL_MT2712)  += pinctrl-mt2712.o
 obj-$(CONFIG_PINCTRL_MT8135)   += pinctrl-mt8135.o
 obj-$(CONFIG_PINCTRL_MT8127)   += pinctrl-mt8127.o
 obj-$(CONFIG_PINCTRL_MT6765)   += pinctrl-mt6765.o
+obj-$(CONFIG_PINCTRL_MT6779)   += pinctrl-mt6779.o
 obj-$(CONFIG_PINCTRL_MT6797)   += pinctrl-mt6797.o
 obj-$(CONFIG_PINCTRL_MT7622)   += pinctrl-mt7622.o
 obj-$(CONFIG_PINCTRL_MT7623)   += pinctrl-mt7623.o
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt6779.c b/drivers/pinctrl/mediatek/pinctrl-mt6779.c
new file mode 100644 (file)
index 0000000..bb0851c
--- /dev/null
@@ -0,0 +1,785 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 MediaTek Inc.
+ * Author: Andy Teng <andy.teng@mediatek.com>
+ *
+ */
+
+#include <linux/module.h>
+#include "pinctrl-mtk-mt6779.h"
+#include "pinctrl-paris.h"
+
+/* MT6779 has multiple register bases for programming pin configuration,
+ * listed below:
+ * gpio:0x10005000,     iocfg_rm:0x11C20000, iocfg_br:0x11D10000,
+ * iocfg_lm:0x11E20000, iocfg_lb:0x11E70000, iocfg_rt:0x11EA0000,
+ * iocfg_lt:0x11F20000, iocfg_tl:0x11F30000
+ * The i_base argument selects which of these bases a pin is mapped into.
+ */
+
+#define PIN_FIELD_BASE(s_pin, e_pin, i_base, s_addr, x_addrs, s_bit, x_bits) \
+       PIN_FIELD_CALC(s_pin, e_pin, i_base, s_addr, x_addrs, s_bit, x_bits, \
+                      32, 0)
+
+#define PINS_FIELD_BASE(s_pin, e_pin, i_base, s_addr, x_addrs, s_bit, x_bits) \
+       PIN_FIELD_CALC(s_pin, e_pin, i_base, s_addr, x_addrs, s_bit, x_bits,  \
+                      32, 1)
+
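Each table entry below encodes a register layout. As a hedged illustration (assuming the usual MTK common-v2 lookup: the overall bit offset is s_bit plus (pin - s_pin) * x_bits, split across 32-bit registers spaced x_addrs apart, with the PINS_FIELD_BASE variant sharing the s_bit field among all pins in its range), the following sketch shows where one pin's field would land; the names are invented for the example and are not the driver's API.

/*
 * Illustrative sketch only: locate the register offset and bit position
 * of a pin's field from a PIN_FIELD_BASE()-style entry, assuming 32-bit
 * registers. Struct and function names are examples, not driver API.
 */
struct example_field_calc {
	unsigned int s_pin, e_pin;  /* pin range covered by the entry */
	unsigned int i_base;        /* register base index (gpio, iocfg_*) */
	unsigned int s_addr;        /* offset of the first register */
	unsigned int x_addrs;       /* stride between consecutive registers */
	unsigned int s_bit;         /* bit position of the first field */
	unsigned int x_bits;        /* width of each field in bits */
};

static void example_locate_field(const struct example_field_calc *c,
				 unsigned int pin, int fixed,
				 unsigned int *offset, unsigned int *bitpos)
{
	/* "fixed" models PINS_FIELD_BASE: every pin shares the s_bit field */
	unsigned int bits = fixed ? c->s_bit
				  : c->s_bit + (pin - c->s_pin) * c->x_bits;

	*offset = c->s_addr + (bits / 32) * c->x_addrs;
	*bitpos = bits % 32;
}

/*
 * Example: the mode entry PIN_FIELD_BASE(0, 7, 0, 0x0300, 0x10, 0, 4)
 * packs eight 4-bit fields per register, so pin 5's mode field would sit
 * at offset 0x0300, bits 20..23, in the "gpio" base -- consistent with
 * the next entry for pins 8..15 starting at 0x0310.
 */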
+static const struct mtk_pin_field_calc mt6779_pin_mode_range[] = {
+       PIN_FIELD_BASE(0, 7, 0, 0x0300, 0x10, 0, 4),
+       PIN_FIELD_BASE(8, 15, 0, 0x0310, 0x10, 0, 4),
+       PIN_FIELD_BASE(16, 23, 0, 0x0320, 0x10, 0, 4),
+       PIN_FIELD_BASE(24, 31, 0, 0x0330, 0x10, 0, 4),
+       PIN_FIELD_BASE(32, 39, 0, 0x0340, 0x10, 0, 4),
+       PIN_FIELD_BASE(40, 47, 0, 0x0350, 0x10, 0, 4),
+       PIN_FIELD_BASE(48, 55, 0, 0x0360, 0x10, 0, 4),
+       PIN_FIELD_BASE(56, 63, 0, 0x0370, 0x10, 0, 4),
+       PIN_FIELD_BASE(64, 71, 0, 0x0380, 0x10, 0, 4),
+       PIN_FIELD_BASE(72, 79, 0, 0x0390, 0x10, 0, 4),
+       PIN_FIELD_BASE(80, 87, 0, 0x03A0, 0x10, 0, 4),
+       PIN_FIELD_BASE(88, 95, 0, 0x03B0, 0x10, 0, 4),
+       PIN_FIELD_BASE(96, 103, 0, 0x03C0, 0x10, 0, 4),
+       PIN_FIELD_BASE(104, 111, 0, 0x03D0, 0x10, 0, 4),
+       PIN_FIELD_BASE(112, 119, 0, 0x03E0, 0x10, 0, 4),
+       PIN_FIELD_BASE(120, 127, 0, 0x03F0, 0x10, 0, 4),
+       PIN_FIELD_BASE(128, 135, 0, 0x0400, 0x10, 0, 4),
+       PIN_FIELD_BASE(136, 143, 0, 0x0410, 0x10, 0, 4),
+       PIN_FIELD_BASE(144, 151, 0, 0x0420, 0x10, 0, 4),
+       PIN_FIELD_BASE(152, 159, 0, 0x0430, 0x10, 0, 4),
+       PIN_FIELD_BASE(160, 167, 0, 0x0440, 0x10, 0, 4),
+       PIN_FIELD_BASE(168, 175, 0, 0x0450, 0x10, 0, 4),
+       PIN_FIELD_BASE(176, 183, 0, 0x0460, 0x10, 0, 4),
+       PIN_FIELD_BASE(184, 191, 0, 0x0470, 0x10, 0, 4),
+       PIN_FIELD_BASE(192, 199, 0, 0x0480, 0x10, 0, 4),
+       PIN_FIELD_BASE(200, 202, 0, 0x0490, 0x10, 0, 4),
+};
+
+static const struct mtk_pin_field_calc mt6779_pin_dir_range[] = {
+       PIN_FIELD_BASE(0, 31, 0, 0x0000, 0x10, 0, 1),
+       PIN_FIELD_BASE(32, 63, 0, 0x0010, 0x10, 0, 1),
+       PIN_FIELD_BASE(64, 95, 0, 0x0020, 0x10, 0, 1),
+       PIN_FIELD_BASE(96, 127, 0, 0x0030, 0x10, 0, 1),
+       PIN_FIELD_BASE(128, 159, 0, 0x0040, 0x10, 0, 1),
+       PIN_FIELD_BASE(160, 191, 0, 0x0050, 0x10, 0, 1),
+       PIN_FIELD_BASE(192, 202, 0, 0x0060, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt6779_pin_di_range[] = {
+       PIN_FIELD_BASE(0, 31, 0, 0x0200, 0x10, 0, 1),
+       PIN_FIELD_BASE(32, 63, 0, 0x0210, 0x10, 0, 1),
+       PIN_FIELD_BASE(64, 95, 0, 0x0220, 0x10, 0, 1),
+       PIN_FIELD_BASE(96, 127, 0, 0x0230, 0x10, 0, 1),
+       PIN_FIELD_BASE(128, 159, 0, 0x0240, 0x10, 0, 1),
+       PIN_FIELD_BASE(160, 191, 0, 0x0250, 0x10, 0, 1),
+       PIN_FIELD_BASE(192, 202, 0, 0x0260, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt6779_pin_do_range[] = {
+       PIN_FIELD_BASE(0, 31, 0, 0x0100, 0x10, 0, 1),
+       PIN_FIELD_BASE(32, 63, 0, 0x0110, 0x10, 0, 1),
+       PIN_FIELD_BASE(64, 95, 0, 0x0120, 0x10, 0, 1),
+       PIN_FIELD_BASE(96, 127, 0, 0x0130, 0x10, 0, 1),
+       PIN_FIELD_BASE(128, 159, 0, 0x0140, 0x10, 0, 1),
+       PIN_FIELD_BASE(160, 191, 0, 0x0150, 0x10, 0, 1),
+       PIN_FIELD_BASE(192, 202, 0, 0x0160, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt6779_pin_ies_range[] = {
+       PIN_FIELD_BASE(0, 9, 6, 0x0030, 0x10, 3, 1),
+       PIN_FIELD_BASE(10, 16, 3, 0x0050, 0x10, 0, 1),
+       PIN_FIELD_BASE(17, 18, 6, 0x0030, 0x10, 28, 1),
+       PIN_FIELD_BASE(19, 19, 6, 0x0030, 0x10, 27, 1),
+       PIN_FIELD_BASE(20, 20, 6, 0x0030, 0x10, 26, 1),
+       PIN_FIELD_BASE(21, 24, 6, 0x0030, 0x10, 19, 1),
+       PIN_FIELD_BASE(25, 25, 6, 0x0030, 0x10, 30, 1),
+       PIN_FIELD_BASE(26, 26, 6, 0x0030, 0x10, 23, 1),
+       PIN_FIELD_BASE(27, 27, 6, 0x0030, 0x10, 0, 1),
+       PIN_FIELD_BASE(28, 29, 6, 0x0030, 0x10, 24, 1),
+       PIN_FIELD_BASE(30, 30, 6, 0x0030, 0x10, 16, 1),
+       PIN_FIELD_BASE(31, 31, 6, 0x0030, 0x10, 13, 1),
+       PIN_FIELD_BASE(32, 32, 6, 0x0030, 0x10, 15, 1),
+       PIN_FIELD_BASE(33, 33, 6, 0x0030, 0x10, 17, 1),
+       PIN_FIELD_BASE(34, 34, 6, 0x0030, 0x10, 14, 1),
+       PIN_FIELD_BASE(35, 35, 6, 0x0040, 0x10, 4, 1),
+       PIN_FIELD_BASE(36, 36, 6, 0x0030, 0x10, 31, 1),
+       PIN_FIELD_BASE(37, 37, 6, 0x0040, 0x10, 5, 1),
+       PIN_FIELD_BASE(38, 41, 6, 0x0040, 0x10, 0, 1),
+       PIN_FIELD_BASE(42, 43, 6, 0x0030, 0x10, 1, 1),
+       PIN_FIELD_BASE(44, 44, 6, 0x0030, 0x10, 18, 1),
+       PIN_FIELD_BASE(45, 45, 3, 0x0050, 0x10, 14, 1),
+       PIN_FIELD_BASE(46, 46, 3, 0x0050, 0x10, 22, 1),
+       PIN_FIELD_BASE(47, 47, 3, 0x0050, 0x10, 25, 1),
+       PIN_FIELD_BASE(48, 48, 3, 0x0050, 0x10, 24, 1),
+       PIN_FIELD_BASE(49, 49, 3, 0x0050, 0x10, 26, 1),
+       PIN_FIELD_BASE(50, 50, 3, 0x0050, 0x10, 23, 1),
+       PIN_FIELD_BASE(51, 51, 3, 0x0050, 0x10, 11, 1),
+       PIN_FIELD_BASE(52, 52, 3, 0x0050, 0x10, 19, 1),
+       PIN_FIELD_BASE(53, 54, 3, 0x0050, 0x10, 27, 1),
+       PIN_FIELD_BASE(55, 55, 3, 0x0050, 0x10, 13, 1),
+       PIN_FIELD_BASE(56, 56, 3, 0x0050, 0x10, 21, 1),
+       PIN_FIELD_BASE(57, 57, 3, 0x0050, 0x10, 10, 1),
+       PIN_FIELD_BASE(58, 58, 3, 0x0050, 0x10, 9, 1),
+       PIN_FIELD_BASE(59, 60, 3, 0x0050, 0x10, 7, 1),
+       PIN_FIELD_BASE(61, 61, 3, 0x0050, 0x10, 12, 1),
+       PIN_FIELD_BASE(62, 62, 3, 0x0050, 0x10, 20, 1),
+       PIN_FIELD_BASE(63, 63, 3, 0x0050, 0x10, 17, 1),
+       PIN_FIELD_BASE(64, 64, 3, 0x0050, 0x10, 16, 1),
+       PIN_FIELD_BASE(65, 65, 3, 0x0050, 0x10, 18, 1),
+       PIN_FIELD_BASE(66, 66, 3, 0x0050, 0x10, 15, 1),
+       PIN_FIELD_BASE(67, 67, 2, 0x0060, 0x10, 7, 1),
+       PIN_FIELD_BASE(68, 68, 2, 0x0060, 0x10, 6, 1),
+       PIN_FIELD_BASE(69, 69, 2, 0x0060, 0x10, 8, 1),
+       PIN_FIELD_BASE(70, 71, 2, 0x0060, 0x10, 4, 1),
+       PIN_FIELD_BASE(72, 72, 4, 0x0020, 0x10, 3, 1),
+       PIN_FIELD_BASE(73, 73, 4, 0x0020, 0x10, 2, 1),
+       PIN_FIELD_BASE(74, 74, 4, 0x0020, 0x10, 1, 1),
+       PIN_FIELD_BASE(75, 75, 4, 0x0020, 0x10, 4, 1),
+       PIN_FIELD_BASE(76, 76, 4, 0x0020, 0x10, 12, 1),
+       PIN_FIELD_BASE(77, 77, 4, 0x0020, 0x10, 11, 1),
+       PIN_FIELD_BASE(78, 78, 2, 0x0050, 0x10, 18, 1),
+       PIN_FIELD_BASE(79, 79, 2, 0x0050, 0x10, 17, 1),
+       PIN_FIELD_BASE(80, 81, 2, 0x0050, 0x10, 19, 1),
+       PIN_FIELD_BASE(82, 88, 2, 0x0050, 0x10, 1, 1),
+       PIN_FIELD_BASE(89, 89, 2, 0x0050, 0x10, 16, 1),
+       PIN_FIELD_BASE(90, 90, 2, 0x0050, 0x10, 15, 1),
+       PIN_FIELD_BASE(91, 91, 2, 0x0050, 0x10, 14, 1),
+       PIN_FIELD_BASE(92, 92, 2, 0x0050, 0x10, 8, 1),
+       PIN_FIELD_BASE(93, 93, 4, 0x0020, 0x10, 0, 1),
+       PIN_FIELD_BASE(94, 94, 2, 0x0050, 0x10, 0, 1),
+       PIN_FIELD_BASE(95, 95, 4, 0x0020, 0x10, 7, 1),
+       PIN_FIELD_BASE(96, 96, 4, 0x0020, 0x10, 5, 1),
+       PIN_FIELD_BASE(97, 97, 4, 0x0020, 0x10, 8, 1),
+       PIN_FIELD_BASE(98, 98, 4, 0x0020, 0x10, 6, 1),
+       PIN_FIELD_BASE(99, 99, 2, 0x0060, 0x10, 9, 1),
+       PIN_FIELD_BASE(100, 100, 2, 0x0060, 0x10, 12, 1),
+       PIN_FIELD_BASE(101, 101, 2, 0x0060, 0x10, 10, 1),
+       PIN_FIELD_BASE(102, 102, 2, 0x0060, 0x10, 13, 1),
+       PIN_FIELD_BASE(103, 103, 2, 0x0060, 0x10, 11, 1),
+       PIN_FIELD_BASE(104, 104, 2, 0x0060, 0x10, 14, 1),
+       PIN_FIELD_BASE(105, 105, 2, 0x0050, 0x10, 10, 1),
+       PIN_FIELD_BASE(106, 106, 2, 0x0050, 0x10, 9, 1),
+       PIN_FIELD_BASE(107, 108, 2, 0x0050, 0x10, 12, 1),
+       PIN_FIELD_BASE(109, 109, 2, 0x0050, 0x10, 11, 1),
+       PIN_FIELD_BASE(110, 110, 2, 0x0060, 0x10, 16, 1),
+       PIN_FIELD_BASE(111, 111, 2, 0x0060, 0x10, 18, 1),
+       PIN_FIELD_BASE(112, 112, 2, 0x0060, 0x10, 15, 1),
+       PIN_FIELD_BASE(113, 113, 2, 0x0060, 0x10, 17, 1),
+       PIN_FIELD_BASE(114, 115, 2, 0x0050, 0x10, 26, 1),
+       PIN_FIELD_BASE(116, 117, 2, 0x0050, 0x10, 21, 1),
+       PIN_FIELD_BASE(118, 118, 2, 0x0050, 0x10, 31, 1),
+       PIN_FIELD_BASE(119, 119, 2, 0x0060, 0x10, 0, 1),
+       PIN_FIELD_BASE(120, 121, 2, 0x0050, 0x10, 23, 1),
+       PIN_FIELD_BASE(122, 123, 2, 0x0050, 0x10, 28, 1),
+       PIN_FIELD_BASE(124, 125, 2, 0x0060, 0x10, 1, 1),
+       PIN_FIELD_BASE(126, 127, 1, 0x0030, 0x10, 8, 1),
+       PIN_FIELD_BASE(128, 129, 1, 0x0030, 0x10, 17, 1),
+       PIN_FIELD_BASE(130, 130, 1, 0x0030, 0x10, 16, 1),
+       PIN_FIELD_BASE(131, 131, 1, 0x0030, 0x10, 19, 1),
+       PIN_FIELD_BASE(132, 132, 1, 0x0030, 0x10, 21, 1),
+       PIN_FIELD_BASE(133, 133, 1, 0x0030, 0x10, 20, 1),
+       PIN_FIELD_BASE(134, 135, 1, 0x0030, 0x10, 2, 1),
+       PIN_FIELD_BASE(136, 136, 1, 0x0030, 0x10, 7, 1),
+       PIN_FIELD_BASE(137, 137, 1, 0x0030, 0x10, 4, 1),
+       PIN_FIELD_BASE(138, 138, 1, 0x0030, 0x10, 6, 1),
+       PIN_FIELD_BASE(139, 139, 1, 0x0030, 0x10, 5, 1),
+       PIN_FIELD_BASE(140, 141, 1, 0x0030, 0x10, 0, 1),
+       PIN_FIELD_BASE(142, 142, 1, 0x0030, 0x10, 15, 1),
+       PIN_FIELD_BASE(143, 143, 5, 0x0020, 0x10, 15, 1),
+       PIN_FIELD_BASE(144, 144, 5, 0x0020, 0x10, 17, 1),
+       PIN_FIELD_BASE(145, 145, 5, 0x0020, 0x10, 16, 1),
+       PIN_FIELD_BASE(146, 146, 5, 0x0020, 0x10, 12, 1),
+       PIN_FIELD_BASE(147, 155, 5, 0x0020, 0x10, 0, 1),
+       PIN_FIELD_BASE(156, 157, 5, 0x0020, 0x10, 22, 1),
+       PIN_FIELD_BASE(158, 158, 5, 0x0020, 0x10, 21, 1),
+       PIN_FIELD_BASE(159, 159, 5, 0x0020, 0x10, 24, 1),
+       PIN_FIELD_BASE(160, 161, 5, 0x0020, 0x10, 19, 1),
+       PIN_FIELD_BASE(162, 166, 5, 0x0020, 0x10, 25, 1),
+       PIN_FIELD_BASE(167, 168, 7, 0x0010, 0x10, 1, 1),
+       PIN_FIELD_BASE(169, 169, 7, 0x0010, 0x10, 4, 1),
+       PIN_FIELD_BASE(170, 170, 7, 0x0010, 0x10, 6, 1),
+       PIN_FIELD_BASE(171, 171, 7, 0x0010, 0x10, 8, 1),
+       PIN_FIELD_BASE(172, 172, 7, 0x0010, 0x10, 3, 1),
+       PIN_FIELD_BASE(173, 173, 7, 0x0010, 0x10, 7, 1),
+       PIN_FIELD_BASE(174, 175, 7, 0x0010, 0x10, 9, 1),
+       PIN_FIELD_BASE(176, 176, 7, 0x0010, 0x10, 0, 1),
+       PIN_FIELD_BASE(177, 177, 7, 0x0010, 0x10, 5, 1),
+       PIN_FIELD_BASE(178, 178, 7, 0x0010, 0x10, 11, 1),
+       PIN_FIELD_BASE(179, 179, 4, 0x0020, 0x10, 13, 1),
+       PIN_FIELD_BASE(180, 180, 4, 0x0020, 0x10, 10, 1),
+       PIN_FIELD_BASE(181, 183, 1, 0x0030, 0x10, 22, 1),
+       PIN_FIELD_BASE(184, 184, 1, 0x0030, 0x10, 12, 1),
+       PIN_FIELD_BASE(185, 185, 1, 0x0030, 0x10, 11, 1),
+       PIN_FIELD_BASE(186, 186, 1, 0x0030, 0x10, 13, 1),
+       PIN_FIELD_BASE(187, 187, 1, 0x0030, 0x10, 10, 1),
+       PIN_FIELD_BASE(188, 188, 1, 0x0030, 0x10, 14, 1),
+       PIN_FIELD_BASE(189, 189, 5, 0x0020, 0x10, 9, 1),
+       PIN_FIELD_BASE(190, 190, 5, 0x0020, 0x10, 18, 1),
+       PIN_FIELD_BASE(191, 192, 5, 0x0020, 0x10, 13, 1),
+       PIN_FIELD_BASE(193, 194, 5, 0x0020, 0x10, 10, 1),
+       PIN_FIELD_BASE(195, 195, 2, 0x0050, 0x10, 30, 1),
+       PIN_FIELD_BASE(196, 196, 2, 0x0050, 0x10, 25, 1),
+       PIN_FIELD_BASE(197, 197, 2, 0x0060, 0x10, 3, 1),
+       PIN_FIELD_BASE(198, 199, 4, 0x0020, 0x10, 14, 1),
+       PIN_FIELD_BASE(200, 201, 6, 0x0040, 0x10, 6, 1),
+       PIN_FIELD_BASE(202, 202, 4, 0x0020, 0x10, 9, 1),
+};
+
+static const struct mtk_pin_field_calc mt6779_pin_smt_range[] = {
+       PINS_FIELD_BASE(0, 9, 6, 0x00c0, 0x10, 3, 1),
+       PIN_FIELD_BASE(10, 11, 3, 0x00e0, 0x10, 0, 1),
+       PINS_FIELD_BASE(12, 15, 3, 0x00e0, 0x10, 2, 1),
+       PIN_FIELD_BASE(16, 16, 3, 0x00e0, 0x10, 3, 1),
+       PINS_FIELD_BASE(17, 20, 6, 0x00c0, 0x10, 11, 1),
+       PINS_FIELD_BASE(21, 24, 6, 0x00c0, 0x10, 7, 1),
+       PIN_FIELD_BASE(25, 25, 6, 0x00c0, 0x10, 12, 1),
+       PIN_FIELD_BASE(26, 26, 6, 0x00c0, 0x10, 8, 1),
+       PIN_FIELD_BASE(27, 27, 6, 0x00c0, 0x10, 0, 1),
+       PIN_FIELD_BASE(28, 29, 6, 0x00c0, 0x10, 9, 1),
+       PINS_FIELD_BASE(30, 32, 6, 0x00c0, 0x10, 4, 1),
+       PIN_FIELD_BASE(33, 33, 6, 0x00c0, 0x10, 5, 1),
+       PIN_FIELD_BASE(34, 34, 6, 0x00c0, 0x10, 4, 1),
+       PINS_FIELD_BASE(35, 41, 6, 0x00c0, 0x10, 13, 1),
+       PIN_FIELD_BASE(42, 43, 6, 0x00c0, 0x10, 1, 1),
+       PIN_FIELD_BASE(44, 44, 6, 0x00c0, 0x10, 6, 1),
+       PIN_FIELD_BASE(45, 45, 3, 0x00e0, 0x10, 8, 1),
+       PIN_FIELD_BASE(46, 46, 3, 0x00e0, 0x10, 13, 1),
+       PINS_FIELD_BASE(47, 50, 3, 0x00e0, 0x10, 14, 1),
+       PIN_FIELD_BASE(51, 51, 3, 0x00e0, 0x10, 5, 1),
+       PIN_FIELD_BASE(52, 52, 3, 0x00e0, 0x10, 10, 1),
+       PIN_FIELD_BASE(53, 54, 3, 0x00e0, 0x10, 15, 1),
+       PIN_FIELD_BASE(55, 55, 3, 0x00e0, 0x10, 7, 1),
+       PIN_FIELD_BASE(56, 56, 3, 0x00e0, 0x10, 12, 1),
+       PINS_FIELD_BASE(57, 60, 3, 0x00e0, 0x10, 4, 1),
+       PIN_FIELD_BASE(61, 61, 3, 0x00e0, 0x10, 6, 1),
+       PIN_FIELD_BASE(62, 62, 3, 0x00e0, 0x10, 11, 1),
+       PINS_FIELD_BASE(63, 66, 3, 0x00e0, 0x10, 9, 1),
+       PINS_FIELD_BASE(67, 69, 2, 0x00e0, 0x10, 11, 1),
+       PIN_FIELD_BASE(70, 71, 2, 0x00e0, 0x10, 10, 1),
+       PINS_FIELD_BASE(72, 75, 4, 0x0070, 0x10, 1, 1),
+       PINS_FIELD_BASE(76, 77, 4, 0x0070, 0x10, 4, 1),
+       PINS_FIELD_BASE(78, 86, 2, 0x00e0, 0x10, 1, 1),
+       PINS_FIELD_BASE(87, 92, 2, 0x00e0, 0x10, 2, 1),
+       PIN_FIELD_BASE(93, 93, 4, 0x0070, 0x10, 0, 1),
+       PIN_FIELD_BASE(94, 94, 2, 0x00e0, 0x10, 2, 1),
+       PINS_FIELD_BASE(95, 98, 4, 0x0070, 0x10, 2, 1),
+       PINS_FIELD_BASE(99, 104, 2, 0x00e0, 0x10, 12, 1),
+       PINS_FIELD_BASE(105, 109, 2, 0x00e0, 0x10, 0, 1),
+       PIN_FIELD_BASE(110, 110, 2, 0x00e0, 0x10, 14, 1),
+       PIN_FIELD_BASE(111, 111, 2, 0x00e0, 0x10, 16, 1),
+       PIN_FIELD_BASE(112, 112, 2, 0x00e0, 0x10, 13, 1),
+       PIN_FIELD_BASE(113, 113, 2, 0x00e0, 0x10, 15, 1),
+       PINS_FIELD_BASE(114, 115, 2, 0x00e0, 0x10, 4, 1),
+       PIN_FIELD_BASE(116, 117, 2, 0x00e0, 0x10, 5, 1),
+       PINS_FIELD_BASE(118, 119, 2, 0x00e0, 0x10, 4, 1),
+       PIN_FIELD_BASE(120, 121, 2, 0x00e0, 0x10, 7, 1),
+       PINS_FIELD_BASE(122, 125, 2, 0x00e0, 0x10, 3, 1),
+       PINS_FIELD_BASE(126, 127, 1, 0x00c0, 0x10, 5, 1),
+       PINS_FIELD_BASE(128, 130, 1, 0x00c0, 0x10, 9, 1),
+       PINS_FIELD_BASE(131, 133, 1, 0x00c0, 0x10, 10, 1),
+       PIN_FIELD_BASE(134, 135, 1, 0x00c0, 0x10, 2, 1),
+       PINS_FIELD_BASE(136, 139, 1, 0x00c0, 0x10, 4, 1),
+       PIN_FIELD_BASE(140, 141, 1, 0x00c0, 0x10, 0, 1),
+       PIN_FIELD_BASE(142, 142, 1, 0x00c0, 0x10, 8, 1),
+       PINS_FIELD_BASE(143, 146, 5, 0x0060, 0x10, 1, 1),
+       PINS_FIELD_BASE(147, 155, 5, 0x0060, 0x10, 0, 1),
+       PIN_FIELD_BASE(156, 157, 5, 0x0060, 0x10, 6, 1),
+       PIN_FIELD_BASE(158, 158, 5, 0x0060, 0x10, 5, 1),
+       PIN_FIELD_BASE(159, 159, 5, 0x0060, 0x10, 8, 1),
+       PIN_FIELD_BASE(160, 161, 5, 0x0060, 0x10, 3, 1),
+       PINS_FIELD_BASE(162, 166, 5, 0x0060, 0x10, 2, 1),
+       PIN_FIELD_BASE(167, 167, 7, 0x0060, 0x10, 1, 1),
+       PINS_FIELD_BASE(168, 174, 7, 0x0060, 0x10, 2, 1),
+       PIN_FIELD_BASE(175, 175, 7, 0x0060, 0x10, 3, 1),
+       PIN_FIELD_BASE(176, 176, 7, 0x0060, 0x10, 0, 1),
+       PINS_FIELD_BASE(177, 178, 7, 0x0060, 0x10, 2, 1),
+       PINS_FIELD_BASE(179, 180, 4, 0x0070, 0x10, 4, 1),
+       PIN_FIELD_BASE(181, 183, 1, 0x00c0, 0x10, 11, 1),
+       PINS_FIELD_BASE(184, 187, 1, 0x00c0, 0x10, 6, 1),
+       PIN_FIELD_BASE(188, 188, 1, 0x00c0, 0x10, 7, 1),
+       PINS_FIELD_BASE(189, 194, 5, 0x0060, 0x10, 1, 1),
+       PIN_FIELD_BASE(195, 195, 2, 0x00e0, 0x10, 3, 1),
+       PIN_FIELD_BASE(196, 196, 2, 0x00e0, 0x10, 9, 1),
+       PIN_FIELD_BASE(197, 197, 2, 0x00e0, 0x10, 3, 1),
+       PIN_FIELD_BASE(198, 199, 4, 0x0070, 0x10, 5, 1),
+       PIN_FIELD_BASE(200, 201, 6, 0x00c0, 0x10, 14, 1),
+       PIN_FIELD_BASE(202, 202, 4, 0x0070, 0x10, 3, 1),
+};
+
+static const struct mtk_pin_field_calc mt6779_pin_pu_range[] = {
+       PIN_FIELD_BASE(0, 9, 6, 0x0070, 0x10, 3, 1),
+       PIN_FIELD_BASE(16, 16, 3, 0x0080, 0x10, 0, 1),
+       PIN_FIELD_BASE(17, 18, 6, 0x0070, 0x10, 28, 1),
+       PIN_FIELD_BASE(19, 19, 6, 0x0070, 0x10, 27, 1),
+       PIN_FIELD_BASE(20, 20, 6, 0x0070, 0x10, 26, 1),
+       PIN_FIELD_BASE(21, 24, 6, 0x0070, 0x10, 19, 1),
+       PIN_FIELD_BASE(25, 25, 6, 0x0070, 0x10, 30, 1),
+       PIN_FIELD_BASE(26, 26, 6, 0x0070, 0x10, 23, 1),
+       PIN_FIELD_BASE(27, 27, 6, 0x0070, 0x10, 0, 1),
+       PIN_FIELD_BASE(28, 29, 6, 0x0070, 0x10, 24, 1),
+       PIN_FIELD_BASE(30, 30, 6, 0x0070, 0x10, 16, 1),
+       PIN_FIELD_BASE(31, 31, 6, 0x0070, 0x10, 13, 1),
+       PIN_FIELD_BASE(32, 32, 6, 0x0070, 0x10, 15, 1),
+       PIN_FIELD_BASE(33, 33, 6, 0x0070, 0x10, 17, 1),
+       PIN_FIELD_BASE(34, 34, 6, 0x0070, 0x10, 14, 1),
+       PIN_FIELD_BASE(35, 35, 6, 0x0080, 0x10, 5, 1),
+       PIN_FIELD_BASE(36, 36, 6, 0x0080, 0x10, 0, 1),
+       PIN_FIELD_BASE(37, 37, 6, 0x0080, 0x10, 6, 1),
+       PIN_FIELD_BASE(38, 41, 6, 0x0080, 0x10, 1, 1),
+       PIN_FIELD_BASE(42, 43, 6, 0x0070, 0x10, 1, 1),
+       PIN_FIELD_BASE(44, 44, 6, 0x0070, 0x10, 18, 1),
+       PIN_FIELD_BASE(45, 45, 3, 0x0080, 0x10, 4, 1),
+       PIN_FIELD_BASE(46, 46, 3, 0x0080, 0x10, 12, 1),
+       PIN_FIELD_BASE(47, 47, 3, 0x0080, 0x10, 15, 1),
+       PIN_FIELD_BASE(48, 48, 3, 0x0080, 0x10, 14, 1),
+       PIN_FIELD_BASE(49, 49, 3, 0x0080, 0x10, 16, 1),
+       PIN_FIELD_BASE(50, 50, 3, 0x0080, 0x10, 13, 1),
+       PIN_FIELD_BASE(51, 51, 3, 0x0080, 0x10, 1, 1),
+       PIN_FIELD_BASE(52, 52, 3, 0x0080, 0x10, 9, 1),
+       PIN_FIELD_BASE(53, 54, 3, 0x0080, 0x10, 18, 1),
+       PIN_FIELD_BASE(55, 55, 3, 0x0080, 0x10, 3, 1),
+       PIN_FIELD_BASE(56, 56, 3, 0x0080, 0x10, 11, 1),
+       PIN_FIELD_BASE(61, 61, 3, 0x0080, 0x10, 2, 1),
+       PIN_FIELD_BASE(62, 62, 3, 0x0080, 0x10, 10, 1),
+       PIN_FIELD_BASE(63, 63, 3, 0x0080, 0x10, 7, 1),
+       PIN_FIELD_BASE(64, 64, 3, 0x0080, 0x10, 6, 1),
+       PIN_FIELD_BASE(65, 65, 3, 0x0080, 0x10, 8, 1),
+       PIN_FIELD_BASE(66, 66, 3, 0x0080, 0x10, 5, 1),
+       PIN_FIELD_BASE(67, 67, 2, 0x00a0, 0x10, 7, 1),
+       PIN_FIELD_BASE(68, 68, 2, 0x00a0, 0x10, 6, 1),
+       PIN_FIELD_BASE(69, 69, 2, 0x00a0, 0x10, 8, 1),
+       PIN_FIELD_BASE(70, 71, 2, 0x00a0, 0x10, 4, 1),
+       PIN_FIELD_BASE(72, 72, 4, 0x0040, 0x10, 3, 1),
+       PIN_FIELD_BASE(73, 73, 4, 0x0040, 0x10, 2, 1),
+       PIN_FIELD_BASE(74, 74, 4, 0x0040, 0x10, 1, 1),
+       PIN_FIELD_BASE(75, 75, 4, 0x0040, 0x10, 4, 1),
+       PIN_FIELD_BASE(76, 76, 4, 0x0040, 0x10, 12, 1),
+       PIN_FIELD_BASE(77, 77, 4, 0x0040, 0x10, 11, 1),
+       PIN_FIELD_BASE(78, 78, 2, 0x0090, 0x10, 18, 1),
+       PIN_FIELD_BASE(79, 79, 2, 0x0090, 0x10, 17, 1),
+       PIN_FIELD_BASE(80, 81, 2, 0x0090, 0x10, 19, 1),
+       PIN_FIELD_BASE(82, 88, 2, 0x0090, 0x10, 1, 1),
+       PIN_FIELD_BASE(89, 89, 2, 0x0090, 0x10, 16, 1),
+       PIN_FIELD_BASE(90, 90, 2, 0x0090, 0x10, 15, 1),
+       PIN_FIELD_BASE(91, 91, 2, 0x0090, 0x10, 14, 1),
+       PIN_FIELD_BASE(92, 92, 2, 0x0090, 0x10, 8, 1),
+       PIN_FIELD_BASE(93, 93, 4, 0x0040, 0x10, 0, 1),
+       PIN_FIELD_BASE(94, 94, 2, 0x0090, 0x10, 0, 1),
+       PIN_FIELD_BASE(95, 95, 4, 0x0040, 0x10, 7, 1),
+       PIN_FIELD_BASE(96, 96, 4, 0x0040, 0x10, 5, 1),
+       PIN_FIELD_BASE(97, 97, 4, 0x0040, 0x10, 8, 1),
+       PIN_FIELD_BASE(98, 98, 4, 0x0040, 0x10, 6, 1),
+       PIN_FIELD_BASE(99, 99, 2, 0x00a0, 0x10, 9, 1),
+       PIN_FIELD_BASE(100, 100, 2, 0x00a0, 0x10, 12, 1),
+       PIN_FIELD_BASE(101, 101, 2, 0x00a0, 0x10, 10, 1),
+       PIN_FIELD_BASE(102, 102, 2, 0x00a0, 0x10, 13, 1),
+       PIN_FIELD_BASE(103, 103, 2, 0x00a0, 0x10, 11, 1),
+       PIN_FIELD_BASE(104, 104, 2, 0x00a0, 0x10, 14, 1),
+       PIN_FIELD_BASE(105, 105, 2, 0x0090, 0x10, 10, 1),
+       PIN_FIELD_BASE(106, 106, 2, 0x0090, 0x10, 9, 1),
+       PIN_FIELD_BASE(107, 108, 2, 0x0090, 0x10, 12, 1),
+       PIN_FIELD_BASE(109, 109, 2, 0x0090, 0x10, 11, 1),
+       PIN_FIELD_BASE(110, 110, 2, 0x00a0, 0x10, 16, 1),
+       PIN_FIELD_BASE(111, 111, 2, 0x00a0, 0x10, 18, 1),
+       PIN_FIELD_BASE(112, 112, 2, 0x00a0, 0x10, 15, 1),
+       PIN_FIELD_BASE(113, 113, 2, 0x00a0, 0x10, 17, 1),
+       PIN_FIELD_BASE(114, 115, 2, 0x0090, 0x10, 26, 1),
+       PIN_FIELD_BASE(116, 117, 2, 0x0090, 0x10, 21, 1),
+       PIN_FIELD_BASE(118, 118, 2, 0x0090, 0x10, 31, 1),
+       PIN_FIELD_BASE(119, 119, 2, 0x00a0, 0x10, 0, 1),
+       PIN_FIELD_BASE(120, 121, 2, 0x0090, 0x10, 23, 1),
+       PIN_FIELD_BASE(122, 123, 2, 0x0090, 0x10, 28, 1),
+       PIN_FIELD_BASE(124, 125, 2, 0x00a0, 0x10, 1, 1),
+       PIN_FIELD_BASE(126, 127, 1, 0x0070, 0x10, 2, 1),
+       PIN_FIELD_BASE(140, 141, 1, 0x0070, 0x10, 0, 1),
+       PIN_FIELD_BASE(142, 142, 1, 0x0070, 0x10, 9, 1),
+       PIN_FIELD_BASE(143, 143, 5, 0x0040, 0x10, 15, 1),
+       PIN_FIELD_BASE(144, 144, 5, 0x0040, 0x10, 17, 1),
+       PIN_FIELD_BASE(145, 145, 5, 0x0040, 0x10, 16, 1),
+       PIN_FIELD_BASE(146, 146, 5, 0x0040, 0x10, 12, 1),
+       PIN_FIELD_BASE(147, 155, 5, 0x0040, 0x10, 0, 1),
+       PIN_FIELD_BASE(156, 157, 5, 0x0040, 0x10, 22, 1),
+       PIN_FIELD_BASE(158, 158, 5, 0x0040, 0x10, 21, 1),
+       PIN_FIELD_BASE(159, 159, 5, 0x0040, 0x10, 24, 1),
+       PIN_FIELD_BASE(160, 161, 5, 0x0040, 0x10, 19, 1),
+       PIN_FIELD_BASE(162, 166, 5, 0x0040, 0x10, 25, 1),
+       PIN_FIELD_BASE(179, 179, 4, 0x0040, 0x10, 13, 1),
+       PIN_FIELD_BASE(180, 180, 4, 0x0040, 0x10, 10, 1),
+       PIN_FIELD_BASE(181, 183, 1, 0x0070, 0x10, 10, 1),
+       PIN_FIELD_BASE(184, 184, 1, 0x0070, 0x10, 6, 1),
+       PIN_FIELD_BASE(185, 185, 1, 0x0070, 0x10, 5, 1),
+       PIN_FIELD_BASE(186, 186, 1, 0x0070, 0x10, 7, 1),
+       PIN_FIELD_BASE(187, 187, 1, 0x0070, 0x10, 4, 1),
+       PIN_FIELD_BASE(188, 188, 1, 0x0070, 0x10, 8, 1),
+       PIN_FIELD_BASE(189, 189, 5, 0x0040, 0x10, 9, 1),
+       PIN_FIELD_BASE(190, 190, 5, 0x0040, 0x10, 18, 1),
+       PIN_FIELD_BASE(191, 192, 5, 0x0040, 0x10, 13, 1),
+       PIN_FIELD_BASE(193, 194, 5, 0x0040, 0x10, 10, 1),
+       PIN_FIELD_BASE(195, 195, 2, 0x0090, 0x10, 30, 1),
+       PIN_FIELD_BASE(196, 196, 2, 0x0090, 0x10, 25, 1),
+       PIN_FIELD_BASE(197, 197, 2, 0x00a0, 0x10, 3, 1),
+       PIN_FIELD_BASE(198, 199, 4, 0x0040, 0x10, 14, 1),
+       PIN_FIELD_BASE(200, 201, 6, 0x0080, 0x10, 7, 1),
+       PIN_FIELD_BASE(202, 202, 4, 0x0040, 0x10, 9, 1),
+};
+
+static const struct mtk_pin_field_calc mt6779_pin_pd_range[] = {
+       PIN_FIELD_BASE(0, 9, 6, 0x0050, 0x10, 3, 1),
+       PIN_FIELD_BASE(16, 16, 3, 0x0060, 0x10, 0, 1),
+       PIN_FIELD_BASE(17, 18, 6, 0x0050, 0x10, 28, 1),
+       PIN_FIELD_BASE(19, 19, 6, 0x0050, 0x10, 27, 1),
+       PIN_FIELD_BASE(20, 20, 6, 0x0050, 0x10, 26, 1),
+       PIN_FIELD_BASE(21, 24, 6, 0x0050, 0x10, 19, 1),
+       PIN_FIELD_BASE(25, 25, 6, 0x0050, 0x10, 30, 1),
+       PIN_FIELD_BASE(26, 26, 6, 0x0050, 0x10, 23, 1),
+       PIN_FIELD_BASE(27, 27, 6, 0x0050, 0x10, 0, 1),
+       PIN_FIELD_BASE(28, 29, 6, 0x0050, 0x10, 24, 1),
+       PIN_FIELD_BASE(30, 30, 6, 0x0050, 0x10, 16, 1),
+       PIN_FIELD_BASE(31, 31, 6, 0x0050, 0x10, 13, 1),
+       PIN_FIELD_BASE(32, 32, 6, 0x0050, 0x10, 15, 1),
+       PIN_FIELD_BASE(33, 33, 6, 0x0050, 0x10, 17, 1),
+       PIN_FIELD_BASE(34, 34, 6, 0x0050, 0x10, 14, 1),
+       PIN_FIELD_BASE(35, 35, 6, 0x0060, 0x10, 5, 1),
+       PIN_FIELD_BASE(36, 36, 6, 0x0060, 0x10, 0, 1),
+       PIN_FIELD_BASE(37, 37, 6, 0x0060, 0x10, 6, 1),
+       PIN_FIELD_BASE(38, 41, 6, 0x0060, 0x10, 1, 1),
+       PIN_FIELD_BASE(42, 43, 6, 0x0050, 0x10, 1, 1),
+       PIN_FIELD_BASE(44, 44, 6, 0x0050, 0x10, 18, 1),
+       PIN_FIELD_BASE(45, 45, 3, 0x0060, 0x10, 4, 1),
+       PIN_FIELD_BASE(46, 46, 3, 0x0060, 0x10, 12, 1),
+       PIN_FIELD_BASE(47, 47, 3, 0x0060, 0x10, 15, 1),
+       PIN_FIELD_BASE(48, 48, 3, 0x0060, 0x10, 14, 1),
+       PIN_FIELD_BASE(49, 49, 3, 0x0060, 0x10, 16, 1),
+       PIN_FIELD_BASE(50, 50, 3, 0x0060, 0x10, 13, 1),
+       PIN_FIELD_BASE(51, 51, 3, 0x0060, 0x10, 1, 1),
+       PIN_FIELD_BASE(52, 52, 3, 0x0060, 0x10, 9, 1),
+       PIN_FIELD_BASE(53, 54, 3, 0x0060, 0x10, 18, 1),
+       PIN_FIELD_BASE(55, 55, 3, 0x0060, 0x10, 3, 1),
+       PIN_FIELD_BASE(56, 56, 3, 0x0060, 0x10, 11, 1),
+       PIN_FIELD_BASE(61, 61, 3, 0x0060, 0x10, 2, 1),
+       PIN_FIELD_BASE(62, 62, 3, 0x0060, 0x10, 10, 1),
+       PIN_FIELD_BASE(63, 63, 3, 0x0060, 0x10, 7, 1),
+       PIN_FIELD_BASE(64, 64, 3, 0x0060, 0x10, 6, 1),
+       PIN_FIELD_BASE(65, 65, 3, 0x0060, 0x10, 8, 1),
+       PIN_FIELD_BASE(66, 66, 3, 0x0060, 0x10, 5, 1),
+       PIN_FIELD_BASE(67, 67, 2, 0x0080, 0x10, 7, 1),
+       PIN_FIELD_BASE(68, 68, 2, 0x0080, 0x10, 6, 1),
+       PIN_FIELD_BASE(69, 69, 2, 0x0080, 0x10, 8, 1),
+       PIN_FIELD_BASE(70, 71, 2, 0x0080, 0x10, 4, 1),
+       PIN_FIELD_BASE(72, 72, 4, 0x0030, 0x10, 3, 1),
+       PIN_FIELD_BASE(73, 73, 4, 0x0030, 0x10, 2, 1),
+       PIN_FIELD_BASE(74, 74, 4, 0x0030, 0x10, 1, 1),
+       PIN_FIELD_BASE(75, 75, 4, 0x0030, 0x10, 4, 1),
+       PIN_FIELD_BASE(76, 76, 4, 0x0030, 0x10, 12, 1),
+       PIN_FIELD_BASE(77, 77, 4, 0x0030, 0x10, 11, 1),
+       PIN_FIELD_BASE(78, 78, 2, 0x0070, 0x10, 18, 1),
+       PIN_FIELD_BASE(79, 79, 2, 0x0070, 0x10, 17, 1),
+       PIN_FIELD_BASE(80, 81, 2, 0x0070, 0x10, 19, 1),
+       PIN_FIELD_BASE(82, 88, 2, 0x0070, 0x10, 1, 1),
+       PIN_FIELD_BASE(89, 89, 2, 0x0070, 0x10, 16, 1),
+       PIN_FIELD_BASE(90, 90, 2, 0x0070, 0x10, 15, 1),
+       PIN_FIELD_BASE(91, 91, 2, 0x0070, 0x10, 14, 1),
+       PIN_FIELD_BASE(92, 92, 2, 0x0070, 0x10, 8, 1),
+       PIN_FIELD_BASE(93, 93, 4, 0x0030, 0x10, 0, 1),
+       PIN_FIELD_BASE(94, 94, 2, 0x0070, 0x10, 0, 1),
+       PIN_FIELD_BASE(95, 95, 4, 0x0030, 0x10, 7, 1),
+       PIN_FIELD_BASE(96, 96, 4, 0x0030, 0x10, 5, 1),
+       PIN_FIELD_BASE(97, 97, 4, 0x0030, 0x10, 8, 1),
+       PIN_FIELD_BASE(98, 98, 4, 0x0030, 0x10, 6, 1),
+       PIN_FIELD_BASE(99, 99, 2, 0x0080, 0x10, 9, 1),
+       PIN_FIELD_BASE(100, 100, 2, 0x0080, 0x10, 12, 1),
+       PIN_FIELD_BASE(101, 101, 2, 0x0080, 0x10, 10, 1),
+       PIN_FIELD_BASE(102, 102, 2, 0x0080, 0x10, 13, 1),
+       PIN_FIELD_BASE(103, 103, 2, 0x0080, 0x10, 11, 1),
+       PIN_FIELD_BASE(104, 104, 2, 0x0080, 0x10, 14, 1),
+       PIN_FIELD_BASE(105, 105, 2, 0x0070, 0x10, 10, 1),
+       PIN_FIELD_BASE(106, 106, 2, 0x0070, 0x10, 9, 1),
+       PIN_FIELD_BASE(107, 108, 2, 0x0070, 0x10, 12, 1),
+       PIN_FIELD_BASE(109, 109, 2, 0x0070, 0x10, 11, 1),
+       PIN_FIELD_BASE(110, 110, 2, 0x0080, 0x10, 16, 1),
+       PIN_FIELD_BASE(111, 111, 2, 0x0080, 0x10, 18, 1),
+       PIN_FIELD_BASE(112, 112, 2, 0x0080, 0x10, 15, 1),
+       PIN_FIELD_BASE(113, 113, 2, 0x0080, 0x10, 17, 1),
+       PIN_FIELD_BASE(114, 115, 2, 0x0070, 0x10, 26, 1),
+       PIN_FIELD_BASE(116, 117, 2, 0x0070, 0x10, 21, 1),
+       PIN_FIELD_BASE(118, 118, 2, 0x0070, 0x10, 31, 1),
+       PIN_FIELD_BASE(119, 119, 2, 0x0080, 0x10, 0, 1),
+       PIN_FIELD_BASE(120, 121, 2, 0x0070, 0x10, 23, 1),
+       PIN_FIELD_BASE(122, 123, 2, 0x0070, 0x10, 28, 1),
+       PIN_FIELD_BASE(124, 125, 2, 0x0080, 0x10, 1, 1),
+       PIN_FIELD_BASE(126, 127, 1, 0x0050, 0x10, 2, 1),
+       PIN_FIELD_BASE(140, 141, 1, 0x0050, 0x10, 0, 1),
+       PIN_FIELD_BASE(142, 142, 1, 0x0050, 0x10, 9, 1),
+       PIN_FIELD_BASE(143, 143, 5, 0x0030, 0x10, 15, 1),
+       PIN_FIELD_BASE(144, 144, 5, 0x0030, 0x10, 17, 1),
+       PIN_FIELD_BASE(145, 145, 5, 0x0030, 0x10, 16, 1),
+       PIN_FIELD_BASE(146, 146, 5, 0x0030, 0x10, 12, 1),
+       PIN_FIELD_BASE(147, 155, 5, 0x0030, 0x10, 0, 1),
+       PIN_FIELD_BASE(156, 157, 5, 0x0030, 0x10, 22, 1),
+       PIN_FIELD_BASE(158, 158, 5, 0x0030, 0x10, 21, 1),
+       PIN_FIELD_BASE(159, 159, 5, 0x0030, 0x10, 24, 1),
+       PIN_FIELD_BASE(160, 161, 5, 0x0030, 0x10, 19, 1),
+       PIN_FIELD_BASE(162, 166, 5, 0x0030, 0x10, 25, 1),
+       PIN_FIELD_BASE(179, 179, 4, 0x0030, 0x10, 13, 1),
+       PIN_FIELD_BASE(180, 180, 4, 0x0030, 0x10, 10, 1),
+       PIN_FIELD_BASE(181, 183, 1, 0x0050, 0x10, 10, 1),
+       PIN_FIELD_BASE(184, 184, 1, 0x0050, 0x10, 6, 1),
+       PIN_FIELD_BASE(185, 185, 1, 0x0050, 0x10, 5, 1),
+       PIN_FIELD_BASE(186, 186, 1, 0x0050, 0x10, 7, 1),
+       PIN_FIELD_BASE(187, 187, 1, 0x0050, 0x10, 4, 1),
+       PIN_FIELD_BASE(188, 188, 1, 0x0050, 0x10, 8, 1),
+       PIN_FIELD_BASE(189, 189, 5, 0x0030, 0x10, 9, 1),
+       PIN_FIELD_BASE(190, 190, 5, 0x0030, 0x10, 18, 1),
+       PIN_FIELD_BASE(191, 192, 5, 0x0030, 0x10, 13, 1),
+       PIN_FIELD_BASE(193, 194, 5, 0x0030, 0x10, 10, 1),
+       PIN_FIELD_BASE(195, 195, 2, 0x0070, 0x10, 30, 1),
+       PIN_FIELD_BASE(196, 196, 2, 0x0070, 0x10, 25, 1),
+       PIN_FIELD_BASE(197, 197, 2, 0x0080, 0x10, 3, 1),
+       PIN_FIELD_BASE(198, 199, 4, 0x0030, 0x10, 14, 1),
+       PIN_FIELD_BASE(200, 201, 6, 0x0060, 0x10, 7, 1),
+       PIN_FIELD_BASE(202, 202, 4, 0x0030, 0x10, 9, 1),
+};
+
+static const struct mtk_pin_field_calc mt6779_pin_drv_range[] = {
+       PINS_FIELD_BASE(0, 9, 6, 0x0000, 0x10, 9, 3),
+       PIN_FIELD_BASE(10, 16, 3, 0x0000, 0x10, 0, 3),
+       PINS_FIELD_BASE(17, 19, 6, 0x0010, 0x10, 3, 3),
+       PIN_FIELD_BASE(20, 20, 6, 0x0010, 0x10, 6, 3),
+       PINS_FIELD_BASE(21, 24, 6, 0x0000, 0x10, 21, 3),
+       PIN_FIELD_BASE(25, 25, 6, 0x0010, 0x10, 9, 3),
+       PIN_FIELD_BASE(26, 26, 6, 0x0000, 0x10, 24, 3),
+       PIN_FIELD_BASE(27, 27, 6, 0x0000, 0x10, 0, 3),
+       PIN_FIELD_BASE(28, 28, 6, 0x0000, 0x10, 27, 3),
+       PIN_FIELD_BASE(29, 29, 6, 0x0010, 0x10, 0, 3),
+       PINS_FIELD_BASE(30, 32, 6, 0x0000, 0x10, 12, 3),
+       PIN_FIELD_BASE(33, 33, 6, 0x0000, 0x10, 15, 3),
+       PIN_FIELD_BASE(34, 34, 6, 0x0000, 0x10, 12, 3),
+       PINS_FIELD_BASE(35, 41, 6, 0x0010, 0x10, 12, 3),
+       PIN_FIELD_BASE(42, 43, 6, 0x0000, 0x10, 3, 3),
+       PIN_FIELD_BASE(44, 44, 6, 0x0000, 0x10, 18, 3),
+       PIN_FIELD_BASE(45, 45, 3, 0x0010, 0x10, 12, 3),
+       PIN_FIELD_BASE(46, 46, 3, 0x0020, 0x10, 0, 3),
+       PINS_FIELD_BASE(47, 49, 3, 0x0020, 0x10, 3, 3),
+       PIN_FIELD_BASE(50, 50, 3, 0x0020, 0x10, 6, 3),
+       PIN_FIELD_BASE(51, 51, 3, 0x0010, 0x10, 3, 3),
+       PIN_FIELD_BASE(52, 52, 3, 0x0010, 0x10, 21, 3),
+       PINS_FIELD_BASE(53, 54, 3, 0x0020, 0x10, 9, 3),
+       PIN_FIELD_BASE(55, 55, 3, 0x0010, 0x10, 9, 3),
+       PIN_FIELD_BASE(56, 56, 3, 0x0010, 0x10, 27, 3),
+       PIN_FIELD_BASE(57, 57, 3, 0x0010, 0x10, 0, 3),
+       PIN_FIELD_BASE(58, 58, 3, 0x0000, 0x10, 27, 3),
+       PIN_FIELD_BASE(59, 60, 3, 0x0000, 0x10, 21, 3),
+       PIN_FIELD_BASE(61, 61, 3, 0x0010, 0x10, 6, 3),
+       PIN_FIELD_BASE(62, 62, 3, 0x0010, 0x10, 24, 3),
+       PINS_FIELD_BASE(63, 65, 3, 0x0010, 0x10, 15, 3),
+       PIN_FIELD_BASE(66, 66, 3, 0x0010, 0x10, 18, 3),
+       PINS_FIELD_BASE(67, 69, 2, 0x0010, 0x10, 3, 3),
+       PIN_FIELD_BASE(70, 71, 2, 0x0010, 0x10, 0, 3),
+       PINS_FIELD_BASE(72, 75, 4, 0x0000, 0x10, 0, 3),
+       PINS_FIELD_BASE(76, 77, 4, 0x0000, 0x10, 15, 3),
+       PINS_FIELD_BASE(78, 86, 2, 0x0000, 0x10, 3, 3),
+       PINS_FIELD_BASE(87, 92, 2, 0x0000, 0x10, 6, 3),
+       PIN_FIELD_BASE(93, 93, 4, 0x0000, 0x10, 3, 3),
+       PIN_FIELD_BASE(94, 94, 2, 0x0000, 0x10, 6, 3),
+       PINS_FIELD_BASE(95, 96, 4, 0x0000, 0x10, 6, 3),
+       PINS_FIELD_BASE(97, 98, 4, 0x0000, 0x10, 9, 3),
+       PINS_FIELD_BASE(99, 100, 2, 0x0010, 0x10, 6, 3),
+       PINS_FIELD_BASE(101, 102, 2, 0x0010, 0x10, 9, 3),
+       PINS_FIELD_BASE(103, 104, 2, 0x0010, 0x10, 12, 3),
+       PINS_FIELD_BASE(105, 109, 2, 0x0000, 0x10, 0, 3),
+       PIN_FIELD_BASE(110, 110, 2, 0x0010, 0x10, 18, 3),
+       PIN_FIELD_BASE(111, 111, 2, 0x0010, 0x10, 24, 3),
+       PIN_FIELD_BASE(112, 112, 2, 0x0010, 0x10, 15, 3),
+       PIN_FIELD_BASE(113, 113, 2, 0x0010, 0x10, 21, 3),
+       PINS_FIELD_BASE(114, 115, 2, 0x0000, 0x10, 12, 3),
+       PIN_FIELD_BASE(116, 117, 2, 0x0000, 0x10, 15, 3),
+       PINS_FIELD_BASE(118, 119, 2, 0x0000, 0x10, 12, 3),
+       PIN_FIELD_BASE(120, 121, 2, 0x0000, 0x10, 21, 3),
+       PINS_FIELD_BASE(122, 125, 2, 0x0000, 0x10, 9, 3),
+       PINS_FIELD_BASE(126, 127, 1, 0x0000, 0x10, 12, 3),
+       PIN_FIELD_BASE(128, 128, 1, 0x0000, 0x10, 29, 2),
+       PIN_FIELD_BASE(129, 129, 1, 0x0010, 0x10, 0, 2),
+       PIN_FIELD_BASE(130, 130, 1, 0x0000, 0x10, 27, 2),
+       PIN_FIELD_BASE(131, 131, 1, 0x0010, 0x10, 2, 2),
+       PIN_FIELD_BASE(132, 132, 1, 0x0010, 0x10, 6, 2),
+       PIN_FIELD_BASE(133, 133, 1, 0x0010, 0x10, 4, 2),
+       PIN_FIELD_BASE(134, 135, 1, 0x0000, 0x10, 3, 3),
+       PINS_FIELD_BASE(136, 139, 1, 0x0000, 0x10, 9, 3),
+       PINS_FIELD_BASE(140, 141, 1, 0x0000, 0x10, 0, 3),
+       PIN_FIELD_BASE(142, 142, 1, 0x0000, 0x10, 24, 3),
+       PINS_FIELD_BASE(143, 146, 5, 0x0000, 0x10, 3, 3),
+       PINS_FIELD_BASE(147, 155, 5, 0x0000, 0x10, 0, 3),
+       PIN_FIELD_BASE(156, 157, 5, 0x0000, 0x10, 21, 3),
+       PIN_FIELD_BASE(158, 158, 5, 0x0000, 0x10, 15, 3),
+       PIN_FIELD_BASE(159, 159, 5, 0x0000, 0x10, 27, 3),
+       PIN_FIELD_BASE(160, 161, 5, 0x0000, 0x10, 9, 3),
+       PINS_FIELD_BASE(162, 166, 5, 0x0000, 0x10, 18, 3),
+       PIN_FIELD_BASE(167, 167, 7, 0x0000, 0x10, 3, 3),
+       PINS_FIELD_BASE(168, 174, 7, 0x0000, 0x10, 6, 3),
+       PIN_FIELD_BASE(175, 175, 7, 0x0000, 0x10, 9, 3),
+       PIN_FIELD_BASE(176, 176, 7, 0x0000, 0x10, 0, 3),
+       PINS_FIELD_BASE(177, 178, 7, 0x0000, 0x10, 6, 3),
+       PIN_FIELD_BASE(179, 180, 4, 0x0000, 0x10, 15, 3),
+       PIN_FIELD_BASE(181, 183, 1, 0x0010, 0x10, 8, 3),
+       PINS_FIELD_BASE(184, 186, 1, 0x0000, 0x10, 15, 3),
+       PIN_FIELD_BASE(187, 188, 1, 0x0000, 0x10, 18, 3),
+       PIN_FIELD_BASE(189, 189, 5, 0x0000, 0x10, 6, 3),
+       PINS_FIELD_BASE(190, 194, 5, 0x0000, 0x10, 3, 3),
+       PIN_FIELD_BASE(195, 195, 2, 0x0000, 0x10, 9, 3),
+       PIN_FIELD_BASE(196, 196, 2, 0x0000, 0x10, 27, 3),
+       PIN_FIELD_BASE(197, 197, 2, 0x0000, 0x10, 9, 3),
+       PIN_FIELD_BASE(198, 199, 4, 0x0000, 0x10, 21, 3),
+       PINS_FIELD_BASE(200, 201, 6, 0x0010, 0x10, 15, 3),
+       PIN_FIELD_BASE(202, 202, 4, 0x0000, 0x10, 12, 3),
+};
+
+static const struct mtk_pin_field_calc mt6779_pin_pupd_range[] = {
+       PIN_FIELD_BASE(10, 15, 3, 0x0070, 0x10, 0, 1),
+       PIN_FIELD_BASE(57, 57, 3, 0x0070, 0x10, 9, 1),
+       PIN_FIELD_BASE(58, 58, 3, 0x0070, 0x10, 8, 1),
+       PIN_FIELD_BASE(59, 60, 3, 0x0070, 0x10, 6, 1),
+       PIN_FIELD_BASE(128, 129, 1, 0x0060, 0x10, 7, 1),
+       PIN_FIELD_BASE(130, 130, 1, 0x0060, 0x10, 6, 1),
+       PIN_FIELD_BASE(131, 131, 1, 0x0060, 0x10, 9, 1),
+       PIN_FIELD_BASE(132, 132, 1, 0x0060, 0x10, 11, 1),
+       PIN_FIELD_BASE(133, 133, 1, 0x0060, 0x10, 10, 1),
+       PIN_FIELD_BASE(134, 135, 1, 0x0060, 0x10, 0, 1),
+       PIN_FIELD_BASE(136, 136, 1, 0x0060, 0x10, 5, 1),
+       PIN_FIELD_BASE(137, 137, 1, 0x0060, 0x10, 2, 1),
+       PIN_FIELD_BASE(138, 138, 1, 0x0060, 0x10, 4, 1),
+       PIN_FIELD_BASE(139, 139, 1, 0x0060, 0x10, 3, 1),
+       PIN_FIELD_BASE(167, 168, 7, 0x0020, 0x10, 1, 1),
+       PIN_FIELD_BASE(169, 169, 7, 0x0020, 0x10, 4, 1),
+       PIN_FIELD_BASE(170, 170, 7, 0x0020, 0x10, 6, 1),
+       PIN_FIELD_BASE(171, 171, 7, 0x0020, 0x10, 8, 1),
+       PIN_FIELD_BASE(172, 172, 7, 0x0020, 0x10, 3, 1),
+       PIN_FIELD_BASE(173, 173, 7, 0x0020, 0x10, 7, 1),
+       PIN_FIELD_BASE(174, 175, 7, 0x0020, 0x10, 9, 1),
+       PIN_FIELD_BASE(176, 176, 7, 0x0020, 0x10, 0, 1),
+       PIN_FIELD_BASE(177, 177, 7, 0x0020, 0x10, 5, 1),
+       PIN_FIELD_BASE(178, 178, 7, 0x0020, 0x10, 11, 1),
+};
+
+static const struct mtk_pin_field_calc mt6779_pin_r0_range[] = {
+       PIN_FIELD_BASE(10, 15, 3, 0x0090, 0x10, 0, 1),
+       PIN_FIELD_BASE(57, 57, 3, 0x0090, 0x10, 9, 1),
+       PIN_FIELD_BASE(58, 58, 3, 0x0090, 0x10, 8, 1),
+       PIN_FIELD_BASE(59, 60, 3, 0x0090, 0x10, 6, 1),
+       PIN_FIELD_BASE(128, 129, 1, 0x0080, 0x10, 7, 1),
+       PIN_FIELD_BASE(130, 130, 1, 0x0080, 0x10, 6, 1),
+       PIN_FIELD_BASE(131, 131, 1, 0x0080, 0x10, 9, 1),
+       PIN_FIELD_BASE(132, 132, 1, 0x0080, 0x10, 11, 1),
+       PIN_FIELD_BASE(133, 133, 1, 0x0080, 0x10, 10, 1),
+       PIN_FIELD_BASE(134, 135, 1, 0x0080, 0x10, 0, 1),
+       PIN_FIELD_BASE(136, 136, 1, 0x0080, 0x10, 5, 1),
+       PIN_FIELD_BASE(137, 137, 1, 0x0080, 0x10, 2, 1),
+       PIN_FIELD_BASE(138, 138, 1, 0x0080, 0x10, 4, 1),
+       PIN_FIELD_BASE(139, 139, 1, 0x0080, 0x10, 3, 1),
+       PIN_FIELD_BASE(167, 168, 7, 0x0030, 0x10, 1, 1),
+       PIN_FIELD_BASE(169, 169, 7, 0x0030, 0x10, 4, 1),
+       PIN_FIELD_BASE(170, 170, 7, 0x0030, 0x10, 6, 1),
+       PIN_FIELD_BASE(171, 171, 7, 0x0030, 0x10, 8, 1),
+       PIN_FIELD_BASE(172, 172, 7, 0x0030, 0x10, 3, 1),
+       PIN_FIELD_BASE(173, 173, 7, 0x0030, 0x10, 7, 1),
+       PIN_FIELD_BASE(174, 175, 7, 0x0030, 0x10, 9, 1),
+       PIN_FIELD_BASE(176, 176, 7, 0x0030, 0x10, 0, 1),
+       PIN_FIELD_BASE(177, 177, 7, 0x0030, 0x10, 5, 1),
+       PIN_FIELD_BASE(178, 178, 7, 0x0030, 0x10, 11, 1),
+};
+
+static const struct mtk_pin_field_calc mt6779_pin_r1_range[] = {
+       PIN_FIELD_BASE(10, 15, 3, 0x00a0, 0x10, 0, 1),
+       PIN_FIELD_BASE(57, 57, 3, 0x00a0, 0x10, 9, 1),
+       PIN_FIELD_BASE(58, 58, 3, 0x00a0, 0x10, 8, 1),
+       PIN_FIELD_BASE(59, 60, 3, 0x00a0, 0x10, 6, 1),
+       PIN_FIELD_BASE(128, 129, 1, 0x0090, 0x10, 7, 1),
+       PIN_FIELD_BASE(130, 130, 1, 0x0090, 0x10, 6, 1),
+       PIN_FIELD_BASE(131, 131, 1, 0x0090, 0x10, 9, 1),
+       PIN_FIELD_BASE(132, 132, 1, 0x0090, 0x10, 11, 1),
+       PIN_FIELD_BASE(133, 133, 1, 0x0090, 0x10, 10, 1),
+       PIN_FIELD_BASE(134, 135, 1, 0x0090, 0x10, 0, 1),
+       PIN_FIELD_BASE(136, 136, 1, 0x0090, 0x10, 5, 1),
+       PIN_FIELD_BASE(137, 137, 1, 0x0090, 0x10, 2, 1),
+       PIN_FIELD_BASE(138, 138, 1, 0x0090, 0x10, 4, 1),
+       PIN_FIELD_BASE(139, 139, 1, 0x0090, 0x10, 3, 1),
+       PIN_FIELD_BASE(167, 168, 7, 0x0040, 0x10, 1, 1),
+       PIN_FIELD_BASE(169, 169, 7, 0x0040, 0x10, 4, 1),
+       PIN_FIELD_BASE(170, 170, 7, 0x0040, 0x10, 6, 1),
+       PIN_FIELD_BASE(171, 171, 7, 0x0040, 0x10, 8, 1),
+       PIN_FIELD_BASE(172, 172, 7, 0x0040, 0x10, 3, 1),
+       PIN_FIELD_BASE(173, 173, 7, 0x0040, 0x10, 7, 1),
+       PIN_FIELD_BASE(174, 175, 7, 0x0040, 0x10, 9, 1),
+       PIN_FIELD_BASE(176, 176, 7, 0x0040, 0x10, 0, 1),
+       PIN_FIELD_BASE(177, 177, 7, 0x0040, 0x10, 5, 1),
+       PIN_FIELD_BASE(178, 178, 7, 0x0040, 0x10, 11, 1),
+};
+
+static const struct mtk_pin_reg_calc mt6779_reg_cals[PINCTRL_PIN_REG_MAX] = {
+       [PINCTRL_PIN_REG_MODE] = MTK_RANGE(mt6779_pin_mode_range),
+       [PINCTRL_PIN_REG_DIR] = MTK_RANGE(mt6779_pin_dir_range),
+       [PINCTRL_PIN_REG_DI] = MTK_RANGE(mt6779_pin_di_range),
+       [PINCTRL_PIN_REG_DO] = MTK_RANGE(mt6779_pin_do_range),
+       [PINCTRL_PIN_REG_SMT] = MTK_RANGE(mt6779_pin_smt_range),
+       [PINCTRL_PIN_REG_IES] = MTK_RANGE(mt6779_pin_ies_range),
+       [PINCTRL_PIN_REG_PU] = MTK_RANGE(mt6779_pin_pu_range),
+       [PINCTRL_PIN_REG_PD] = MTK_RANGE(mt6779_pin_pd_range),
+       [PINCTRL_PIN_REG_DRV] = MTK_RANGE(mt6779_pin_drv_range),
+       [PINCTRL_PIN_REG_PUPD] = MTK_RANGE(mt6779_pin_pupd_range),
+       [PINCTRL_PIN_REG_R0] = MTK_RANGE(mt6779_pin_r0_range),
+       [PINCTRL_PIN_REG_R1] = MTK_RANGE(mt6779_pin_r1_range),
+};
+
+static const char * const mt6779_pinctrl_register_base_names[] = {
+       "gpio", "iocfg_rm", "iocfg_br", "iocfg_lm", "iocfg_lb",
+       "iocfg_rt", "iocfg_lt", "iocfg_tl",
+};
+
+static const struct mtk_eint_hw mt6779_eint_hw = {
+       .port_mask = 7,
+       .ports     = 6,
+       .ap_num    = 195,
+       .db_cnt    = 13,
+};
+
+static const struct mtk_pin_soc mt6779_data = {
+       .reg_cal = mt6779_reg_cals,
+       .pins = mtk_pins_mt6779,
+       .npins = ARRAY_SIZE(mtk_pins_mt6779),
+       .ngrps = ARRAY_SIZE(mtk_pins_mt6779),
+       .eint_hw = &mt6779_eint_hw,
+       .gpio_m = 0,
+       .ies_present = true,
+       .base_names = mt6779_pinctrl_register_base_names,
+       .nbase_names = ARRAY_SIZE(mt6779_pinctrl_register_base_names),
+       .bias_set_combo = mtk_pinconf_bias_set_combo,
+       .bias_get_combo = mtk_pinconf_bias_get_combo,
+       .drive_set = mtk_pinconf_drive_set_raw,
+       .drive_get = mtk_pinconf_drive_get_raw,
+       .adv_pull_get = mtk_pinconf_adv_pull_get,
+       .adv_pull_set = mtk_pinconf_adv_pull_set,
+};
+
+static const struct of_device_id mt6779_pinctrl_of_match[] = {
+       { .compatible = "mediatek,mt6779-pinctrl", },
+       { }
+};
+
+static int mt6779_pinctrl_probe(struct platform_device *pdev)
+{
+       return mtk_paris_pinctrl_probe(pdev, &mt6779_data);
+}
+
+static struct platform_driver mt6779_pinctrl_driver = {
+       .driver = {
+               .name = "mt6779-pinctrl",
+               .of_match_table = mt6779_pinctrl_of_match,
+       },
+       .probe = mt6779_pinctrl_probe,
+};
+
+static int __init mt6779_pinctrl_init(void)
+{
+       return platform_driver_register(&mt6779_pinctrl_driver);
+}
+arch_initcall(mt6779_pinctrl_init);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MediaTek MT6779 Pinctrl Driver");
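Once this driver is bound via the "mediatek,mt6779-pinctrl" compatible, client drivers do not touch these tables directly; they select pin states through the generic pinctrl consumer API. A minimal consumer-side sketch follows; the client device and the "default" state name are placeholders drawn from common board usage, not something this patch defines.

/*
 * Illustrative consumer-side sketch: how a client driver on an MT6779
 * board would apply its "default" pin state. Only the generic pinctrl
 * consumer API (devm_pinctrl_get/lookup/select) is assumed.
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/pinctrl/consumer.h>

static int example_client_apply_pins(struct device *dev)
{
	struct pinctrl *p;
	struct pinctrl_state *s;

	p = devm_pinctrl_get(dev);
	if (IS_ERR(p))
		return PTR_ERR(p);

	s = pinctrl_lookup_state(p, "default");
	if (IS_ERR(s))
		return PTR_ERR(s);

	/* Program the mux/config encoded in the board's device tree */
	return pinctrl_select_state(p, s);
}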
index b77b18f..2f3dfb5 100644 (file)
@@ -243,6 +243,29 @@ static int mtk_xt_find_eint_num(struct mtk_pinctrl *hw, unsigned long eint_n)
        return EINT_NA;
 }
 
+/*
+ * Virtual GPIOs are used only inside the SoC and are not exported outside it.
+ * Some modules use a virtual GPIO as an EINT (e.g. pmif or usb).
+ * On MTK platforms, external interrupts (EINTs) and GPIOs are mapped 1:1,
+ * so a GPIO can be configured as an EINT.
+ * However, some modules use a dedicated EINT that has no real GPIO pin
+ * behind it, so a virtual GPIO is used to map it.
+ */
+
+bool mtk_is_virt_gpio(struct mtk_pinctrl *hw, unsigned int gpio_n)
+{
+       const struct mtk_pin_desc *desc;
+       bool virt_gpio = false;
+
+       desc = (const struct mtk_pin_desc *)&hw->soc->pins[gpio_n];
+
+       if (desc->funcs && !desc->funcs[desc->eint.eint_m].name)
+               virt_gpio = true;
+
+       return virt_gpio;
+}
+EXPORT_SYMBOL_GPL(mtk_is_virt_gpio);
+
 static int mtk_xt_get_gpio_n(void *data, unsigned long eint_n,
                             unsigned int *gpio_n,
                             struct gpio_chip **gpio_chip)
@@ -295,6 +318,9 @@ static int mtk_xt_set_gpio_as_eint(void *data, unsigned long eint_n)
        if (err)
                return err;
 
+       if (mtk_is_virt_gpio(hw, gpio_n))
+               return 0;
+
        desc = (const struct mtk_pin_desc *)&hw->soc->pins[gpio_n];
 
        err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_MODE,
index 27df087..e2aae28 100644 (file)
@@ -80,7 +80,7 @@ enum {
        DRV_GRP_MAX,
 };
 
-static const char * const mtk_default_register_base_names[] = {
+static const char * const mtk_default_register_base_names[] __maybe_unused = {
        "base",
 };
 
@@ -315,4 +315,5 @@ int mtk_pinconf_adv_drive_set(struct mtk_pinctrl *hw,
 int mtk_pinconf_adv_drive_get(struct mtk_pinctrl *hw,
                              const struct mtk_pin_desc *desc, u32 *val);
 
+bool mtk_is_virt_gpio(struct mtk_pinctrl *hw, unsigned int gpio_n);
 #endif /* __PINCTRL_MTK_COMMON_V2_H */
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-mt6779.h b/drivers/pinctrl/mediatek/pinctrl-mtk-mt6779.h
new file mode 100644 (file)
index 0000000..0a48d66
--- /dev/null
@@ -0,0 +1,2085 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 MediaTek Inc.
+ * Author: Andy Teng <andy.teng@mediatek.com>
+ *
+ */
+
+#ifndef __PINCTRL_MTK_MT6779_H
+#define __PINCTRL_MTK_MT6779_H
+
+#include "pinctrl-paris.h"
+
+static const struct mtk_pin_desc mtk_pins_mt6779[] = {
+       MTK_PIN(
+               0, "GPIO0",
+               MTK_EINT_FUNCTION(0, 0),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO0"),
+               MTK_FUNCTION(1, "SPI6_MI"),
+               MTK_FUNCTION(2, "I2S5_LRCK"),
+               MTK_FUNCTION(3, "TDM_LRCK_2ND"),
+               MTK_FUNCTION(4, "PCM1_SYNC"),
+               MTK_FUNCTION(5, "SCL_6306"),
+               MTK_FUNCTION(6, "TP_GPIO0_AO"),
+               MTK_FUNCTION(7, "PTA_RXD")
+       ),
+       MTK_PIN(
+               1, "GPIO1",
+               MTK_EINT_FUNCTION(0, 1),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO1"),
+               MTK_FUNCTION(1, "SPI6_CSB"),
+               MTK_FUNCTION(2, "I2S5_DO"),
+               MTK_FUNCTION(3, "TDM_DATA0_2ND"),
+               MTK_FUNCTION(4, "PCM1_DO0"),
+               MTK_FUNCTION(5, "SDA_6306"),
+               MTK_FUNCTION(6, "TP_GPIO1_AO"),
+               MTK_FUNCTION(7, "PTA_TXD")
+       ),
+       MTK_PIN(
+               2, "GPIO2",
+               MTK_EINT_FUNCTION(0, 2),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO2"),
+               MTK_FUNCTION(1, "SPI6_MO"),
+               MTK_FUNCTION(2, "I2S5_BCK"),
+               MTK_FUNCTION(3, "TDM_BCK_2ND"),
+               MTK_FUNCTION(4, "PCM1_CLK"),
+               MTK_FUNCTION(5, "MD_INT1_C2K_UIM0_HOT_PLUG"),
+               MTK_FUNCTION(6, "TP_GPIO2_AO")
+       ),
+       MTK_PIN(
+               3, "GPIO3",
+               MTK_EINT_FUNCTION(0, 3),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO3"),
+               MTK_FUNCTION(1, "SPI6_CLK"),
+               MTK_FUNCTION(2, "I2S5_MCK"),
+               MTK_FUNCTION(3, "TDM_MCK_2ND"),
+               MTK_FUNCTION(4, "EXT_FRAME_SYNC"),
+               MTK_FUNCTION(5, "MD_INT2_C2K_UIM1_HOT_PLUG"),
+               MTK_FUNCTION(6, "TP_GPIO3_AO")
+       ),
+       MTK_PIN(
+               4, "GPIO4",
+               MTK_EINT_FUNCTION(0, 4),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO4"),
+               MTK_FUNCTION(1, "SPI7_MI"),
+               MTK_FUNCTION(2, "I2S0_MCK"),
+               MTK_FUNCTION(3, "TDM_DATA1_2ND"),
+               MTK_FUNCTION(4, "PCM1_DO1"),
+               MTK_FUNCTION(5, "DMIC1_CLK"),
+               MTK_FUNCTION(6, "TP_GPIO4_AO"),
+               MTK_FUNCTION(7, "SCL8")
+       ),
+       MTK_PIN(
+               5, "GPIO5",
+               MTK_EINT_FUNCTION(0, 5),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO5"),
+               MTK_FUNCTION(1, "SPI7_CSB"),
+               MTK_FUNCTION(2, "I2S0_BCK"),
+               MTK_FUNCTION(3, "TDM_DATA2_2ND"),
+               MTK_FUNCTION(4, "PCM1_DO2"),
+               MTK_FUNCTION(5, "DMIC1_DAT"),
+               MTK_FUNCTION(6, "TP_GPIO5_AO"),
+               MTK_FUNCTION(7, "SDA8")
+       ),
+       MTK_PIN(
+               6, "GPIO6",
+               MTK_EINT_FUNCTION(0, 6),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO6"),
+               MTK_FUNCTION(1, "SPI7_MO"),
+               MTK_FUNCTION(2, "I2S0_LRCK"),
+               MTK_FUNCTION(3, "TDM_DATA3_2ND"),
+               MTK_FUNCTION(4, "PCM1_DI"),
+               MTK_FUNCTION(5, "DMIC_CLK"),
+               MTK_FUNCTION(6, "TP_GPIO6_AO"),
+               MTK_FUNCTION(7, "SCL9")
+       ),
+       MTK_PIN(
+               7, "GPIO7",
+               MTK_EINT_FUNCTION(0, 7),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO7"),
+               MTK_FUNCTION(1, "SPI7_CLK"),
+               MTK_FUNCTION(2, "I2S0_DI"),
+               MTK_FUNCTION(3, "SRCLKENAI1"),
+               MTK_FUNCTION(4, "DVFSRC_EXT_REQ"),
+               MTK_FUNCTION(5, "DMIC_DAT"),
+               MTK_FUNCTION(6, "TP_GPIO7_AO"),
+               MTK_FUNCTION(7, "SDA9")
+       ),
+       MTK_PIN(
+               8, "GPIO8",
+               MTK_EINT_FUNCTION(0, 8),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO8"),
+               MTK_FUNCTION(1, "PWM_0"),
+               MTK_FUNCTION(2, "I2S2_DI2"),
+               MTK_FUNCTION(3, "SRCLKENAI0"),
+               MTK_FUNCTION(4, "URXD1"),
+               MTK_FUNCTION(5, "I2S0_MCK"),
+               MTK_FUNCTION(6, "CONN_MCU_DBGACK_N"),
+               MTK_FUNCTION(7, "IDDIG")
+       ),
+       MTK_PIN(
+               9, "GPIO9",
+               MTK_EINT_FUNCTION(0, 9),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO9"),
+               MTK_FUNCTION(1, "PWM_3"),
+               MTK_FUNCTION(2, "MD_INT0"),
+               MTK_FUNCTION(3, "SRCLKENAI1"),
+               MTK_FUNCTION(4, "UTXD1"),
+               MTK_FUNCTION(5, "I2S0_BCK"),
+               MTK_FUNCTION(6, "CONN_MCU_TRST_B"),
+               MTK_FUNCTION(7, "USB_DRVVBUS")
+       ),
+       MTK_PIN(
+               10, "GPIO10",
+               MTK_EINT_FUNCTION(0, 10),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO10"),
+               MTK_FUNCTION(1, "MSDC1_CLK_A"),
+               MTK_FUNCTION(2, "TP_URXD1_AO"),
+               MTK_FUNCTION(3, "I2S1_LRCK"),
+               MTK_FUNCTION(4, "UCTS0"),
+               MTK_FUNCTION(5, "DMIC1_CLK"),
+               MTK_FUNCTION(6, "KPCOL2"),
+               MTK_FUNCTION(7, "SCL8")
+       ),
+       MTK_PIN(
+               11, "GPIO11",
+               MTK_EINT_FUNCTION(0, 11),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO11"),
+               MTK_FUNCTION(1, "MSDC1_CMD_A"),
+               MTK_FUNCTION(2, "TP_UTXD1_AO"),
+               MTK_FUNCTION(3, "I2S1_DO"),
+               MTK_FUNCTION(4, "URTS0"),
+               MTK_FUNCTION(5, "DMIC1_DAT"),
+               MTK_FUNCTION(6, "KPROW2"),
+               MTK_FUNCTION(7, "SDA8")
+       ),
+       MTK_PIN(
+               12, "GPIO12",
+               MTK_EINT_FUNCTION(0, 12),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO12"),
+               MTK_FUNCTION(1, "MSDC1_DAT3_A"),
+               MTK_FUNCTION(2, "TP_URXD2_AO"),
+               MTK_FUNCTION(3, "I2S1_MCK"),
+               MTK_FUNCTION(4, "UCTS1"),
+               MTK_FUNCTION(5, "DMIC_CLK"),
+               MTK_FUNCTION(6, "ANT_SEL9"),
+               MTK_FUNCTION(7, "SCL9")
+       ),
+       MTK_PIN(
+               13, "GPIO13",
+               MTK_EINT_FUNCTION(0, 13),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO13"),
+               MTK_FUNCTION(1, "MSDC1_DAT0_A"),
+               MTK_FUNCTION(2, "TP_UTXD2_AO"),
+               MTK_FUNCTION(3, "I2S1_BCK"),
+               MTK_FUNCTION(4, "URTS1"),
+               MTK_FUNCTION(5, "DMIC_DAT"),
+               MTK_FUNCTION(6, "ANT_SEL10"),
+               MTK_FUNCTION(7, "SDA9")
+       ),
+       MTK_PIN(
+               14, "GPIO14",
+               MTK_EINT_FUNCTION(0, 14),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO14"),
+               MTK_FUNCTION(1, "MSDC1_DAT2_A"),
+               MTK_FUNCTION(2, "PWM_3"),
+               MTK_FUNCTION(3, "IDDIG"),
+               MTK_FUNCTION(4, "MD_INT0"),
+               MTK_FUNCTION(5, "PTA_RXD"),
+               MTK_FUNCTION(6, "ANT_SEL11")
+       ),
+       MTK_PIN(
+               15, "GPIO15",
+               MTK_EINT_FUNCTION(0, 15),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO15"),
+               MTK_FUNCTION(1, "MSDC1_DAT1_A"),
+               MTK_FUNCTION(2, "DVFSRC_EXT_REQ"),
+               MTK_FUNCTION(3, "USB_DRVVBUS"),
+               MTK_FUNCTION(4, "MD_INT1_C2K_UIM0_HOT_PLUG"),
+               MTK_FUNCTION(5, "PTA_TXD"),
+               MTK_FUNCTION(6, "ANT_SEL12")
+       ),
+       MTK_PIN(
+               16, "GPIO16",
+               MTK_EINT_FUNCTION(0, 16),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO16"),
+               MTK_FUNCTION(1, "SRCLKENAI0"),
+               MTK_FUNCTION(2, "EXT_FRAME_SYNC"),
+               MTK_FUNCTION(3, "MFG_EJTAG_TRSTN"),
+               MTK_FUNCTION(4, "MD_INT2_C2K_UIM1_HOT_PLUG"),
+               MTK_FUNCTION(5, "CONN_TCXOENA_REQ"),
+               MTK_FUNCTION(6, "PWM_2"),
+               MTK_FUNCTION(7, "JTRSTN_SEL1")
+       ),
+       MTK_PIN(
+               17, "GPIO17",
+               MTK_EINT_FUNCTION(0, 17),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO17"),
+               MTK_FUNCTION(1, "SPI0_A_MI"),
+               MTK_FUNCTION(2, "SCP_SPI0_MI"),
+               MTK_FUNCTION(3, "MFG_EJTAG_TDO"),
+               MTK_FUNCTION(4, "DPI_HSYNC"),
+               MTK_FUNCTION(5, "MFG_DFD_JTAG_TDO"),
+               MTK_FUNCTION(6, "DFD_TDO"),
+               MTK_FUNCTION(7, "JTDO_SEL1")
+       ),
+       MTK_PIN(
+               18, "GPIO18",
+               MTK_EINT_FUNCTION(0, 18),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO18"),
+               MTK_FUNCTION(1, "SPI0_A_MO"),
+               MTK_FUNCTION(2, "SCP_SPI0_MO"),
+               MTK_FUNCTION(3, "MFG_EJTAG_TDI"),
+               MTK_FUNCTION(4, "DPI_VSYNC"),
+               MTK_FUNCTION(5, "MFG_DFD_JTAG_TDI"),
+               MTK_FUNCTION(6, "DFD_TDI"),
+               MTK_FUNCTION(7, "JTDI_SEL1")
+       ),
+       MTK_PIN(
+               19, "GPIO19",
+               MTK_EINT_FUNCTION(0, 19),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO19"),
+               MTK_FUNCTION(1, "SPI0_A_CSB"),
+               MTK_FUNCTION(2, "SCP_SPI0_CS"),
+               MTK_FUNCTION(3, "MFG_EJTAG_TMS"),
+               MTK_FUNCTION(4, "DPI_DE"),
+               MTK_FUNCTION(5, "MFG_DFD_JTAG_TMS"),
+               MTK_FUNCTION(6, "DFD_TMS"),
+               MTK_FUNCTION(7, "JTMS_SEL1")
+       ),
+       MTK_PIN(
+               20, "GPIO20",
+               MTK_EINT_FUNCTION(0, 20),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO20"),
+               MTK_FUNCTION(1, "SPI0_A_CLK"),
+               MTK_FUNCTION(2, "SCP_SPI0_CK"),
+               MTK_FUNCTION(3, "MFG_EJTAG_TCK"),
+               MTK_FUNCTION(4, "DPI_CK"),
+               MTK_FUNCTION(5, "MFG_DFD_JTAG_TCK"),
+               MTK_FUNCTION(6, "DFD_TCK_XI"),
+               MTK_FUNCTION(7, "JTCK_SEL1")
+       ),
+       MTK_PIN(
+               21, "GPIO21",
+               MTK_EINT_FUNCTION(0, 21),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO21"),
+               MTK_FUNCTION(1, "PWM_0"),
+               MTK_FUNCTION(2, "CMFLASH0"),
+               MTK_FUNCTION(3, "CMVREF2"),
+               MTK_FUNCTION(4, "CLKM0"),
+               MTK_FUNCTION(5, "ANT_SEL9"),
+               MTK_FUNCTION(6, "CONN_TCXOENA_REQ"),
+               MTK_FUNCTION(7, "DBG_MON_A27")
+       ),
+       MTK_PIN(
+               22, "GPIO22",
+               MTK_EINT_FUNCTION(0, 22),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO22"),
+               MTK_FUNCTION(1, "PWM_1"),
+               MTK_FUNCTION(2, "CMFLASH1"),
+               MTK_FUNCTION(3, "CMVREF3"),
+               MTK_FUNCTION(4, "CLKM1"),
+               MTK_FUNCTION(5, "ANT_SEL10"),
+               MTK_FUNCTION(7, "DBG_MON_A28")
+       ),
+       MTK_PIN(
+               23, "GPIO23",
+               MTK_EINT_FUNCTION(0, 23),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO23"),
+               MTK_FUNCTION(1, "PWM_2"),
+               MTK_FUNCTION(2, "CMFLASH2"),
+               MTK_FUNCTION(3, "CMVREF0"),
+               MTK_FUNCTION(4, "CLKM2"),
+               MTK_FUNCTION(5, "ANT_SEL11"),
+               MTK_FUNCTION(7, "DBG_MON_A29")
+       ),
+       MTK_PIN(
+               24, "GPIO24",
+               MTK_EINT_FUNCTION(0, 24),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO24"),
+               MTK_FUNCTION(1, "PWM_0"),
+               MTK_FUNCTION(2, "CMFLASH3"),
+               MTK_FUNCTION(3, "CMVREF1"),
+               MTK_FUNCTION(4, "CLKM3"),
+               MTK_FUNCTION(5, "ANT_SEL12"),
+               MTK_FUNCTION(7, "DBG_MON_A30")
+       ),
+       MTK_PIN(
+               25, "GPIO25",
+               MTK_EINT_FUNCTION(0, 25),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO25"),
+               MTK_FUNCTION(1, "SRCLKENAI0"),
+               MTK_FUNCTION(2, "UCTS0"),
+               MTK_FUNCTION(3, "SCL8"),
+               MTK_FUNCTION(4, "CMVREF4"),
+               MTK_FUNCTION(5, "I2S0_LRCK"),
+               MTK_FUNCTION(6, "CONN_TCXOENA_REQ"),
+               MTK_FUNCTION(7, "DBG_MON_A31")
+       ),
+       MTK_PIN(
+               26, "GPIO26",
+               MTK_EINT_FUNCTION(0, 26),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO26"),
+               MTK_FUNCTION(1, "PWM_0"),
+               MTK_FUNCTION(2, "URTS0"),
+               MTK_FUNCTION(3, "SDA8"),
+               MTK_FUNCTION(4, "CLKM0"),
+               MTK_FUNCTION(5, "I2S0_DI"),
+               MTK_FUNCTION(6, "AGPS_SYNC"),
+               MTK_FUNCTION(7, "DBG_MON_A32")
+       ),
+       MTK_PIN(
+               27, "GPIO27",
+               MTK_EINT_FUNCTION(0, 27),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO27"),
+               MTK_FUNCTION(1, "AP_GOOD")
+       ),
+       MTK_PIN(
+               28, "GPIO28",
+               MTK_EINT_FUNCTION(0, 28),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO28"),
+               MTK_FUNCTION(1, "SCL5")
+       ),
+       MTK_PIN(
+               29, "GPIO29",
+               MTK_EINT_FUNCTION(0, 29),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO29"),
+               MTK_FUNCTION(1, "SDA5")
+       ),
+       MTK_PIN(
+               30, "GPIO30",
+               MTK_EINT_FUNCTION(0, 30),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO30"),
+               MTK_FUNCTION(1, "I2S1_MCK"),
+               MTK_FUNCTION(2, "I2S3_MCK"),
+               MTK_FUNCTION(3, "I2S2_MCK"),
+               MTK_FUNCTION(4, "DPI_D0"),
+               MTK_FUNCTION(5, "SPI4_MI"),
+               MTK_FUNCTION(6, "CONN_MCU_DBGI_N")
+       ),
+       MTK_PIN(
+               31, "GPIO31",
+               MTK_EINT_FUNCTION(0, 31),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO31"),
+               MTK_FUNCTION(1, "I2S1_BCK"),
+               MTK_FUNCTION(2, "I2S3_BCK"),
+               MTK_FUNCTION(3, "I2S2_BCK"),
+               MTK_FUNCTION(4, "DPI_D1"),
+               MTK_FUNCTION(5, "SPI4_CSB"),
+               MTK_FUNCTION(6, "CONN_MCU_TDO")
+       ),
+       MTK_PIN(
+               32, "GPIO32",
+               MTK_EINT_FUNCTION(0, 32),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO32"),
+               MTK_FUNCTION(1, "I2S1_LRCK"),
+               MTK_FUNCTION(2, "I2S3_LRCK"),
+               MTK_FUNCTION(3, "I2S2_LRCK"),
+               MTK_FUNCTION(4, "DPI_D2"),
+               MTK_FUNCTION(5, "SPI4_MO"),
+               MTK_FUNCTION(6, "CONN_MCU_TDI")
+       ),
+       MTK_PIN(
+               33, "GPIO33",
+               MTK_EINT_FUNCTION(0, 33),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO33"),
+               MTK_FUNCTION(1, "I2S2_DI"),
+               MTK_FUNCTION(2, "I2S0_DI"),
+               MTK_FUNCTION(3, "I2S5_DO"),
+               MTK_FUNCTION(4, "DPI_D3"),
+               MTK_FUNCTION(5, "SPI4_CLK"),
+               MTK_FUNCTION(6, "CONN_MCU_TMS")
+       ),
+       MTK_PIN(
+               34, "GPIO34",
+               MTK_EINT_FUNCTION(0, 34),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO34"),
+               MTK_FUNCTION(1, "I2S1_DO"),
+               MTK_FUNCTION(2, "I2S3_DO"),
+               MTK_FUNCTION(3, "I2S2_DI2"),
+               MTK_FUNCTION(4, "DPI_D4"),
+               MTK_FUNCTION(5, "AGPS_SYNC"),
+               MTK_FUNCTION(6, "CONN_MCU_TCK")
+       ),
+       MTK_PIN(
+               35, "GPIO35",
+               MTK_EINT_FUNCTION(0, 35),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO35"),
+               MTK_FUNCTION(1, "TDM_LRCK"),
+               MTK_FUNCTION(2, "I2S1_LRCK"),
+               MTK_FUNCTION(3, "I2S5_LRCK"),
+               MTK_FUNCTION(4, "DPI_D5"),
+               MTK_FUNCTION(5, "SPI5_A_MO"),
+               MTK_FUNCTION(6, "IO_JTAG_TDI"),
+               MTK_FUNCTION(7, "PWM_2")
+       ),
+       MTK_PIN(
+               36, "GPIO36",
+               MTK_EINT_FUNCTION(0, 36),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO36"),
+               MTK_FUNCTION(1, "TDM_BCK"),
+               MTK_FUNCTION(2, "I2S1_BCK"),
+               MTK_FUNCTION(3, "I2S5_BCK"),
+               MTK_FUNCTION(4, "DPI_D6"),
+               MTK_FUNCTION(5, "SPI5_A_CSB"),
+               MTK_FUNCTION(6, "IO_JTAG_TRSTN"),
+               MTK_FUNCTION(7, "SRCLKENAI1")
+       ),
+       MTK_PIN(
+               37, "GPIO37",
+               MTK_EINT_FUNCTION(0, 37),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO37"),
+               MTK_FUNCTION(1, "TDM_MCK"),
+               MTK_FUNCTION(2, "I2S1_MCK"),
+               MTK_FUNCTION(3, "I2S5_MCK"),
+               MTK_FUNCTION(4, "DPI_D7"),
+               MTK_FUNCTION(5, "SPI5_A_MI"),
+               MTK_FUNCTION(6, "IO_JTAG_TCK"),
+               MTK_FUNCTION(7, "SRCLKENAI0")
+       ),
+       MTK_PIN(
+               38, "GPIO38",
+               MTK_EINT_FUNCTION(0, 38),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO38"),
+               MTK_FUNCTION(1, "TDM_DATA0"),
+               MTK_FUNCTION(2, "I2S2_DI"),
+               MTK_FUNCTION(3, "I2S5_DO"),
+               MTK_FUNCTION(4, "DPI_D8"),
+               MTK_FUNCTION(5, "SPI5_A_CLK"),
+               MTK_FUNCTION(6, "IO_JTAG_TDO"),
+               MTK_FUNCTION(7, "CONN_TCXOENA_REQ")
+       ),
+       MTK_PIN(
+               39, "GPIO39",
+               MTK_EINT_FUNCTION(0, 39),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO39"),
+               MTK_FUNCTION(1, "TDM_DATA1"),
+               MTK_FUNCTION(2, "I2S1_DO"),
+               MTK_FUNCTION(3, "I2S2_DI2"),
+               MTK_FUNCTION(4, "DPI_D9"),
+               MTK_FUNCTION(5, "DVFSRC_EXT_REQ"),
+               MTK_FUNCTION(6, "IO_JTAG_TMS"),
+               MTK_FUNCTION(7, "IDDIG")
+       ),
+       MTK_PIN(
+               40, "GPIO40",
+               MTK_EINT_FUNCTION(0, 40),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO40"),
+               MTK_FUNCTION(1, "TDM_DATA2"),
+               MTK_FUNCTION(2, "SCL9"),
+               MTK_FUNCTION(3, "PWM_3"),
+               MTK_FUNCTION(4, "DPI_D10"),
+               MTK_FUNCTION(5, "SRCLKENAI0"),
+               MTK_FUNCTION(6, "DAP_MD32_SWD"),
+               MTK_FUNCTION(7, "USB_DRVVBUS")
+       ),
+       MTK_PIN(
+               41, "GPIO41",
+               MTK_EINT_FUNCTION(0, 41),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO41"),
+               MTK_FUNCTION(1, "TDM_DATA3"),
+               MTK_FUNCTION(2, "SDA9"),
+               MTK_FUNCTION(3, "PWM_1"),
+               MTK_FUNCTION(4, "DPI_D11"),
+               MTK_FUNCTION(5, "CLKM1"),
+               MTK_FUNCTION(6, "DAP_MD32_SWCK")
+       ),
+       MTK_PIN(
+               42, "GPIO42",
+               MTK_EINT_FUNCTION(0, 42),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO42"),
+               MTK_FUNCTION(1, "DISP_PWM")
+       ),
+       MTK_PIN(
+               43, "GPIO43",
+               MTK_EINT_FUNCTION(0, 43),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO43"),
+               MTK_FUNCTION(1, "DSI_TE")
+       ),
+       MTK_PIN(
+               44, "GPIO44",
+               MTK_EINT_FUNCTION(0, 44),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO44"),
+               MTK_FUNCTION(1, "LCM_RST")
+       ),
+       MTK_PIN(
+               45, "GPIO45",
+               MTK_EINT_FUNCTION(0, 45),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO45"),
+               MTK_FUNCTION(1, "SCL6"),
+               MTK_FUNCTION(2, "SCP_SCL0"),
+               MTK_FUNCTION(3, "SCP_SCL1"),
+               MTK_FUNCTION(4, "SCL_6306")
+       ),
+       MTK_PIN(
+               46, "GPIO46",
+               MTK_EINT_FUNCTION(0, 46),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO46"),
+               MTK_FUNCTION(1, "SDA6"),
+               MTK_FUNCTION(2, "SCP_SDA0"),
+               MTK_FUNCTION(3, "SCP_SDA1"),
+               MTK_FUNCTION(4, "SDA_6306")
+       ),
+       MTK_PIN(
+               47, "GPIO47",
+               MTK_EINT_FUNCTION(0, 47),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO47"),
+               MTK_FUNCTION(1, "SPI1_A_MI"),
+               MTK_FUNCTION(2, "SCP_SPI1_A_MI"),
+               MTK_FUNCTION(3, "KPCOL2"),
+               MTK_FUNCTION(4, "MD_URXD0"),
+               MTK_FUNCTION(5, "CONN_UART0_RXD"),
+               MTK_FUNCTION(6, "SSPM_URXD_AO"),
+               MTK_FUNCTION(7, "DBG_MON_B32")
+       ),
+       MTK_PIN(
+               48, "GPIO48",
+               MTK_EINT_FUNCTION(0, 48),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO48"),
+               MTK_FUNCTION(1, "SPI1_A_CSB"),
+               MTK_FUNCTION(2, "SCP_SPI1_A_CS"),
+               MTK_FUNCTION(3, "KPROW2"),
+               MTK_FUNCTION(4, "MD_UTXD0"),
+               MTK_FUNCTION(5, "CONN_UART0_TXD"),
+               MTK_FUNCTION(6, "SSPM_UTXD_AO"),
+               MTK_FUNCTION(7, "DBG_MON_B31")
+       ),
+       MTK_PIN(
+               49, "GPIO49",
+               MTK_EINT_FUNCTION(0, 49),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO49"),
+               MTK_FUNCTION(1, "SPI1_A_MO"),
+               MTK_FUNCTION(2, "SCP_SPI1_A_MO"),
+               MTK_FUNCTION(3, "UCTS0"),
+               MTK_FUNCTION(4, "MD_URXD1"),
+               MTK_FUNCTION(5, "PWM_1"),
+               MTK_FUNCTION(6, "TP_URXD2_AO"),
+               MTK_FUNCTION(7, "DBG_MON_B30")
+       ),
+       MTK_PIN(
+               50, "GPIO50",
+               MTK_EINT_FUNCTION(0, 50),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO50"),
+               MTK_FUNCTION(1, "SPI1_A_CLK"),
+               MTK_FUNCTION(2, "SCP_SPI1_A_CK"),
+               MTK_FUNCTION(3, "URTS0"),
+               MTK_FUNCTION(4, "MD_UTXD1"),
+               MTK_FUNCTION(5, "WIFI_TXD"),
+               MTK_FUNCTION(6, "TP_UTXD2_AO"),
+               MTK_FUNCTION(7, "DBG_MON_B29")
+       ),
+       MTK_PIN(
+               51, "GPIO51",
+               MTK_EINT_FUNCTION(0, 51),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO51"),
+               MTK_FUNCTION(1, "SCL0")
+       ),
+       MTK_PIN(
+               52, "GPIO52",
+               MTK_EINT_FUNCTION(0, 52),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO52"),
+               MTK_FUNCTION(1, "SDA0")
+       ),
+       MTK_PIN(
+               53, "GPIO53",
+               MTK_EINT_FUNCTION(0, 53),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO53"),
+               MTK_FUNCTION(1, "URXD0"),
+               MTK_FUNCTION(2, "UTXD0"),
+               MTK_FUNCTION(3, "MD_URXD0"),
+               MTK_FUNCTION(4, "MD_URXD1"),
+               MTK_FUNCTION(5, "SSPM_URXD_AO"),
+               MTK_FUNCTION(7, "CONN_UART0_RXD")
+       ),
+       MTK_PIN(
+               54, "GPIO54",
+               MTK_EINT_FUNCTION(0, 54),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO54"),
+               MTK_FUNCTION(1, "UTXD0"),
+               MTK_FUNCTION(2, "URXD0"),
+               MTK_FUNCTION(3, "MD_UTXD0"),
+               MTK_FUNCTION(4, "MD_UTXD1"),
+               MTK_FUNCTION(5, "SSPM_UTXD_AO"),
+               MTK_FUNCTION(6, "WIFI_TXD"),
+               MTK_FUNCTION(7, "CONN_UART0_TXD")
+       ),
+       MTK_PIN(
+               55, "GPIO55",
+               MTK_EINT_FUNCTION(0, 55),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO55"),
+               MTK_FUNCTION(1, "SCL3"),
+               MTK_FUNCTION(2, "SCP_SCL0"),
+               MTK_FUNCTION(3, "SCP_SCL1"),
+               MTK_FUNCTION(4, "SCL_6306")
+       ),
+       MTK_PIN(
+               56, "GPIO56",
+               MTK_EINT_FUNCTION(0, 56),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO56"),
+               MTK_FUNCTION(1, "SDA3"),
+               MTK_FUNCTION(2, "SCP_SDA0"),
+               MTK_FUNCTION(3, "SCP_SDA1"),
+               MTK_FUNCTION(4, "SDA_6306")
+       ),
+       MTK_PIN(
+               57, "GPIO57",
+               MTK_EINT_FUNCTION(0, 57),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO57"),
+               MTK_FUNCTION(1, "KPROW1"),
+               MTK_FUNCTION(2, "PWM_1"),
+               MTK_FUNCTION(3, "DVFSRC_EXT_REQ"),
+               MTK_FUNCTION(4, "CLKM1"),
+               MTK_FUNCTION(5, "IDDIG"),
+               MTK_FUNCTION(6, "CONN_TCXOENA_REQ"),
+               MTK_FUNCTION(7, "MBISTREADEN_TRIGGER")
+       ),
+       MTK_PIN(
+               58, "GPIO58",
+               MTK_EINT_FUNCTION(0, 58),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO58"),
+               MTK_FUNCTION(1, "KPROW0"),
+               MTK_FUNCTION(7, "DBG_MON_B28")
+       ),
+       MTK_PIN(
+               59, "GPIO59",
+               MTK_EINT_FUNCTION(0, 59),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO59"),
+               MTK_FUNCTION(1, "KPCOL0"),
+               MTK_FUNCTION(7, "DBG_MON_B27")
+       ),
+       MTK_PIN(
+               60, "GPIO60",
+               MTK_EINT_FUNCTION(0, 60),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO60"),
+               MTK_FUNCTION(1, "KPCOL1"),
+               MTK_FUNCTION(2, "PWM_2"),
+               MTK_FUNCTION(3, "UCTS1"),
+               MTK_FUNCTION(4, "CLKM2"),
+               MTK_FUNCTION(5, "USB_DRVVBUS"),
+               MTK_FUNCTION(7, "MBISTWRITEEN_TRIGGER")
+       ),
+       MTK_PIN(
+               61, "GPIO61",
+               MTK_EINT_FUNCTION(0, 61),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO61"),
+               MTK_FUNCTION(1, "SCL1"),
+               MTK_FUNCTION(2, "SCP_SCL0"),
+               MTK_FUNCTION(3, "SCP_SCL1")
+       ),
+       MTK_PIN(
+               62, "GPIO62",
+               MTK_EINT_FUNCTION(0, 62),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO62"),
+               MTK_FUNCTION(1, "SDA1"),
+               MTK_FUNCTION(2, "SCP_SDA0"),
+               MTK_FUNCTION(3, "SCP_SDA1")
+       ),
+       MTK_PIN(
+               63, "GPIO63",
+               MTK_EINT_FUNCTION(0, 63),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO63"),
+               MTK_FUNCTION(1, "SPI2_MI"),
+               MTK_FUNCTION(2, "SCP_SPI2_MI"),
+               MTK_FUNCTION(3, "KPCOL2"),
+               MTK_FUNCTION(4, "MRG_DI"),
+               MTK_FUNCTION(5, "MD_URXD0"),
+               MTK_FUNCTION(6, "CONN_UART0_RXD"),
+               MTK_FUNCTION(7, "DBG_MON_B26")
+       ),
+       MTK_PIN(
+               64, "GPIO64",
+               MTK_EINT_FUNCTION(0, 64),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO64"),
+               MTK_FUNCTION(1, "SPI2_CSB"),
+               MTK_FUNCTION(2, "SCP_SPI2_CS"),
+               MTK_FUNCTION(3, "KPROW2"),
+               MTK_FUNCTION(4, "MRG_SYNC"),
+               MTK_FUNCTION(5, "MD_UTXD0"),
+               MTK_FUNCTION(6, "CONN_UART0_TXD"),
+               MTK_FUNCTION(7, "DBG_MON_B25")
+       ),
+       MTK_PIN(
+               65, "GPIO65",
+               MTK_EINT_FUNCTION(0, 65),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO65"),
+               MTK_FUNCTION(1, "SPI2_MO"),
+               MTK_FUNCTION(2, "SCP_SPI2_MO"),
+               MTK_FUNCTION(3, "SCP_SDA1"),
+               MTK_FUNCTION(4, "MRG_DO"),
+               MTK_FUNCTION(5, "MD_URXD1"),
+               MTK_FUNCTION(6, "PWM_3")
+       ),
+       MTK_PIN(
+               66, "GPIO66",
+               MTK_EINT_FUNCTION(0, 66),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO66"),
+               MTK_FUNCTION(1, "SPI2_CLK"),
+               MTK_FUNCTION(2, "SCP_SPI2_CK"),
+               MTK_FUNCTION(3, "SCP_SCL1"),
+               MTK_FUNCTION(4, "MRG_CLK"),
+               MTK_FUNCTION(5, "MD_UTXD1"),
+               MTK_FUNCTION(6, "WIFI_TXD")
+       ),
+       MTK_PIN(
+               67, "GPIO67",
+               MTK_EINT_FUNCTION(0, 67),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO67"),
+               MTK_FUNCTION(1, "I2S3_LRCK"),
+               MTK_FUNCTION(2, "I2S1_LRCK"),
+               MTK_FUNCTION(3, "URXD1"),
+               MTK_FUNCTION(4, "PCM0_SYNC"),
+               MTK_FUNCTION(5, "I2S5_LRCK"),
+               MTK_FUNCTION(6, "ANT_SEL9"),
+               MTK_FUNCTION(7, "DBG_MON_B10")
+       ),
+       MTK_PIN(
+               68, "GPIO68",
+               MTK_EINT_FUNCTION(0, 68),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO68"),
+               MTK_FUNCTION(1, "I2S3_DO"),
+               MTK_FUNCTION(2, "I2S1_DO"),
+               MTK_FUNCTION(3, "UTXD1"),
+               MTK_FUNCTION(4, "PCM0_DO"),
+               MTK_FUNCTION(5, "I2S5_DO"),
+               MTK_FUNCTION(6, "ANT_SEL10"),
+               MTK_FUNCTION(7, "DBG_MON_B9")
+       ),
+       MTK_PIN(
+               69, "GPIO69",
+               MTK_EINT_FUNCTION(0, 69),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO69"),
+               MTK_FUNCTION(1, "I2S3_MCK"),
+               MTK_FUNCTION(2, "I2S1_MCK"),
+               MTK_FUNCTION(3, "URTS1"),
+               MTK_FUNCTION(4, "AGPS_SYNC"),
+               MTK_FUNCTION(5, "I2S5_MCK"),
+               MTK_FUNCTION(6, "DVFSRC_EXT_REQ"),
+               MTK_FUNCTION(7, "DBG_MON_B8")
+       ),
+       MTK_PIN(
+               70, "GPIO70",
+               MTK_EINT_FUNCTION(0, 70),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO70"),
+               MTK_FUNCTION(1, "I2S0_DI"),
+               MTK_FUNCTION(2, "I2S2_DI"),
+               MTK_FUNCTION(3, "KPCOL2"),
+               MTK_FUNCTION(4, "PCM0_DI"),
+               MTK_FUNCTION(5, "I2S2_DI2"),
+               MTK_FUNCTION(6, "ANT_SEL11"),
+               MTK_FUNCTION(7, "DBG_MON_B7")
+       ),
+       MTK_PIN(
+               71, "GPIO71",
+               MTK_EINT_FUNCTION(0, 71),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO71"),
+               MTK_FUNCTION(1, "I2S3_BCK"),
+               MTK_FUNCTION(2, "I2S1_BCK"),
+               MTK_FUNCTION(3, "KPROW2"),
+               MTK_FUNCTION(4, "PCM0_CLK"),
+               MTK_FUNCTION(5, "I2S5_BCK"),
+               MTK_FUNCTION(6, "ANT_SEL12"),
+               MTK_FUNCTION(7, "DBG_MON_B6")
+       ),
+       MTK_PIN(
+               72, "GPIO72",
+               MTK_EINT_FUNCTION(0, 72),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO72"),
+               MTK_FUNCTION(1, "BPI_BUS19_OLAT0"),
+               MTK_FUNCTION(2, "CONN_BPI_BUS19_OLAT0")
+       ),
+       MTK_PIN(
+               73, "GPIO73",
+               MTK_EINT_FUNCTION(0, 73),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO73"),
+               MTK_FUNCTION(1, "BPI_BUS18_PA_VM1"),
+               MTK_FUNCTION(2, "CONN_MIPI5_SCLK"),
+               MTK_FUNCTION(3, "MIPI5_SCLK")
+       ),
+       MTK_PIN(
+               74, "GPIO74",
+               MTK_EINT_FUNCTION(0, 74),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO74"),
+               MTK_FUNCTION(1, "BPI_BUS17_PA_VM0"),
+               MTK_FUNCTION(2, "CONN_MIPI5_SDATA"),
+               MTK_FUNCTION(3, "MIPI5_SDATA")
+       ),
+       MTK_PIN(
+               75, "GPIO75",
+               MTK_EINT_FUNCTION(0, 75),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO75"),
+               MTK_FUNCTION(1, "BPI_BUS20_OLAT1"),
+               MTK_FUNCTION(2, "CONN_BPI_BUS20_OLAT1"),
+               MTK_FUNCTION(3, "RFIC0_BSI_D2")
+       ),
+       MTK_PIN(
+               76, "GPIO76",
+               MTK_EINT_FUNCTION(0, 76),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO76"),
+               MTK_FUNCTION(1, "RFIC0_BSI_D1")
+       ),
+       MTK_PIN(
+               77, "GPIO77",
+               MTK_EINT_FUNCTION(0, 77),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO77"),
+               MTK_FUNCTION(1, "RFIC0_BSI_D0")
+       ),
+       MTK_PIN(
+               78, "GPIO78",
+               MTK_EINT_FUNCTION(0, 78),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO78"),
+               MTK_FUNCTION(1, "BPI_BUS7"),
+               MTK_FUNCTION(7, "DBG_MON_B24")
+       ),
+       MTK_PIN(
+               79, "GPIO79",
+               MTK_EINT_FUNCTION(0, 79),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO79"),
+               MTK_FUNCTION(1, "BPI_BUS6"),
+               MTK_FUNCTION(7, "DBG_MON_B23")
+       ),
+       MTK_PIN(
+               80, "GPIO80",
+               MTK_EINT_FUNCTION(0, 80),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO80"),
+               MTK_FUNCTION(1, "BPI_BUS8"),
+               MTK_FUNCTION(7, "DBG_MON_B22")
+       ),
+       MTK_PIN(
+               81, "GPIO81",
+               MTK_EINT_FUNCTION(0, 81),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO81"),
+               MTK_FUNCTION(1, "BPI_BUS9"),
+               MTK_FUNCTION(7, "DBG_MON_B21")
+       ),
+       MTK_PIN(
+               82, "GPIO82",
+               MTK_EINT_FUNCTION(0, 82),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO82"),
+               MTK_FUNCTION(1, "BPI_BUS10"),
+               MTK_FUNCTION(7, "DBG_MON_B20")
+       ),
+       MTK_PIN(
+               83, "GPIO83",
+               MTK_EINT_FUNCTION(0, 83),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO83"),
+               MTK_FUNCTION(1, "BPI_BUS11"),
+               MTK_FUNCTION(7, "DBG_MON_B19")
+       ),
+       MTK_PIN(
+               84, "GPIO84",
+               MTK_EINT_FUNCTION(0, 84),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO84"),
+               MTK_FUNCTION(1, "BPI_BUS12"),
+               MTK_FUNCTION(2, "CONN_BPI_BUS12")
+       ),
+       MTK_PIN(
+               85, "GPIO85",
+               MTK_EINT_FUNCTION(0, 85),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO85"),
+               MTK_FUNCTION(1, "BPI_BUS13"),
+               MTK_FUNCTION(2, "CONN_BPI_BUS13")
+       ),
+       MTK_PIN(
+               86, "GPIO86",
+               MTK_EINT_FUNCTION(0, 86),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO86"),
+               MTK_FUNCTION(1, "BPI_BUS14"),
+               MTK_FUNCTION(2, "CONN_BPI_BUS14")
+       ),
+       MTK_PIN(
+               87, "GPIO87",
+               MTK_EINT_FUNCTION(0, 87),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO87"),
+               MTK_FUNCTION(1, "BPI_BUS15"),
+               MTK_FUNCTION(2, "CONN_BPI_BUS15")
+       ),
+       MTK_PIN(
+               88, "GPIO88",
+               MTK_EINT_FUNCTION(0, 88),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO88"),
+               MTK_FUNCTION(1, "BPI_BUS16"),
+               MTK_FUNCTION(2, "CONN_BPI_BUS16")
+       ),
+       MTK_PIN(
+               89, "GPIO89",
+               MTK_EINT_FUNCTION(0, 89),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO89"),
+               MTK_FUNCTION(1, "BPI_BUS5"),
+               MTK_FUNCTION(7, "DBG_MON_B18")
+       ),
+       MTK_PIN(
+               90, "GPIO90",
+               MTK_EINT_FUNCTION(0, 90),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO90"),
+               MTK_FUNCTION(1, "BPI_BUS4"),
+               MTK_FUNCTION(7, "DBG_MON_B17")
+       ),
+       MTK_PIN(
+               91, "GPIO91",
+               MTK_EINT_FUNCTION(0, 91),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO91"),
+               MTK_FUNCTION(1, "BPI_BUS3")
+       ),
+       MTK_PIN(
+               92, "GPIO92",
+               MTK_EINT_FUNCTION(0, 92),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO92"),
+               MTK_FUNCTION(1, "BPI_BUS2"),
+               MTK_FUNCTION(7, "DBG_MON_B16")
+       ),
+       MTK_PIN(
+               93, "GPIO93",
+               MTK_EINT_FUNCTION(0, 93),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO93"),
+               MTK_FUNCTION(1, "BPI_BUS1")
+       ),
+       MTK_PIN(
+               94, "GPIO94",
+               MTK_EINT_FUNCTION(0, 94),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO94"),
+               MTK_FUNCTION(1, "BPI_BUS0"),
+               MTK_FUNCTION(7, "DBG_MON_B15")
+       ),
+       MTK_PIN(
+               95, "GPIO95",
+               MTK_EINT_FUNCTION(0, 95),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO95"),
+               MTK_FUNCTION(1, "MIPI0_SDATA")
+       ),
+       MTK_PIN(
+               96, "GPIO96",
+               MTK_EINT_FUNCTION(0, 96),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO96"),
+               MTK_FUNCTION(1, "MIPI0_SCLK")
+       ),
+       MTK_PIN(
+               97, "GPIO97",
+               MTK_EINT_FUNCTION(0, 97),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO97"),
+               MTK_FUNCTION(1, "MIPI1_SDATA")
+       ),
+       MTK_PIN(
+               98, "GPIO98",
+               MTK_EINT_FUNCTION(0, 98),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO98"),
+               MTK_FUNCTION(1, "MIPI1_SCLK")
+       ),
+       MTK_PIN(
+               99, "GPIO99",
+               MTK_EINT_FUNCTION(0, 99),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO99"),
+               MTK_FUNCTION(1, "MIPI2_SCLK"),
+               MTK_FUNCTION(7, "DBG_MON_B14")
+       ),
+       MTK_PIN(
+               100, "GPIO100",
+               MTK_EINT_FUNCTION(0, 100),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO100"),
+               MTK_FUNCTION(1, "MIPI2_SDATA"),
+               MTK_FUNCTION(7, "DBG_MON_B13")
+       ),
+       MTK_PIN(
+               101, "GPIO101",
+               MTK_EINT_FUNCTION(0, 101),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO101"),
+               MTK_FUNCTION(1, "MIPI3_SCLK"),
+               MTK_FUNCTION(7, "DBG_MON_B12")
+       ),
+       MTK_PIN(
+               102, "GPIO102",
+               MTK_EINT_FUNCTION(0, 102),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO102"),
+               MTK_FUNCTION(1, "MIPI3_SDATA"),
+               MTK_FUNCTION(7, "DBG_MON_B11")
+       ),
+       MTK_PIN(
+               103, "GPIO103",
+               MTK_EINT_FUNCTION(0, 103),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO103"),
+               MTK_FUNCTION(1, "MIPI4_SCLK"),
+               MTK_FUNCTION(2, "CONN_MIPI4_SCLK")
+       ),
+       MTK_PIN(
+               104, "GPIO104",
+               MTK_EINT_FUNCTION(0, 104),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO104"),
+               MTK_FUNCTION(1, "MIPI4_SDATA"),
+               MTK_FUNCTION(2, "CONN_MIPI4_SDATA")
+       ),
+       MTK_PIN(
+               105, "GPIO105",
+               MTK_EINT_FUNCTION(0, 105),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO105"),
+               MTK_FUNCTION(1, "BPI_BUS22_OLAT3"),
+               MTK_FUNCTION(2, "CONN_BPI_BUS22_OLAT3")
+       ),
+       MTK_PIN(
+               106, "GPIO106",
+               MTK_EINT_FUNCTION(0, 106),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO106"),
+               MTK_FUNCTION(1, "BPI_BUS21_OLAT2"),
+               MTK_FUNCTION(2, "CONN_BPI_BUS21_OLAT2")
+       ),
+       MTK_PIN(
+               107, "GPIO107",
+               MTK_EINT_FUNCTION(0, 107),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO107"),
+               MTK_FUNCTION(1, "BPI_BUS24_ANT1"),
+               MTK_FUNCTION(2, "CONN_BPI_BUS24_ANT1")
+       ),
+       MTK_PIN(
+               108, "GPIO108",
+               MTK_EINT_FUNCTION(0, 108),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO108"),
+               MTK_FUNCTION(1, "BPI_BUS25_ANT2"),
+               MTK_FUNCTION(2, "CONN_BPI_BUS25_ANT2")
+       ),
+       MTK_PIN(
+               109, "GPIO109",
+               MTK_EINT_FUNCTION(0, 109),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO109"),
+               MTK_FUNCTION(1, "BPI_BUS23_ANT0"),
+               MTK_FUNCTION(2, "CONN_BPI_BUS23_ANT0")
+       ),
+       MTK_PIN(
+               110, "GPIO110",
+               MTK_EINT_FUNCTION(0, 110),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO110"),
+               MTK_FUNCTION(1, "SCL4")
+       ),
+       MTK_PIN(
+               111, "GPIO111",
+               MTK_EINT_FUNCTION(0, 111),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO111"),
+               MTK_FUNCTION(1, "SDA4")
+       ),
+       MTK_PIN(
+               112, "GPIO112",
+               MTK_EINT_FUNCTION(0, 112),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO112"),
+               MTK_FUNCTION(1, "SCL2")
+       ),
+       MTK_PIN(
+               113, "GPIO113",
+               MTK_EINT_FUNCTION(0, 113),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO113"),
+               MTK_FUNCTION(1, "SDA2")
+       ),
+       MTK_PIN(
+               114, "GPIO114",
+               MTK_EINT_FUNCTION(0, 114),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO114"),
+               MTK_FUNCTION(1, "CLKM0"),
+               MTK_FUNCTION(2, "SPI3_MI"),
+               MTK_FUNCTION(7, "DBG_MON_B5")
+       ),
+       MTK_PIN(
+               115, "GPIO115",
+               MTK_EINT_FUNCTION(0, 115),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO115"),
+               MTK_FUNCTION(1, "CLKM1"),
+               MTK_FUNCTION(2, "SPI3_CSB"),
+               MTK_FUNCTION(7, "DBG_MON_B4")
+       ),
+       MTK_PIN(
+               116, "GPIO116",
+               MTK_EINT_FUNCTION(0, 116),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO116"),
+               MTK_FUNCTION(1, "CMMCLK0"),
+               MTK_FUNCTION(7, "DBG_MON_B3")
+       ),
+       MTK_PIN(
+               117, "GPIO117",
+               MTK_EINT_FUNCTION(0, 117),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO117"),
+               MTK_FUNCTION(1, "CMMCLK1"),
+               MTK_FUNCTION(2, "DVFSRC_EXT_REQ"),
+               MTK_FUNCTION(7, "DBG_MON_B2")
+       ),
+       MTK_PIN(
+               118, "GPIO118",
+               MTK_EINT_FUNCTION(0, 118),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO118"),
+               MTK_FUNCTION(1, "CLKM2"),
+               MTK_FUNCTION(2, "SPI3_MO"),
+               MTK_FUNCTION(7, "DBG_MON_B1")
+       ),
+       MTK_PIN(
+               119, "GPIO119",
+               MTK_EINT_FUNCTION(0, 119),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO119"),
+               MTK_FUNCTION(1, "CLKM3"),
+               MTK_FUNCTION(2, "SPI3_CLK"),
+               MTK_FUNCTION(7, "DBG_MON_B0")
+       ),
+       MTK_PIN(
+               120, "GPIO120",
+               MTK_EINT_FUNCTION(0, 120),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO120"),
+               MTK_FUNCTION(1, "CMMCLK2"),
+               MTK_FUNCTION(2, "CLKM2"),
+               MTK_FUNCTION(6, "ANT_SEL12"),
+               MTK_FUNCTION(7, "TP_UCTS2_AO")
+       ),
+       MTK_PIN(
+               121, "GPIO121",
+               MTK_EINT_FUNCTION(0, 121),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO121"),
+               MTK_FUNCTION(1, "CMMCLK3"),
+               MTK_FUNCTION(2, "CLKM3"),
+               MTK_FUNCTION(3, "DVFSRC_EXT_REQ"),
+               MTK_FUNCTION(6, "ANT_SEL11"),
+               MTK_FUNCTION(7, "TP_URTS2_AO")
+       ),
+       MTK_PIN(
+               122, "GPIO122",
+               MTK_EINT_FUNCTION(0, 122),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO122"),
+               MTK_FUNCTION(1, "CMVREF1"),
+               MTK_FUNCTION(2, "PCM0_SYNC"),
+               MTK_FUNCTION(3, "SRCLKENAI1"),
+               MTK_FUNCTION(4, "AGPS_SYNC"),
+               MTK_FUNCTION(5, "PWM_1"),
+               MTK_FUNCTION(6, "ANT_SEL9"),
+               MTK_FUNCTION(7, "TP_UCTS1_AO")
+       ),
+       MTK_PIN(
+               123, "GPIO123",
+               MTK_EINT_FUNCTION(0, 123),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO123"),
+               MTK_FUNCTION(2, "PCM0_DI"),
+               MTK_FUNCTION(3, "ADSP_JTAG_TRSTN"),
+               MTK_FUNCTION(4, "VPU_UDI_NTRST"),
+               MTK_FUNCTION(5, "SPM_JTAG_TRSTN"),
+               MTK_FUNCTION(6, "SSPM_JTAG_TRSTN")
+       ),
+       MTK_PIN(
+               124, "GPIO124",
+               MTK_EINT_FUNCTION(0, 124),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO124"),
+               MTK_FUNCTION(1, "CMVREF2"),
+               MTK_FUNCTION(2, "PCM0_CLK"),
+               MTK_FUNCTION(3, "MD_INT0"),
+               MTK_FUNCTION(4, "EXT_FRAME_SYNC"),
+               MTK_FUNCTION(5, "PWM_2"),
+               MTK_FUNCTION(6, "ANT_SEL10"),
+               MTK_FUNCTION(7, "TP_URTS1_AO")
+       ),
+       MTK_PIN(
+               125, "GPIO125",
+               MTK_EINT_FUNCTION(0, 125),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO125"),
+               MTK_FUNCTION(1, "CMVREF3"),
+               MTK_FUNCTION(2, "PCM0_DO"),
+               MTK_FUNCTION(3, "ADSP_JTAG_TMS"),
+               MTK_FUNCTION(4, "VPU_UDI_TMS"),
+               MTK_FUNCTION(5, "SPM_JTAG_TMS"),
+               MTK_FUNCTION(6, "SSPM_JTAG_TMS")
+       ),
+       MTK_PIN(
+               126, "GPIO126",
+               MTK_EINT_FUNCTION(0, 126),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO126"),
+               MTK_FUNCTION(1, "CMVREF4"),
+               MTK_FUNCTION(2, "CMFLASH0"),
+               MTK_FUNCTION(6, "CONN_MCU_AICE_TMSC")
+       ),
+       MTK_PIN(
+               127, "GPIO127",
+               MTK_EINT_FUNCTION(0, 127),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO127"),
+               MTK_FUNCTION(1, "CMVREF0"),
+               MTK_FUNCTION(2, "CMFLASH1"),
+               MTK_FUNCTION(6, "CONN_MCU_AICE_TCKC")
+       ),
+       MTK_PIN(
+               128, "GPIO128",
+               MTK_EINT_FUNCTION(0, 128),
+               DRV_GRP0,
+               MTK_FUNCTION(0, "GPIO128"),
+               MTK_FUNCTION(1, "MD1_SIM1_SIO"),
+               MTK_FUNCTION(2, "MD1_SIM2_SIO"),
+               MTK_FUNCTION(3, "CCU_JTAG_TRST"),
+               MTK_FUNCTION(4, "CONN_DSP_JINTP"),
+               MTK_FUNCTION(5, "SCP_JTAG_TRSTN"),
+               MTK_FUNCTION(6, "LVTS_FOUT"),
+               MTK_FUNCTION(7, "DBG_MON_A3")
+       ),
+       MTK_PIN(
+               129, "GPIO129",
+               MTK_EINT_FUNCTION(0, 129),
+               DRV_GRP0,
+               MTK_FUNCTION(0, "GPIO129"),
+               MTK_FUNCTION(1, "MD1_SIM1_SRST"),
+               MTK_FUNCTION(2, "MD1_SIM2_SRST"),
+               MTK_FUNCTION(3, "CCU_JTAG_TCK"),
+               MTK_FUNCTION(4, "CONN_DSP_JCK"),
+               MTK_FUNCTION(5, "SCP_JTAG_TCK"),
+               MTK_FUNCTION(6, "LVTS_SDO"),
+               MTK_FUNCTION(7, "DBG_MON_A4")
+       ),
+       MTK_PIN(
+               130, "GPIO130",
+               MTK_EINT_FUNCTION(0, 130),
+               DRV_GRP0,
+               MTK_FUNCTION(0, "GPIO130"),
+               MTK_FUNCTION(1, "MD1_SIM1_SCLK"),
+               MTK_FUNCTION(2, "MD1_SIM2_SCLK"),
+               MTK_FUNCTION(6, "LVTS_26M"),
+               MTK_FUNCTION(7, "DBG_MON_A5")
+       ),
+       MTK_PIN(
+               131, "GPIO131",
+               MTK_EINT_FUNCTION(0, 131),
+               DRV_GRP0,
+               MTK_FUNCTION(0, "GPIO131"),
+               MTK_FUNCTION(1, "MD1_SIM2_SCLK"),
+               MTK_FUNCTION(2, "MD1_SIM1_SCLK"),
+               MTK_FUNCTION(3, "CCU_JTAG_TDI"),
+               MTK_FUNCTION(4, "CONN_DSP_JDI"),
+               MTK_FUNCTION(5, "SCP_JTAG_TDI"),
+               MTK_FUNCTION(6, "LVTS_SCK"),
+               MTK_FUNCTION(7, "DBG_MON_A0")
+       ),
+       MTK_PIN(
+               132, "GPIO132",
+               MTK_EINT_FUNCTION(0, 132),
+               DRV_GRP0,
+               MTK_FUNCTION(0, "GPIO132"),
+               MTK_FUNCTION(1, "MD1_SIM2_SRST"),
+               MTK_FUNCTION(2, "MD1_SIM1_SRST"),
+               MTK_FUNCTION(3, "CCU_JTAG_TMS"),
+               MTK_FUNCTION(4, "CONN_DSP_JMS"),
+               MTK_FUNCTION(5, "SCP_JTAG_TMS"),
+               MTK_FUNCTION(6, "LVTS_SDI"),
+               MTK_FUNCTION(7, "DBG_MON_A1")
+       ),
+       MTK_PIN(
+               133, "GPIO133",
+               MTK_EINT_FUNCTION(0, 133),
+               DRV_GRP0,
+               MTK_FUNCTION(0, "GPIO133"),
+               MTK_FUNCTION(1, "MD1_SIM2_SIO"),
+               MTK_FUNCTION(2, "MD1_SIM1_SIO"),
+               MTK_FUNCTION(3, "CCU_JTAG_TDO"),
+               MTK_FUNCTION(4, "CONN_DSP_JDO"),
+               MTK_FUNCTION(5, "SCP_JTAG_TDO"),
+               MTK_FUNCTION(6, "LVTS_SCF"),
+               MTK_FUNCTION(7, "DBG_MON_A2")
+       ),
+       MTK_PIN(
+               134, "GPIO134",
+               MTK_EINT_FUNCTION(0, 134),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO134"),
+               MTK_FUNCTION(1, "MSDC1_CLK"),
+               MTK_FUNCTION(2, "PCM1_CLK"),
+               MTK_FUNCTION(3, "SPI5_B_MI"),
+               MTK_FUNCTION(4, "UDI_TCK"),
+               MTK_FUNCTION(5, "CONN_DSP_JCK"),
+               MTK_FUNCTION(6, "IPU_JTAG_TCK"),
+               MTK_FUNCTION(7, "JTCK_SEL3")
+       ),
+       MTK_PIN(
+               135, "GPIO135",
+               MTK_EINT_FUNCTION(0, 135),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO135"),
+               MTK_FUNCTION(1, "MSDC1_CMD"),
+               MTK_FUNCTION(2, "PCM1_SYNC"),
+               MTK_FUNCTION(3, "SPI5_B_CSB"),
+               MTK_FUNCTION(4, "UDI_TMS"),
+               MTK_FUNCTION(5, "CONN_DSP_JMS"),
+               MTK_FUNCTION(6, "IPU_JTAG_TMS"),
+               MTK_FUNCTION(7, "JTMS_SEL3")
+       ),
+       MTK_PIN(
+               136, "GPIO136",
+               MTK_EINT_FUNCTION(0, 136),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO136"),
+               MTK_FUNCTION(1, "MSDC1_DAT3"),
+               MTK_FUNCTION(2, "PCM1_DI"),
+               MTK_FUNCTION(3, "SPI5_B_MO"),
+               MTK_FUNCTION(4, "CONN_TCXOENA_REQ"),
+               MTK_FUNCTION(5, "CONN_DSP_JINTP"),
+               MTK_FUNCTION(6, "CONN_MCU_AICE_TMSC")
+       ),
+       MTK_PIN(
+               137, "GPIO137",
+               MTK_EINT_FUNCTION(0, 137),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO137"),
+               MTK_FUNCTION(1, "MSDC1_DAT0"),
+               MTK_FUNCTION(2, "PCM1_DO0"),
+               MTK_FUNCTION(3, "SPI5_B_CLK"),
+               MTK_FUNCTION(4, "UDI_TDI"),
+               MTK_FUNCTION(5, "CONN_DSP_JDI"),
+               MTK_FUNCTION(6, "IPU_JTAG_TDI"),
+               MTK_FUNCTION(7, "JTDI_SEL3")
+       ),
+       MTK_PIN(
+               138, "GPIO138",
+               MTK_EINT_FUNCTION(0, 138),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO138"),
+               MTK_FUNCTION(1, "MSDC1_DAT2"),
+               MTK_FUNCTION(2, "PCM1_DO2"),
+               MTK_FUNCTION(3, "ANT_SEL11"),
+               MTK_FUNCTION(4, "UDI_NTRST"),
+               MTK_FUNCTION(5, "CONN_MCU_AICE_TCKC"),
+               MTK_FUNCTION(6, "IPU_JTAG_TRST"),
+               MTK_FUNCTION(7, "JTRSTN_SEL3")
+       ),
+       MTK_PIN(
+               139, "GPIO139",
+               MTK_EINT_FUNCTION(0, 139),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO139"),
+               MTK_FUNCTION(1, "MSDC1_DAT1"),
+               MTK_FUNCTION(2, "PCM1_DO1"),
+               MTK_FUNCTION(3, "ANT_SEL12"),
+               MTK_FUNCTION(4, "UDI_TDO"),
+               MTK_FUNCTION(5, "CONN_DSP_JDO"),
+               MTK_FUNCTION(6, "IPU_JTAG_TDO"),
+               MTK_FUNCTION(7, "JTDO_SEL3")
+       ),
+       MTK_PIN(
+               140, "GPIO140",
+               MTK_EINT_FUNCTION(0, 140),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO140"),
+               MTK_FUNCTION(1, "MD_INT1_C2K_UIM0_HOT_PLUG"),
+               MTK_FUNCTION(2, "MD_INT2_C2K_UIM1_HOT_PLUG"),
+               MTK_FUNCTION(3, "ADSP_URXD0"),
+               MTK_FUNCTION(4, "SCL_6306"),
+               MTK_FUNCTION(5, "PTA_RXD"),
+               MTK_FUNCTION(6, "SSPM_URXD_AO")
+       ),
+       MTK_PIN(
+               141, "GPIO141",
+               MTK_EINT_FUNCTION(0, 141),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO141"),
+               MTK_FUNCTION(1, "MD_INT2_C2K_UIM1_HOT_PLUG"),
+               MTK_FUNCTION(2, "MD_INT1_C2K_UIM0_HOT_PLUG"),
+               MTK_FUNCTION(3, "ADSP_UTXD0"),
+               MTK_FUNCTION(4, "SDA_6306"),
+               MTK_FUNCTION(5, "PTA_TXD"),
+               MTK_FUNCTION(6, "SSPM_UTXD_AO")
+       ),
+       MTK_PIN(
+               142, "GPIO142",
+               MTK_EINT_FUNCTION(0, 142),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO142"),
+               MTK_FUNCTION(1, "SCP_VREQ_VAO"),
+               MTK_FUNCTION(2, "DVFSRC_EXT_REQ")
+       ),
+       MTK_PIN(
+               143, "GPIO143",
+               MTK_EINT_FUNCTION(0, 143),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO143"),
+               MTK_FUNCTION(1, "AUD_DAT_MOSI2"),
+               MTK_FUNCTION(7, "DBG_MON_A9")
+       ),
+       MTK_PIN(
+               144, "GPIO144",
+               MTK_EINT_FUNCTION(0, 144),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO144"),
+               MTK_FUNCTION(1, "AUD_NLE_MOSI1"),
+               MTK_FUNCTION(2, "AUD_CLK_MISO"),
+               MTK_FUNCTION(3, "I2S2_MCK"),
+               MTK_FUNCTION(5, "UDI_TCK"),
+               MTK_FUNCTION(6, "UFS_UNIPRO_SDA"),
+               MTK_FUNCTION(7, "DBG_MON_A10")
+       ),
+       MTK_PIN(
+               145, "GPIO145",
+               MTK_EINT_FUNCTION(0, 145),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO145"),
+               MTK_FUNCTION(1, "AUD_NLE_MOSI0"),
+               MTK_FUNCTION(2, "AUD_SYNC_MISO"),
+               MTK_FUNCTION(3, "I2S2_BCK"),
+               MTK_FUNCTION(5, "UDI_TMS"),
+               MTK_FUNCTION(7, "DBG_MON_A11")
+       ),
+       MTK_PIN(
+               146, "GPIO146",
+               MTK_EINT_FUNCTION(0, 146),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO146"),
+               MTK_FUNCTION(1, "AUD_DAT_MISO2"),
+               MTK_FUNCTION(3, "I2S2_DI2"),
+               MTK_FUNCTION(5, "UDI_TDO"),
+               MTK_FUNCTION(7, "DBG_MON_A14")
+       ),
+       MTK_PIN(
+               147, "GPIO147",
+               MTK_EINT_FUNCTION(0, 147),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO147"),
+               MTK_FUNCTION(1, "ANT_SEL0"),
+               MTK_FUNCTION(2, "PWM_3")
+       ),
+       MTK_PIN(
+               148, "GPIO148",
+               MTK_EINT_FUNCTION(0, 148),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO148"),
+               MTK_FUNCTION(1, "ANT_SEL1"),
+               MTK_FUNCTION(2, "SPI0_B_MI"),
+               MTK_FUNCTION(3, "SSPM_URXD_AO"),
+               MTK_FUNCTION(5, "TP_UCTS2_AO"),
+               MTK_FUNCTION(6, "CLKM0")
+       ),
+       MTK_PIN(
+               149, "GPIO149",
+               MTK_EINT_FUNCTION(0, 149),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO149"),
+               MTK_FUNCTION(1, "ANT_SEL2"),
+               MTK_FUNCTION(2, "SPI0_B_CSB"),
+               MTK_FUNCTION(3, "SSPM_UTXD_AO"),
+               MTK_FUNCTION(5, "TP_URTS2_AO"),
+               MTK_FUNCTION(6, "CONN_TCXOENA_REQ")
+       ),
+       MTK_PIN(
+               150, "GPIO150",
+               MTK_EINT_FUNCTION(0, 150),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO150"),
+               MTK_FUNCTION(1, "ANT_SEL3"),
+               MTK_FUNCTION(2, "SPI0_B_MO"),
+               MTK_FUNCTION(3, "UCTS1"),
+               MTK_FUNCTION(5, "TP_UCTS1_AO"),
+               MTK_FUNCTION(6, "IDDIG"),
+               MTK_FUNCTION(7, "SCL9")
+       ),
+       MTK_PIN(
+               151, "GPIO151",
+               MTK_EINT_FUNCTION(0, 151),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO151"),
+               MTK_FUNCTION(1, "ANT_SEL4"),
+               MTK_FUNCTION(2, "SPI0_B_CLK"),
+               MTK_FUNCTION(3, "URTS1"),
+               MTK_FUNCTION(5, "TP_URTS1_AO"),
+               MTK_FUNCTION(6, "USB_DRVVBUS"),
+               MTK_FUNCTION(7, "SDA9")
+       ),
+       MTK_PIN(
+               152, "GPIO152",
+               MTK_EINT_FUNCTION(0, 152),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO152"),
+               MTK_FUNCTION(1, "ANT_SEL5"),
+               MTK_FUNCTION(2, "SPI1_B_MI"),
+               MTK_FUNCTION(3, "CLKM3"),
+               MTK_FUNCTION(5, "TP_URXD1_AO"),
+               MTK_FUNCTION(6, "SCP_SPI1_B_MI"),
+               MTK_FUNCTION(7, "SCL8")
+       ),
+       MTK_PIN(
+               153, "GPIO153",
+               MTK_EINT_FUNCTION(0, 153),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO153"),
+               MTK_FUNCTION(1, "ANT_SEL6"),
+               MTK_FUNCTION(2, "SPI1_B_CSB"),
+               MTK_FUNCTION(3, "SRCLKENAI0"),
+               MTK_FUNCTION(4, "PWM_0"),
+               MTK_FUNCTION(5, "TP_UTXD1_AO"),
+               MTK_FUNCTION(6, "SCP_SPI1_B_CS"),
+               MTK_FUNCTION(7, "SDA8")
+       ),
+       MTK_PIN(
+               154, "GPIO154",
+               MTK_EINT_FUNCTION(0, 154),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO154"),
+               MTK_FUNCTION(1, "ANT_SEL7"),
+               MTK_FUNCTION(2, "SPI1_B_MO"),
+               MTK_FUNCTION(3, "SRCLKENAI1"),
+               MTK_FUNCTION(5, "TP_URXD2_AO"),
+               MTK_FUNCTION(6, "SCP_SPI1_B_MO")
+       ),
+       MTK_PIN(
+               155, "GPIO155",
+               MTK_EINT_FUNCTION(0, 155),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO155"),
+               MTK_FUNCTION(1, "ANT_SEL8"),
+               MTK_FUNCTION(2, "SPI1_B_CLK"),
+               MTK_FUNCTION(3, "MD_INT0"),
+               MTK_FUNCTION(5, "TP_UTXD2_AO"),
+               MTK_FUNCTION(6, "SCP_SPI1_B_CK"),
+               MTK_FUNCTION(7, "DBG_MON_A15")
+       ),
+       MTK_PIN(
+               156, "GPIO156",
+               MTK_EINT_FUNCTION(0, 156),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO156"),
+               MTK_FUNCTION(1, "CONN_TOP_CLK"),
+               MTK_FUNCTION(2, "AUXIF_CLK0"),
+               MTK_FUNCTION(7, "DBG_MON_A16")
+       ),
+       MTK_PIN(
+               157, "GPIO157",
+               MTK_EINT_FUNCTION(0, 157),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO157"),
+               MTK_FUNCTION(1, "CONN_TOP_DATA"),
+               MTK_FUNCTION(2, "AUXIF_ST0"),
+               MTK_FUNCTION(7, "DBG_MON_A17")
+       ),
+       MTK_PIN(
+               158, "GPIO158",
+               MTK_EINT_FUNCTION(0, 158),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO158"),
+               MTK_FUNCTION(1, "CONN_HRST_B"),
+               MTK_FUNCTION(7, "DBG_MON_A18")
+       ),
+       MTK_PIN(
+               159, "GPIO159",
+               MTK_EINT_FUNCTION(0, 159),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO159"),
+               MTK_FUNCTION(1, "CONN_WB_PTA"),
+               MTK_FUNCTION(7, "DBG_MON_A19")
+       ),
+       MTK_PIN(
+               160, "GPIO160",
+               MTK_EINT_FUNCTION(0, 160),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO160"),
+               MTK_FUNCTION(1, "CONN_BT_CLK"),
+               MTK_FUNCTION(2, "AUXIF_CLK1"),
+               MTK_FUNCTION(7, "DBG_MON_A20")
+       ),
+       MTK_PIN(
+               161, "GPIO161",
+               MTK_EINT_FUNCTION(0, 161),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO161"),
+               MTK_FUNCTION(1, "CONN_BT_DATA"),
+               MTK_FUNCTION(2, "AUXIF_ST1"),
+               MTK_FUNCTION(7, "DBG_MON_A21")
+       ),
+       MTK_PIN(
+               162, "GPIO162",
+               MTK_EINT_FUNCTION(0, 162),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO162"),
+               MTK_FUNCTION(1, "CONN_WF_CTRL0"),
+               MTK_FUNCTION(7, "DBG_MON_A22")
+       ),
+       MTK_PIN(
+               163, "GPIO163",
+               MTK_EINT_FUNCTION(0, 163),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO163"),
+               MTK_FUNCTION(1, "CONN_WF_CTRL1"),
+               MTK_FUNCTION(2, "UFS_MPHY_SCL"),
+               MTK_FUNCTION(7, "DBG_MON_A23")
+       ),
+       MTK_PIN(
+               164, "GPIO164",
+               MTK_EINT_FUNCTION(0, 164),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO164"),
+               MTK_FUNCTION(1, "CONN_WF_CTRL2"),
+               MTK_FUNCTION(2, "UFS_MPHY_SDA"),
+               MTK_FUNCTION(7, "DBG_MON_A24")
+       ),
+       MTK_PIN(
+               165, "GPIO165",
+               MTK_EINT_FUNCTION(0, 165),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO165"),
+               MTK_FUNCTION(1, "CONN_WF_CTRL3"),
+               MTK_FUNCTION(2, "UFS_UNIPRO_SDA"),
+               MTK_FUNCTION(7, "DBG_MON_A25")
+       ),
+       MTK_PIN(
+               166, "GPIO166",
+               MTK_EINT_FUNCTION(0, 166),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO166"),
+               MTK_FUNCTION(1, "CONN_WF_CTRL4"),
+               MTK_FUNCTION(2, "UFS_UNIPRO_SCL"),
+               MTK_FUNCTION(7, "DBG_MON_A26")
+       ),
+       MTK_PIN(
+               167, "GPIO167",
+               MTK_EINT_FUNCTION(0, 167),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO167"),
+               MTK_FUNCTION(1, "MSDC0_CMD")
+       ),
+       MTK_PIN(
+               168, "GPIO168",
+               MTK_EINT_FUNCTION(0, 168),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO168"),
+               MTK_FUNCTION(1, "MSDC0_DAT0")
+       ),
+       MTK_PIN(
+               169, "GPIO169",
+               MTK_EINT_FUNCTION(0, 169),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO169"),
+               MTK_FUNCTION(1, "MSDC0_DAT2")
+       ),
+       MTK_PIN(
+               170, "GPIO170",
+               MTK_EINT_FUNCTION(0, 170),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO170"),
+               MTK_FUNCTION(1, "MSDC0_DAT4")
+       ),
+       MTK_PIN(
+               171, "GPIO171",
+               MTK_EINT_FUNCTION(0, 171),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO171"),
+               MTK_FUNCTION(1, "MSDC0_DAT6")
+       ),
+       MTK_PIN(
+               172, "GPIO172",
+               MTK_EINT_FUNCTION(0, 172),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO172"),
+               MTK_FUNCTION(1, "MSDC0_DAT1")
+       ),
+       MTK_PIN(
+               173, "GPIO173",
+               MTK_EINT_FUNCTION(0, 173),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO173"),
+               MTK_FUNCTION(1, "MSDC0_DAT5")
+       ),
+       MTK_PIN(
+               174, "GPIO174",
+               MTK_EINT_FUNCTION(0, 174),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO174"),
+               MTK_FUNCTION(1, "MSDC0_DAT7")
+       ),
+       MTK_PIN(
+               175, "GPIO175",
+               MTK_EINT_FUNCTION(0, 175),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO175"),
+               MTK_FUNCTION(1, "MSDC0_DSL"),
+               MTK_FUNCTION(2, "ANT_SEL9")
+       ),
+       MTK_PIN(
+               176, "GPIO176",
+               MTK_EINT_FUNCTION(0, 176),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO176"),
+               MTK_FUNCTION(1, "MSDC0_CLK"),
+               MTK_FUNCTION(2, "ANT_SEL10")
+       ),
+       MTK_PIN(
+               177, "GPIO177",
+               MTK_EINT_FUNCTION(0, 177),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO177"),
+               MTK_FUNCTION(1, "MSDC0_DAT3")
+       ),
+       MTK_PIN(
+               178, "GPIO178",
+               MTK_EINT_FUNCTION(0, 178),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO178"),
+               MTK_FUNCTION(1, "MSDC0_RSTB")
+       ),
+       MTK_PIN(
+               179, "GPIO179",
+               MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO179"),
+               MTK_FUNCTION(1, "RFIC0_BSI_EN")
+       ),
+       MTK_PIN(
+               180, "GPIO180",
+               MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO180"),
+               MTK_FUNCTION(1, "RFIC0_BSI_CK")
+       ),
+       MTK_PIN(
+               181, "GPIO181",
+               MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO181"),
+               MTK_FUNCTION(1, "SRCLKENA0")
+       ),
+       MTK_PIN(
+               182, "GPIO182",
+               MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO182"),
+               MTK_FUNCTION(1, "SRCLKENA1")
+       ),
+       MTK_PIN(
+               183, "GPIO183",
+               MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO183"),
+               MTK_FUNCTION(1, "WATCHDOG")
+       ),
+       MTK_PIN(
+               184, "GPIO184",
+               MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO184"),
+               MTK_FUNCTION(1, "PWRAP_SPI0_MI"),
+               MTK_FUNCTION(2, "PWRAP_SPI0_MO")
+       ),
+       MTK_PIN(
+               185, "GPIO185",
+               MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO185"),
+               MTK_FUNCTION(1, "PWRAP_SPI0_CSN")
+       ),
+       MTK_PIN(
+               186, "GPIO186",
+               MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO186"),
+               MTK_FUNCTION(1, "PWRAP_SPI0_MO"),
+               MTK_FUNCTION(2, "PWRAP_SPI0_MI")
+       ),
+       MTK_PIN(
+               187, "GPIO187",
+               MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO187"),
+               MTK_FUNCTION(1, "PWRAP_SPI0_CK")
+       ),
+       MTK_PIN(
+               188, "GPIO188",
+               MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO188"),
+               MTK_FUNCTION(1, "RTC32K_CK")
+       ),
+       MTK_PIN(
+               189, "GPIO189",
+               MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO189"),
+               MTK_FUNCTION(1, "AUD_CLK_MOSI"),
+               MTK_FUNCTION(3, "I2S1_MCK"),
+               MTK_FUNCTION(6, "UFS_UNIPRO_SCL")
+       ),
+       MTK_PIN(
+               190, "GPIO190",
+               MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO190"),
+               MTK_FUNCTION(1, "AUD_SYNC_MOSI"),
+               MTK_FUNCTION(3, "I2S1_BCK"),
+               MTK_FUNCTION(7, "DBG_MON_A6")
+       ),
+       MTK_PIN(
+               191, "GPIO191",
+               MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO191"),
+               MTK_FUNCTION(1, "AUD_DAT_MOSI0"),
+               MTK_FUNCTION(3, "I2S1_LRCK"),
+               MTK_FUNCTION(7, "DBG_MON_A7")
+       ),
+       MTK_PIN(
+               192, "GPIO192",
+               MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO192"),
+               MTK_FUNCTION(1, "AUD_DAT_MOSI1"),
+               MTK_FUNCTION(3, "I2S1_DO"),
+               MTK_FUNCTION(6, "UFS_MPHY_SDA"),
+               MTK_FUNCTION(7, "DBG_MON_A8")
+       ),
+       MTK_PIN(
+               193, "GPIO193",
+               MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO193"),
+               MTK_FUNCTION(1, "AUD_DAT_MISO0"),
+               MTK_FUNCTION(2, "VOW_DAT_MISO"),
+               MTK_FUNCTION(3, "I2S2_LRCK"),
+               MTK_FUNCTION(5, "UDI_TDI"),
+               MTK_FUNCTION(7, "DBG_MON_A12")
+       ),
+       MTK_PIN(
+               194, "GPIO194",
+               MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO194"),
+               MTK_FUNCTION(1, "AUD_DAT_MISO1"),
+               MTK_FUNCTION(2, "VOW_CLK_MISO"),
+               MTK_FUNCTION(3, "I2S2_DI"),
+               MTK_FUNCTION(5, "UDI_NTRST"),
+               MTK_FUNCTION(6, "UFS_MPHY_SCL"),
+               MTK_FUNCTION(7, "DBG_MON_A13")
+       ),
+       MTK_PIN(
+               195, "GPIO195",
+               MTK_EINT_FUNCTION(0, 179),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO195"),
+               MTK_FUNCTION(3, "ADSP_JTAG_TCK"),
+               MTK_FUNCTION(4, "VPU_UDI_TCK"),
+               MTK_FUNCTION(5, "SPM_JTAG_TCK"),
+               MTK_FUNCTION(6, "SSPM_JTAG_TCK")
+       ),
+       MTK_PIN(
+               196, "GPIO196",
+               MTK_EINT_FUNCTION(0, 180),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO196"),
+               MTK_FUNCTION(1, "CMMCLK4"),
+               MTK_FUNCTION(3, "ADSP_JTAG_TDI"),
+               MTK_FUNCTION(4, "VPU_UDI_TDI"),
+               MTK_FUNCTION(5, "SPM_JTAG_TDI"),
+               MTK_FUNCTION(6, "SSPM_JTAG_TDI")
+       ),
+       MTK_PIN(
+               197, "GPIO197",
+               MTK_EINT_FUNCTION(0, 181),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO197"),
+               MTK_FUNCTION(3, "ADSP_JTAG_TDO"),
+               MTK_FUNCTION(4, "VPU_UDI_TDO"),
+               MTK_FUNCTION(5, "SPM_JTAG_TDO"),
+               MTK_FUNCTION(6, "SSPM_JTAG_TDO")
+       ),
+       MTK_PIN(
+               198, "GPIO198",
+               MTK_EINT_FUNCTION(0, 182),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO198"),
+               MTK_FUNCTION(1, "SCL7")
+       ),
+       MTK_PIN(
+               199, "GPIO199",
+               MTK_EINT_FUNCTION(0, 183),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO199"),
+               MTK_FUNCTION(1, "SDA7")
+       ),
+       MTK_PIN(
+               200, "GPIO200",
+               MTK_EINT_FUNCTION(0, 184),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO200"),
+               MTK_FUNCTION(1, "URXD1"),
+               MTK_FUNCTION(2, "ADSP_URXD0"),
+               MTK_FUNCTION(3, "TP_URXD1_AO"),
+               MTK_FUNCTION(4, "SSPM_URXD_AO"),
+               MTK_FUNCTION(5, "TP_URXD2_AO"),
+               MTK_FUNCTION(6, "MBISTREADEN_TRIGGER")
+       ),
+       MTK_PIN(
+               201, "GPIO201",
+               MTK_EINT_FUNCTION(0, 185),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO201"),
+               MTK_FUNCTION(1, "UTXD1"),
+               MTK_FUNCTION(2, "ADSP_UTXD0"),
+               MTK_FUNCTION(3, "TP_UTXD1_AO"),
+               MTK_FUNCTION(4, "SSPM_UTXD_AO"),
+               MTK_FUNCTION(5, "TP_UTXD2_AO"),
+               MTK_FUNCTION(6, "MBISTWRITEEN_TRIGGER")
+       ),
+       MTK_PIN(
+               202, "GPIO202",
+               MTK_EINT_FUNCTION(0, 186),
+               DRV_GRP4,
+               MTK_FUNCTION(0, "GPIO202"),
+               MTK_FUNCTION(1, "PWM_3"),
+               MTK_FUNCTION(2, "CLKM3")
+       ),
+       MTK_PIN(
+               203, "GPIO203",
+               MTK_EINT_FUNCTION(0, 187),
+               DRV_GRP4,
+               MTK_FUNCTION(0, NULL)
+       ),
+       MTK_PIN(
+               204, "GPIO204",
+               MTK_EINT_FUNCTION(0, 188),
+               DRV_GRP4,
+               MTK_FUNCTION(0, NULL)
+       ),
+       MTK_PIN(
+               205, "GPIO205",
+               MTK_EINT_FUNCTION(0, 189),
+               DRV_GRP4,
+               MTK_FUNCTION(0, NULL)
+       ),
+       MTK_PIN(
+               206, "GPIO206",
+               MTK_EINT_FUNCTION(0, 190),
+               DRV_GRP4,
+               MTK_FUNCTION(0, NULL)
+       ),
+       MTK_PIN(
+               207, "GPIO207",
+               MTK_EINT_FUNCTION(0, 191),
+               DRV_GRP4,
+               MTK_FUNCTION(0, NULL)
+       ),
+       MTK_PIN(
+               208, "GPIO208",
+               MTK_EINT_FUNCTION(0, 193),
+               DRV_GRP4,
+               MTK_FUNCTION(0, NULL)
+       ),
+       MTK_PIN(
+               209, "GPIO209",
+               MTK_EINT_FUNCTION(0, 194),
+               DRV_GRP4,
+               MTK_FUNCTION(0, NULL)
+       ),
+};
+
+#endif /* __PINCTRL-MTK-MT6779_H */
index 90a432b..a23c182 100644 (file)
@@ -769,6 +769,13 @@ static int mtk_gpio_get_direction(struct gpio_chip *chip, unsigned int gpio)
        if (gpio >= hw->soc->npins)
                return -EINVAL;
 
+       /*
+        * "Virtual" GPIOs are only ever used for interrupts,
+        * so they are always inputs.
+        */
+       if (mtk_is_virt_gpio(hw, gpio))
+               return 1;
+
        desc = (const struct mtk_pin_desc *)&hw->soc->pins[gpio];
 
        err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DIR, &value);
index 0bcec03..8abf750 100644 (file)
@@ -746,11 +746,6 @@ static const char * const i2c3_groups[] = {
        "i2c3_sck_x", "i2c3_sda_x", "i2c3_sck_f", "i2c3_sda_f",
 };
 
-static const char * const i2c_slave_groups[] = {
-       "i2c_slave_sda_a", "i2c_slave_sck_a",
-       "i2c_slave_sda_f", "i2c_slave_sck_f",
-};
-
 static const char * const spi_a_groups[] = {
        "spi_a_mosi_x2", "spi_a_ss0_x3", "spi_a_sclk_x4", "spi_a_miso_x5",
        "spi_a_mosi_x7", "spi_a_miso_x8", "spi_a_ss0_x9", "spi_a_sclk_x10",
index 079f8ee..20683cd 100644 (file)
 #include "../pinctrl-utils.h"
 #include "pinctrl-meson.h"
 
+static const unsigned int meson_bit_strides[] = {
+       1, 1, 1, 1, 1, 2, 1
+};
+
 /**
  * meson_get_bank() - find the bank containing a given pin
  *
@@ -96,8 +100,9 @@ static void meson_calc_reg_and_bit(struct meson_bank *bank, unsigned int pin,
 {
        struct meson_reg_desc *desc = &bank->regs[reg_type];
 
-       *reg = desc->reg * 4;
-       *bit = desc->bit + pin - bank->first;
+       *bit = (desc->bit + pin - bank->first) * meson_bit_strides[reg_type];
+       *reg = (desc->reg + (*bit / 32)) * 4;
+       *bit &= 0x1f;
 }
 
 static int meson_get_groups_count(struct pinctrl_dev *pcdev)
@@ -314,7 +319,6 @@ static int meson_pinconf_set_drive_strength(struct meson_pinctrl *pc,
                return ret;
 
        meson_calc_reg_and_bit(bank, pin, REG_DS, &reg, &bit);
-       bit = bit << 1;
 
        if (drive_strength_ua <= 500) {
                ds_val = MESON_PINCONF_DRV_500UA;
@@ -441,7 +445,6 @@ static int meson_pinconf_get_drive_strength(struct meson_pinctrl *pc,
                return ret;
 
        meson_calc_reg_and_bit(bank, pin, REG_DS, &reg, &bit);
-       bit = bit << 1;
 
        ret = regmap_read(pc->reg_ds, reg, &val);
        if (ret)
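
The two drive-strength hunks above can drop the manual "bit = bit << 1" because meson_calc_reg_and_bit() now folds a per-register-type bit stride into the calculation and spills into the next 32-bit word when the field crosses a boundary. Below is a standalone sketch of that arithmetic; the bank values (first pin, base register, base bit) are made up for illustration and are not the driver's real tables.

/*
 * Sketch of the stride-based reg/bit calculation, with hypothetical
 * bank parameters rather than the driver's data.
 */
#include <stdio.h>

static const unsigned int bit_strides[] = { 1, 1, 1, 1, 1, 2, 1 };

static void calc_reg_and_bit(unsigned int first_pin, unsigned int base_reg,
			     unsigned int base_bit, unsigned int reg_type,
			     unsigned int pin, unsigned int *reg,
			     unsigned int *bit)
{
	/* Wide fields (stride > 1) may cross into the following 32-bit word */
	*bit = (base_bit + pin - first_pin) * bit_strides[reg_type];
	*reg = (base_reg + (*bit / 32)) * 4;	/* registers are 4 bytes apart */
	*bit &= 0x1f;
}

int main(void)
{
	unsigned int reg, bit;

	/* reg_type 5 (drive strength) uses 2 bits per pin */
	calc_reg_and_bit(0, 0, 0, 5, 20, &reg, &bit);
	printf("pin 20: reg offset 0x%x, bit %u\n", reg, bit);	/* 0x4, bit 8 */
	return 0;
}

With a 2-bit field, pin 20 lands 40 bits in, i.e. bit 8 of the next register, which is exactly what the removed "bit << 1" could not express once a bank's drive-strength bits overflowed one word.
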
index 5f125bd..953126b 100644 (file)
  * The pins of a pinmux groups are composed of one or two groups of contiguous
  * pins.
  * @name:      Name of the pin group, used to lookup the group.
- * @start_pins:        Index of the first pin of the main range of pins belonging to
+ * @start_pin: Index of the first pin of the main range of pins belonging to
  *             the group
  * @npins:     Number of pins included in the first range
  * @reg_mask:  Bit mask matching the group in the selection register
- * @extra_pins:        Index of the first pin of the optional second range of pins
+ * @val:       Value to write to the registers for a given function
+ * @extra_pin: Index of the first pin of the optional second range of pins
  *             belonging to the group
- * @npins:     Number of pins included in the second optional range
+ * @extra_npins: Number of pins included in the second optional range
  * @funcs:     A list of pinmux functions that can be selected for this group.
  * @pins:      List of the pins included in the group
  */
index dfef471..1e225d5 100644 (file)
@@ -231,9 +231,10 @@ static void parse_dt_cfg(struct device_node *np,
  * pinconf_generic_parse_dt_config()
  * parse the config properties into generic pinconfig values.
  * @np: node containing the pinconfig properties
+ * @pctldev: pincontrol device
  * @configs: array with nconfigs entries containing the generic pinconf values
  *           must be freed when no longer necessary.
- * @nconfigs: umber of configurations
+ * @nconfigs: number of configurations
  */
 int pinconf_generic_parse_dt_config(struct device_node *np,
                                    struct pinctrl_dev *pctldev,
index 1fe62a3..9a760f5 100644 (file)
@@ -417,22 +417,13 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type)
 {
        int ret = 0;
        u32 pin_reg, pin_reg_irq_en, mask;
-       unsigned long flags, irq_flags;
+       unsigned long flags;
        struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
        struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
 
        raw_spin_lock_irqsave(&gpio_dev->lock, flags);
        pin_reg = readl(gpio_dev->base + (d->hwirq)*4);
 
-       /* Ignore the settings coming from the client and
-        * read the values from the ACPI tables
-        * while setting the trigger type
-        */
-
-       irq_flags = irq_get_trigger_type(d->irq);
-       if (irq_flags != IRQ_TYPE_NONE)
-               type = irq_flags;
-
        switch (type & IRQ_TYPE_SENSE_MASK) {
        case IRQ_TYPE_EDGE_RISING:
                pin_reg &= ~BIT(LEVEL_TRIG_OFF);
@@ -855,6 +846,7 @@ static int amd_gpio_probe(struct platform_device *pdev)
        int irq_base;
        struct resource *res;
        struct amd_gpio *gpio_dev;
+       struct gpio_irq_chip *girq;
 
        gpio_dev = devm_kzalloc(&pdev->dev,
                                sizeof(struct amd_gpio), GFP_KERNEL);
@@ -916,6 +908,15 @@ static int amd_gpio_probe(struct platform_device *pdev)
                return PTR_ERR(gpio_dev->pctrl);
        }
 
+       girq = &gpio_dev->gc.irq;
+       girq->chip = &amd_gpio_irqchip;
+       /* This will let us handle the parent IRQ in the driver */
+       girq->parent_handler = NULL;
+       girq->num_parents = 0;
+       girq->parents = NULL;
+       girq->default_type = IRQ_TYPE_NONE;
+       girq->handler = handle_simple_irq;
+
        ret = gpiochip_add_data(&gpio_dev->gc, gpio_dev);
        if (ret)
                return ret;
@@ -927,17 +928,6 @@ static int amd_gpio_probe(struct platform_device *pdev)
                goto out2;
        }
 
-       ret = gpiochip_irqchip_add(&gpio_dev->gc,
-                               &amd_gpio_irqchip,
-                               0,
-                               handle_simple_irq,
-                               IRQ_TYPE_NONE);
-       if (ret) {
-               dev_err(&pdev->dev, "could not add irqchip\n");
-               ret = -ENODEV;
-               goto out2;
-       }
-
        ret = devm_request_irq(&pdev->dev, irq_base, amd_gpio_irq_handler,
                               IRQF_SHARED, KBUILD_MODNAME, gpio_dev);
        if (ret)
@@ -965,12 +955,14 @@ static int amd_gpio_remove(struct platform_device *pdev)
        return 0;
 }
 
+#ifdef CONFIG_ACPI
 static const struct acpi_device_id amd_gpio_acpi_match[] = {
        { "AMD0030", 0 },
        { "AMDI0030", 0},
        { },
 };
 MODULE_DEVICE_TABLE(acpi, amd_gpio_acpi_match);
+#endif
 
 static struct platform_driver amd_gpio_driver = {
        .driver         = {
index 54222cc..8e5a505 100644 (file)
@@ -106,6 +106,8 @@ struct atmel_pin {
  * @irq_domain: irq domain for the gpio controller.
  * @irqs: table containing the hw irq number of the bank. The index of the
  *     table is the bank id.
+ * @pm_wakeup_sources: bitmap of wakeup sources (lines)
+ * @pm_suspend_backup: backup/restore register values on suspend/resume
  * @dev: device entry for the Atmel PIO controller.
  * @node: node of the Atmel PIO controller.
  */
index 9c52130..72edc67 100644 (file)
@@ -65,7 +65,7 @@ static int gpio_banks;
 #define DEBOUNCE_VAL_SHIFT     17
 #define DEBOUNCE_VAL   (0x3fff << DEBOUNCE_VAL_SHIFT)
 
-/**
+/*
  * These defines will translated the dt binding settings to our internal
  * settings. They are not necessarily the same value as the register setting.
  * The actual drive strength current of low, medium and high must be looked up
@@ -161,6 +161,10 @@ struct at91_pin_group {
  * @set_pulldown: enable/disable pulldown
  * @get_schmitt_trig: get schmitt trigger status
  * @disable_schmitt_trig: disable schmitt trigger
+ * @get_drivestrength: get drive strength
+ * @set_drivestrength: set drive strength
+ * @get_slewrate: get slew rate
+ * @set_slewrate: set slew rate
  * @irq_type: return irq type
  */
 struct at91_pinctrl_mux_ops {
index d1a7d98..a8e2672 100644 (file)
 /**
  * struct bm1880_pinctrl - driver data
  * @base:      Pinctrl base address
- * @pctrl:     Pinctrl device
+ * @pctrldev:  Pinctrl device
  * @groups:    Pingroups
  * @ngroups:   Number of @groups
  * @funcs:     Pinmux functions
  * @nfuncs:    Number of @funcs
- * @pconf:     Pinconf data
+ * @pinconf:   Pinconf data
  */
 struct bm1880_pinctrl {
        void __iomem *base;
index 6a8d445..a8d1b53 100644 (file)
@@ -124,6 +124,7 @@ static int jz4740_nand_cs1_pins[] = { 0x39, };
 static int jz4740_nand_cs2_pins[] = { 0x3a, };
 static int jz4740_nand_cs3_pins[] = { 0x3b, };
 static int jz4740_nand_cs4_pins[] = { 0x3c, };
+static int jz4740_nand_fre_fwe_pins[] = { 0x5c, 0x5d, };
 static int jz4740_pwm_pwm0_pins[] = { 0x77, };
 static int jz4740_pwm_pwm1_pins[] = { 0x78, };
 static int jz4740_pwm_pwm2_pins[] = { 0x79, };
@@ -146,6 +147,7 @@ static int jz4740_nand_cs1_funcs[] = { 0, };
 static int jz4740_nand_cs2_funcs[] = { 0, };
 static int jz4740_nand_cs3_funcs[] = { 0, };
 static int jz4740_nand_cs4_funcs[] = { 0, };
+static int jz4740_nand_fre_fwe_funcs[] = { 0, 0, };
 static int jz4740_pwm_pwm0_funcs[] = { 0, };
 static int jz4740_pwm_pwm1_funcs[] = { 0, };
 static int jz4740_pwm_pwm2_funcs[] = { 0, };
@@ -178,6 +180,7 @@ static const struct group_desc jz4740_groups[] = {
        INGENIC_PIN_GROUP("nand-cs2", jz4740_nand_cs2),
        INGENIC_PIN_GROUP("nand-cs3", jz4740_nand_cs3),
        INGENIC_PIN_GROUP("nand-cs4", jz4740_nand_cs4),
+       INGENIC_PIN_GROUP("nand-fre-fwe", jz4740_nand_fre_fwe),
        INGENIC_PIN_GROUP("pwm0", jz4740_pwm_pwm0),
        INGENIC_PIN_GROUP("pwm1", jz4740_pwm_pwm1),
        INGENIC_PIN_GROUP("pwm2", jz4740_pwm_pwm2),
@@ -195,7 +198,7 @@ static const char *jz4740_lcd_groups[] = {
        "lcd-8bit", "lcd-16bit", "lcd-18bit", "lcd-18bit-tft", "lcd-no-pins",
 };
 static const char *jz4740_nand_groups[] = {
-       "nand-cs1", "nand-cs2", "nand-cs3", "nand-cs4",
+       "nand-cs1", "nand-cs2", "nand-cs3", "nand-cs4", "nand-fre-fwe",
 };
 static const char *jz4740_pwm0_groups[] = { "pwm0", };
 static const char *jz4740_pwm1_groups[] = { "pwm1", };
@@ -1810,9 +1813,9 @@ static void ingenic_gpio_irq_ack(struct irq_data *irqd)
                 */
                high = ingenic_gpio_get_value(jzgc, irq);
                if (high)
-                       irq_set_type(jzgc, irq, IRQ_TYPE_EDGE_FALLING);
+                       irq_set_type(jzgc, irq, IRQ_TYPE_LEVEL_LOW);
                else
-                       irq_set_type(jzgc, irq, IRQ_TYPE_EDGE_RISING);
+                       irq_set_type(jzgc, irq, IRQ_TYPE_LEVEL_HIGH);
        }
 
        if (jzgc->jzpc->info->version >= ID_JZ4760)
@@ -1848,7 +1851,7 @@ static int ingenic_gpio_irq_set_type(struct irq_data *irqd, unsigned int type)
                 */
                bool high = ingenic_gpio_get_value(jzgc, irqd->hwirq);
 
-               type = high ? IRQ_TYPE_EDGE_FALLING : IRQ_TYPE_EDGE_RISING;
+               type = high ? IRQ_TYPE_LEVEL_LOW : IRQ_TYPE_LEVEL_HIGH;
        }
 
        irq_set_type(jzgc, irqd->hwirq, type);
@@ -1955,7 +1958,8 @@ static int ingenic_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
        unsigned int pin = gc->base + offset;
 
        if (jzpc->info->version >= ID_JZ4760) {
-               if (ingenic_get_pin_config(jzpc, pin, JZ4760_GPIO_PAT1))
+               if (ingenic_get_pin_config(jzpc, pin, JZ4760_GPIO_INT) ||
+                   ingenic_get_pin_config(jzpc, pin, JZ4760_GPIO_PAT1))
                        return GPIO_LINE_DIRECTION_IN;
                return GPIO_LINE_DIRECTION_OUT;
        }
@@ -2292,6 +2296,7 @@ static const struct regmap_config ingenic_pinctrl_regmap_config = {
 
 static const struct of_device_id ingenic_gpio_of_match[] __initconst = {
        { .compatible = "ingenic,jz4740-gpio", },
+       { .compatible = "ingenic,jz4725b-gpio", },
        { .compatible = "ingenic,jz4760-gpio", },
        { .compatible = "ingenic,jz4770-gpio", },
        { .compatible = "ingenic,jz4780-gpio", },
index e467754..7b2f885 100644 (file)
@@ -838,11 +838,11 @@ static int lpc18xx_pconf_get_pin(struct pinctrl_dev *pctldev, unsigned param,
                *arg = (reg & LPC18XX_SCU_PIN_EHD_MASK) >> LPC18XX_SCU_PIN_EHD_POS;
                switch (*arg) {
                case 3: *arg += 5;
-                       /* fall through */
+                       fallthrough;
                case 2: *arg += 5;
-                       /* fall through */
+                       fallthrough;
                case 1: *arg += 3;
-                       /* fall through */
+                       fallthrough;
                case 0: *arg += 4;
                }
                break;
@@ -1057,11 +1057,11 @@ static int lpc18xx_pconf_set_pin(struct pinctrl_dev *pctldev, unsigned param,
 
                switch (param_val) {
                case 20: param_val -= 5;
-                        /* fall through */
+                       fallthrough;
                case 14: param_val -= 5;
-                        /* fall through */
+                       fallthrough;
                case  8: param_val -= 3;
-                        /* fall through */
+                       fallthrough;
                case  4: param_val -= 4;
                         break;
                default:
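
The converted fallthrough ladders above encode a cumulative mapping between the 2-bit EHD register field and the drive values 4/8/14/20 (which appear to be the milliamp steps of the high-drive pins); each case adds, or on the set path subtracts, its delta before falling into the next case. A standalone sketch of the get-side mapping:

/* Worked example of the get-side ladder: field 0..3 -> 4, 8, 14, 20. */
#include <stdio.h>

static unsigned int ehd_to_value(unsigned int field)
{
	switch (field) {
	case 3: field += 5;	/* falls through, accumulating each delta */
	case 2: field += 5;
	case 1: field += 3;
	case 0: field += 4;
	}
	return field;
}

int main(void)
{
	unsigned int f;

	for (f = 0; f < 4; f++)
		printf("field %u -> %u\n", f, ehd_to_value(f));
	return 0;
}

The set-side ladder is simply the inverse: it subtracts the same deltas from 20/14/8/4 back down to the field values 3/2/1/0.
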
index 151931b..42b12ea 100644 (file)
@@ -522,29 +522,6 @@ static int mcp23s08_irq_setup(struct mcp23s08 *mcp)
        return 0;
 }
 
-static int mcp23s08_irqchip_setup(struct mcp23s08 *mcp)
-{
-       struct gpio_chip *chip = &mcp->chip;
-       int err;
-
-       err =  gpiochip_irqchip_add_nested(chip,
-                                          &mcp->irq_chip,
-                                          0,
-                                          handle_simple_irq,
-                                          IRQ_TYPE_NONE);
-       if (err) {
-               dev_err(chip->parent,
-                       "could not connect irqchip to gpiochip: %d\n", err);
-               return err;
-       }
-
-       gpiochip_set_nested_irqchip(chip,
-                                   &mcp->irq_chip,
-                                   mcp->irq);
-
-       return 0;
-}
-
 /*----------------------------------------------------------------------*/
 
 int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
@@ -589,10 +566,6 @@ int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
        if (ret < 0)
                goto fail;
 
-       ret = devm_gpiochip_add_data(dev, &mcp->chip, mcp);
-       if (ret < 0)
-               goto fail;
-
        mcp->irq_controller =
                device_property_read_bool(dev, "interrupt-controller");
        if (mcp->irq && mcp->irq_controller) {
@@ -629,11 +602,22 @@ int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
        }
 
        if (mcp->irq && mcp->irq_controller) {
-               ret = mcp23s08_irqchip_setup(mcp);
-               if (ret)
-                       goto fail;
+               struct gpio_irq_chip *girq = &mcp->chip.irq;
+
+               girq->chip = &mcp->irq_chip;
+               /* This will let us handle the parent IRQ in the driver */
+               girq->parent_handler = NULL;
+               girq->num_parents = 0;
+               girq->parents = NULL;
+               girq->default_type = IRQ_TYPE_NONE;
+               girq->handler = handle_simple_irq;
+               girq->threaded = true;
        }
 
+       ret = devm_gpiochip_add_data(dev, &mcp->chip, mcp);
+       if (ret < 0)
+               goto fail;
+
        mcp->pinctrl_desc.pctlops = &mcp_pinctrl_ops;
        mcp->pinctrl_desc.confops = &mcp_pinconf_ops;
        mcp->pinctrl_desc.npins = mcp->chip.ngpio;
index 95c225b..425a3d7 100644 (file)
 #include "pinconf.h"
 #include "pinmux.h"
 
+#define ocelot_clrsetbits(addr, clear, set) \
+       writel((readl(addr) & ~(clear)) | (set), (addr))
+
+/* PINCONFIG bits (sparx5 only) */
+enum {
+       PINCONF_BIAS,
+       PINCONF_SCHMITT,
+       PINCONF_DRIVE_STRENGTH,
+};
+
+#define BIAS_PD_BIT BIT(4)
+#define BIAS_PU_BIT BIT(3)
+#define BIAS_BITS   (BIAS_PD_BIT|BIAS_PU_BIT)
+#define SCHMITT_BIT BIT(2)
+#define DRIVE_BITS  GENMASK(1, 0)
+
+/* GPIO standard registers */
 #define OCELOT_GPIO_OUT_SET    0x0
 #define OCELOT_GPIO_OUT_CLR    0x4
 #define OCELOT_GPIO_OUT                0x8
 enum {
        FUNC_NONE,
        FUNC_GPIO,
+       FUNC_IRQ0,
        FUNC_IRQ0_IN,
        FUNC_IRQ0_OUT,
+       FUNC_IRQ1,
        FUNC_IRQ1_IN,
        FUNC_IRQ1_OUT,
+       FUNC_EXT_IRQ,
        FUNC_MIIM,
+       FUNC_PHY_LED,
        FUNC_PCI_WAKE,
+       FUNC_MD,
        FUNC_PTP0,
        FUNC_PTP1,
        FUNC_PTP2,
@@ -59,24 +81,36 @@ enum {
        FUNC_SG1,
        FUNC_SG2,
        FUNC_SI,
+       FUNC_SI2,
        FUNC_TACHO,
        FUNC_TWI,
        FUNC_TWI2,
+       FUNC_TWI3,
        FUNC_TWI_SCL_M,
        FUNC_UART,
        FUNC_UART2,
+       FUNC_UART3,
+       FUNC_PLL_STAT,
+       FUNC_EMMC,
+       FUNC_REF_CLK,
+       FUNC_RCVRD_CLK,
        FUNC_MAX
 };
 
 static const char *const ocelot_function_names[] = {
        [FUNC_NONE]             = "none",
        [FUNC_GPIO]             = "gpio",
+       [FUNC_IRQ0]             = "irq0",
        [FUNC_IRQ0_IN]          = "irq0_in",
        [FUNC_IRQ0_OUT]         = "irq0_out",
+       [FUNC_IRQ1]             = "irq1",
        [FUNC_IRQ1_IN]          = "irq1_in",
        [FUNC_IRQ1_OUT]         = "irq1_out",
+       [FUNC_EXT_IRQ]          = "ext_irq",
        [FUNC_MIIM]             = "miim",
+       [FUNC_PHY_LED]          = "phy_led",
        [FUNC_PCI_WAKE]         = "pci_wake",
+       [FUNC_MD]               = "md",
        [FUNC_PTP0]             = "ptp0",
        [FUNC_PTP1]             = "ptp1",
        [FUNC_PTP2]             = "ptp2",
@@ -88,12 +122,19 @@ static const char *const ocelot_function_names[] = {
        [FUNC_SG1]              = "sg1",
        [FUNC_SG2]              = "sg2",
        [FUNC_SI]               = "si",
+       [FUNC_SI2]              = "si2",
        [FUNC_TACHO]            = "tacho",
        [FUNC_TWI]              = "twi",
        [FUNC_TWI2]             = "twi2",
+       [FUNC_TWI3]             = "twi3",
        [FUNC_TWI_SCL_M]        = "twi_scl_m",
        [FUNC_UART]             = "uart",
        [FUNC_UART2]            = "uart2",
+       [FUNC_UART3]            = "uart3",
+       [FUNC_PLL_STAT]         = "pll_stat",
+       [FUNC_EMMC]             = "emmc",
+       [FUNC_REF_CLK]          = "ref_clk",
+       [FUNC_RCVRD_CLK]        = "rcvrd_clk",
 };
 
 struct ocelot_pmx_func {
@@ -111,6 +152,7 @@ struct ocelot_pinctrl {
        struct pinctrl_dev *pctl;
        struct gpio_chip gpio_chip;
        struct regmap *map;
+       void __iomem *pincfg;
        struct pinctrl_desc *desc;
        struct ocelot_pmx_func func[FUNC_MAX];
        u8 stride;
@@ -324,6 +366,152 @@ static const struct pinctrl_pin_desc jaguar2_pins[] = {
        JAGUAR2_PIN(63),
 };
 
+#define SPARX5_P(p, f0, f1, f2)                                        \
+static struct ocelot_pin_caps sparx5_pin_##p = {                       \
+       .pin = p,                                                       \
+       .functions = {                                                  \
+               FUNC_GPIO, FUNC_##f0, FUNC_##f1, FUNC_##f2              \
+       },                                                              \
+}
+
+SPARX5_P(0,  SG0,       PLL_STAT,  NONE);
+SPARX5_P(1,  SG0,       NONE,      NONE);
+SPARX5_P(2,  SG0,       NONE,      NONE);
+SPARX5_P(3,  SG0,       NONE,      NONE);
+SPARX5_P(4,  SG1,       NONE,      NONE);
+SPARX5_P(5,  SG1,       NONE,      NONE);
+SPARX5_P(6,  IRQ0_IN,   IRQ0_OUT,  SFP);
+SPARX5_P(7,  IRQ1_IN,   IRQ1_OUT,  SFP);
+SPARX5_P(8,  PTP0,      NONE,      SFP);
+SPARX5_P(9,  PTP1,      SFP,       TWI_SCL_M);
+SPARX5_P(10, UART,      NONE,      NONE);
+SPARX5_P(11, UART,      NONE,      NONE);
+SPARX5_P(12, SG1,       NONE,      NONE);
+SPARX5_P(13, SG1,       NONE,      NONE);
+SPARX5_P(14, TWI,       TWI_SCL_M, NONE);
+SPARX5_P(15, TWI,       NONE,      NONE);
+SPARX5_P(16, SI,        TWI_SCL_M, SFP);
+SPARX5_P(17, SI,        TWI_SCL_M, SFP);
+SPARX5_P(18, SI,        TWI_SCL_M, SFP);
+SPARX5_P(19, PCI_WAKE,  TWI_SCL_M, SFP);
+SPARX5_P(20, IRQ0_OUT,  TWI_SCL_M, SFP);
+SPARX5_P(21, IRQ1_OUT,  TACHO,     SFP);
+SPARX5_P(22, TACHO,     IRQ0_OUT,  TWI_SCL_M);
+SPARX5_P(23, PWM,       UART3,     TWI_SCL_M);
+SPARX5_P(24, PTP2,      UART3,     TWI_SCL_M);
+SPARX5_P(25, PTP3,      SI,        TWI_SCL_M);
+SPARX5_P(26, UART2,     SI,        TWI_SCL_M);
+SPARX5_P(27, UART2,     SI,        TWI_SCL_M);
+SPARX5_P(28, TWI2,      SI,        SFP);
+SPARX5_P(29, TWI2,      SI,        SFP);
+SPARX5_P(30, SG2,       SI,        PWM);
+SPARX5_P(31, SG2,       SI,        TWI_SCL_M);
+SPARX5_P(32, SG2,       SI,        TWI_SCL_M);
+SPARX5_P(33, SG2,       SI,        SFP);
+SPARX5_P(34, NONE,      TWI_SCL_M, EMMC);
+SPARX5_P(35, SFP,       TWI_SCL_M, EMMC);
+SPARX5_P(36, SFP,       TWI_SCL_M, EMMC);
+SPARX5_P(37, SFP,       NONE,      EMMC);
+SPARX5_P(38, NONE,      TWI_SCL_M, EMMC);
+SPARX5_P(39, SI2,       TWI_SCL_M, EMMC);
+SPARX5_P(40, SI2,       TWI_SCL_M, EMMC);
+SPARX5_P(41, SI2,       TWI_SCL_M, EMMC);
+SPARX5_P(42, SI2,       TWI_SCL_M, EMMC);
+SPARX5_P(43, SI2,       TWI_SCL_M, EMMC);
+SPARX5_P(44, SI,        SFP,       EMMC);
+SPARX5_P(45, SI,        SFP,       EMMC);
+SPARX5_P(46, NONE,      SFP,       EMMC);
+SPARX5_P(47, NONE,      SFP,       EMMC);
+SPARX5_P(48, TWI3,      SI,        SFP);
+SPARX5_P(49, TWI3,      NONE,      SFP);
+SPARX5_P(50, SFP,       NONE,      TWI_SCL_M);
+SPARX5_P(51, SFP,       SI,        TWI_SCL_M);
+SPARX5_P(52, SFP,       MIIM,      TWI_SCL_M);
+SPARX5_P(53, SFP,       MIIM,      TWI_SCL_M);
+SPARX5_P(54, SFP,       PTP2,      TWI_SCL_M);
+SPARX5_P(55, SFP,       PTP3,      PCI_WAKE);
+SPARX5_P(56, MIIM,      SFP,       TWI_SCL_M);
+SPARX5_P(57, MIIM,      SFP,       TWI_SCL_M);
+SPARX5_P(58, MIIM,      SFP,       TWI_SCL_M);
+SPARX5_P(59, MIIM,      SFP,       NONE);
+SPARX5_P(60, RECO_CLK,  NONE,      NONE);
+SPARX5_P(61, RECO_CLK,  NONE,      NONE);
+SPARX5_P(62, RECO_CLK,  PLL_STAT,  NONE);
+SPARX5_P(63, RECO_CLK,  NONE,      NONE);
+
+#define SPARX5_PIN(n) {                                        \
+       .number = n,                                            \
+       .name = "GPIO_"#n,                                      \
+       .drv_data = &sparx5_pin_##n                             \
+}
+
+static const struct pinctrl_pin_desc sparx5_pins[] = {
+       SPARX5_PIN(0),
+       SPARX5_PIN(1),
+       SPARX5_PIN(2),
+       SPARX5_PIN(3),
+       SPARX5_PIN(4),
+       SPARX5_PIN(5),
+       SPARX5_PIN(6),
+       SPARX5_PIN(7),
+       SPARX5_PIN(8),
+       SPARX5_PIN(9),
+       SPARX5_PIN(10),
+       SPARX5_PIN(11),
+       SPARX5_PIN(12),
+       SPARX5_PIN(13),
+       SPARX5_PIN(14),
+       SPARX5_PIN(15),
+       SPARX5_PIN(16),
+       SPARX5_PIN(17),
+       SPARX5_PIN(18),
+       SPARX5_PIN(19),
+       SPARX5_PIN(20),
+       SPARX5_PIN(21),
+       SPARX5_PIN(22),
+       SPARX5_PIN(23),
+       SPARX5_PIN(24),
+       SPARX5_PIN(25),
+       SPARX5_PIN(26),
+       SPARX5_PIN(27),
+       SPARX5_PIN(28),
+       SPARX5_PIN(29),
+       SPARX5_PIN(30),
+       SPARX5_PIN(31),
+       SPARX5_PIN(32),
+       SPARX5_PIN(33),
+       SPARX5_PIN(34),
+       SPARX5_PIN(35),
+       SPARX5_PIN(36),
+       SPARX5_PIN(37),
+       SPARX5_PIN(38),
+       SPARX5_PIN(39),
+       SPARX5_PIN(40),
+       SPARX5_PIN(41),
+       SPARX5_PIN(42),
+       SPARX5_PIN(43),
+       SPARX5_PIN(44),
+       SPARX5_PIN(45),
+       SPARX5_PIN(46),
+       SPARX5_PIN(47),
+       SPARX5_PIN(48),
+       SPARX5_PIN(49),
+       SPARX5_PIN(50),
+       SPARX5_PIN(51),
+       SPARX5_PIN(52),
+       SPARX5_PIN(53),
+       SPARX5_PIN(54),
+       SPARX5_PIN(55),
+       SPARX5_PIN(56),
+       SPARX5_PIN(57),
+       SPARX5_PIN(58),
+       SPARX5_PIN(59),
+       SPARX5_PIN(60),
+       SPARX5_PIN(61),
+       SPARX5_PIN(62),
+       SPARX5_PIN(63),
+};
+
 static int ocelot_get_functions_count(struct pinctrl_dev *pctldev)
 {
        return ARRAY_SIZE(ocelot_function_names);
@@ -382,6 +570,7 @@ static int ocelot_pinmux_set_mux(struct pinctrl_dev *pctldev,
         * ALT[1]
         * This is racy because both registers can't be updated at the same time
         * but it doesn't matter much for now.
+        * Note: ALT0/ALT1 are organized specially for 64 gpio targets
         */
        regmap_update_bits(info->map, REG_ALT(0, info, pin->pin),
                           BIT(p), f << p);
@@ -458,6 +647,219 @@ static int ocelot_pctl_get_group_pins(struct pinctrl_dev *pctldev,
        return 0;
 }
 
+static int ocelot_hw_get_value(struct ocelot_pinctrl *info,
+                              unsigned int pin,
+                              unsigned int reg,
+                              int *val)
+{
+       int ret = -EOPNOTSUPP;
+
+       if (info->pincfg) {
+               u32 regcfg = readl(info->pincfg + (pin * sizeof(u32)));
+
+               ret = 0;
+               switch (reg) {
+               case PINCONF_BIAS:
+                       *val = regcfg & BIAS_BITS;
+                       break;
+
+               case PINCONF_SCHMITT:
+                       *val = regcfg & SCHMITT_BIT;
+                       break;
+
+               case PINCONF_DRIVE_STRENGTH:
+                       *val = regcfg & DRIVE_BITS;
+                       break;
+
+               default:
+                       ret = -EOPNOTSUPP;
+                       break;
+               }
+       }
+       return ret;
+}
+
+static int ocelot_hw_set_value(struct ocelot_pinctrl *info,
+                              unsigned int pin,
+                              unsigned int reg,
+                              int val)
+{
+       int ret = -EOPNOTSUPP;
+
+       if (info->pincfg) {
+               void __iomem *regaddr = info->pincfg + (pin * sizeof(u32));
+
+               ret = 0;
+               switch (reg) {
+               case PINCONF_BIAS:
+                       ocelot_clrsetbits(regaddr, BIAS_BITS, val);
+                       break;
+
+               case PINCONF_SCHMITT:
+                       ocelot_clrsetbits(regaddr, SCHMITT_BIT, val);
+                       break;
+
+               case PINCONF_DRIVE_STRENGTH:
+                       if (val <= 3)
+                               ocelot_clrsetbits(regaddr, DRIVE_BITS, val);
+                       else
+                               ret = -EINVAL;
+                       break;
+
+               default:
+                       ret = -EOPNOTSUPP;
+                       break;
+               }
+       }
+       return ret;
+}
+
+static int ocelot_pinconf_get(struct pinctrl_dev *pctldev,
+                             unsigned int pin, unsigned long *config)
+{
+       struct ocelot_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
+       u32 param = pinconf_to_config_param(*config);
+       int val, err;
+
+       switch (param) {
+       case PIN_CONFIG_BIAS_DISABLE:
+       case PIN_CONFIG_BIAS_PULL_UP:
+       case PIN_CONFIG_BIAS_PULL_DOWN:
+               err = ocelot_hw_get_value(info, pin, PINCONF_BIAS, &val);
+               if (err)
+                       return err;
+               if (param == PIN_CONFIG_BIAS_DISABLE)
+                       val = (val == 0 ? true : false);
+               else if (param == PIN_CONFIG_BIAS_PULL_DOWN)
+                       val = (val & BIAS_PD_BIT ? true : false);
+               else    /* PIN_CONFIG_BIAS_PULL_UP */
+                       val = (val & BIAS_PU_BIT ? true : false);
+               break;
+
+       case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
+               err = ocelot_hw_get_value(info, pin, PINCONF_SCHMITT, &val);
+               if (err)
+                       return err;
+
+               val = (val & SCHMITT_BIT ? true : false);
+               break;
+
+       case PIN_CONFIG_DRIVE_STRENGTH:
+               err = ocelot_hw_get_value(info, pin, PINCONF_DRIVE_STRENGTH,
+                                         &val);
+               if (err)
+                       return err;
+               break;
+
+       case PIN_CONFIG_OUTPUT:
+               err = regmap_read(info->map, REG(OCELOT_GPIO_OUT, info, pin),
+                                 &val);
+               if (err)
+                       return err;
+               val = !!(val & BIT(pin % 32));
+               break;
+
+       case PIN_CONFIG_INPUT_ENABLE:
+       case PIN_CONFIG_OUTPUT_ENABLE:
+               err = regmap_read(info->map, REG(OCELOT_GPIO_OE, info, pin),
+                                 &val);
+               if (err)
+                       return err;
+               val = val & BIT(pin % 32);
+               if (param == PIN_CONFIG_OUTPUT_ENABLE)
+                       val = !!val;
+               else
+                       val = !val;
+               break;
+
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       *config = pinconf_to_config_packed(param, val);
+
+       return 0;
+}
+
+static int ocelot_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
+                             unsigned long *configs, unsigned int num_configs)
+{
+       struct ocelot_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
+       u32 param, arg, p;
+       int cfg, err = 0;
+
+       for (cfg = 0; cfg < num_configs; cfg++) {
+               param = pinconf_to_config_param(configs[cfg]);
+               arg = pinconf_to_config_argument(configs[cfg]);
+
+               switch (param) {
+               case PIN_CONFIG_BIAS_DISABLE:
+               case PIN_CONFIG_BIAS_PULL_UP:
+               case PIN_CONFIG_BIAS_PULL_DOWN:
+                       arg = (param == PIN_CONFIG_BIAS_DISABLE) ? 0 :
+                       (param == PIN_CONFIG_BIAS_PULL_UP) ? BIAS_PU_BIT :
+                       BIAS_PD_BIT;
+
+                       err = ocelot_hw_set_value(info, pin, PINCONF_BIAS, arg);
+                       if (err)
+                               goto err;
+
+                       break;
+
+               case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
+                       arg = arg ? SCHMITT_BIT : 0;
+                       err = ocelot_hw_set_value(info, pin, PINCONF_SCHMITT,
+                                                 arg);
+                       if (err)
+                               goto err;
+
+                       break;
+
+               case PIN_CONFIG_DRIVE_STRENGTH:
+                       err = ocelot_hw_set_value(info, pin,
+                                                 PINCONF_DRIVE_STRENGTH,
+                                                 arg);
+                       if (err)
+                               goto err;
+
+                       break;
+
+               case PIN_CONFIG_OUTPUT_ENABLE:
+               case PIN_CONFIG_INPUT_ENABLE:
+               case PIN_CONFIG_OUTPUT:
+                       p = pin % 32;
+                       if (arg)
+                               regmap_write(info->map,
+                                            REG(OCELOT_GPIO_OUT_SET, info,
+                                                pin),
+                                            BIT(p));
+                       else
+                               regmap_write(info->map,
+                                            REG(OCELOT_GPIO_OUT_CLR, info,
+                                                pin),
+                                            BIT(p));
+                       regmap_update_bits(info->map,
+                                          REG(OCELOT_GPIO_OE, info, pin),
+                                          BIT(p),
+                                          param == PIN_CONFIG_INPUT_ENABLE ?
+                                          0 : BIT(p));
+                       break;
+
+               default:
+                       err = -EOPNOTSUPP;
+               }
+       }
+err:
+       return err;
+}
+
+static const struct pinconf_ops ocelot_confops = {
+       .is_generic = true,
+       .pin_config_get = ocelot_pinconf_get,
+       .pin_config_set = ocelot_pinconf_set,
+       .pin_config_config_dbg_show = pinconf_generic_dump_config,
+};
+
 static const struct pinctrl_ops ocelot_pctl_ops = {
        .get_groups_count = ocelot_pctl_get_groups_count,
        .get_group_name = ocelot_pctl_get_group_name,
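
The new Sparx5 pinconf path above reads one 32-bit config word per pin and extracts bias, Schmitt trigger and drive strength from fixed bit fields. A small decode of such a word, mirroring the BIAS/SCHMITT/DRIVE masks defined earlier; the register value here is made up for illustration.

/* Decode one hypothetical per-pin config word using the same bit layout. */
#include <stdint.h>
#include <stdio.h>

#define BIAS_PD_BIT (1u << 4)
#define BIAS_PU_BIT (1u << 3)
#define SCHMITT_BIT (1u << 2)
#define DRIVE_BITS  0x3u

int main(void)
{
	uint32_t regcfg = 0x0d;	/* example: pull-up, Schmitt on, drive level 1 */

	printf("pull-up:   %u\n", !!(regcfg & BIAS_PU_BIT));
	printf("pull-down: %u\n", !!(regcfg & BIAS_PD_BIT));
	printf("schmitt:   %u\n", !!(regcfg & SCHMITT_BIT));
	printf("drive:     %u\n", regcfg & DRIVE_BITS);
	return 0;
}
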
@@ -484,6 +886,16 @@ static struct pinctrl_desc jaguar2_desc = {
        .owner = THIS_MODULE,
 };
 
+static struct pinctrl_desc sparx5_desc = {
+       .name = "sparx5-pinctrl",
+       .pins = sparx5_pins,
+       .npins = ARRAY_SIZE(sparx5_pins),
+       .pctlops = &ocelot_pctl_ops,
+       .pmxops = &ocelot_pmx_ops,
+       .confops = &ocelot_confops,
+       .owner = THIS_MODULE,
+};
+
 static int ocelot_create_group_func_map(struct device *dev,
                                        struct ocelot_pinctrl *info)
 {
@@ -511,7 +923,8 @@ static int ocelot_create_group_func_map(struct device *dev,
                }
 
                for (i = 0; i < npins; i++)
-                       info->func[f].groups[i] = info->desc->pins[pins[i]].name;
+                       info->func[f].groups[i] =
+                               info->desc->pins[pins[i]].name;
        }
 
        kfree(pins);
@@ -744,6 +1157,7 @@ static int ocelot_gpiochip_register(struct platform_device *pdev,
 static const struct of_device_id ocelot_pinctrl_of_match[] = {
        { .compatible = "mscc,ocelot-pinctrl", .data = &ocelot_desc },
        { .compatible = "mscc,jaguar2-pinctrl", .data = &jaguar2_desc },
+       { .compatible = "microchip,sparx5-pinctrl", .data = &sparx5_desc },
        {},
 };
 
@@ -752,6 +1166,7 @@ static int ocelot_pinctrl_probe(struct platform_device *pdev)
        struct device *dev = &pdev->dev;
        struct ocelot_pinctrl *info;
        void __iomem *base;
+       struct resource *res;
        int ret;
        struct regmap_config regmap_config = {
                .reg_bits = 32,
@@ -773,6 +1188,7 @@ static int ocelot_pinctrl_probe(struct platform_device *pdev)
        }
 
        info->stride = 1 + (info->desc->npins - 1) / 32;
+
        regmap_config.max_register = OCELOT_GPIO_SD_MAP * info->stride + 15 * 4;
 
        info->map = devm_regmap_init_mmio(dev, base, &regmap_config);
@@ -783,6 +1199,16 @@ static int ocelot_pinctrl_probe(struct platform_device *pdev)
        dev_set_drvdata(dev, info->map);
        info->dev = dev;
 
+       /* Pinconf registers */
+       if (info->desc->confops) {
+               res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+               base = devm_ioremap_resource(dev, res);
+               if (IS_ERR(base))
+                       dev_dbg(dev, "Failed to ioremap config registers (no extended pinconf)\n");
+               else
+                       info->pincfg = base;
+       }
+
        ret = ocelot_pinctrl_register(pdev, info);
        if (ret)
                return ret;
@@ -791,6 +1217,8 @@ static int ocelot_pinctrl_probe(struct platform_device *pdev)
        if (ret)
                return ret;
 
+       dev_info(dev, "driver registered\n");
+
        return 0;
 }
 
index c07324d..0401c1d 100644 (file)
@@ -9,7 +9,7 @@
  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
  *             http://www.samsung.com
  * Copyright (c) 2012 Linaro Ltd
- *             http://www.linaro.org
+ *             https://www.linaro.org
  *
  * and pinctrl-at91:
  * Copyright (C) 2011-2012 Jean-Christophe PLAGNIOL-VILLARD <plagnioj@jcrosoft.com>
@@ -63,7 +63,7 @@ enum rockchip_pinctrl_type {
        RK3399,
 };
 
-/**
+/*
  * Encode variants of iomux registers into a type variable
  */
 #define IOMUX_GPIO_ONLY                BIT(0)
@@ -74,6 +74,7 @@ enum rockchip_pinctrl_type {
 #define IOMUX_WIDTH_2BIT       BIT(5)
 
 /**
+ * struct rockchip_iomux
  * @type: iomux variant using IOMUX_* constants
  * @offset: if initialized to -1 it will be autocalculated, by specifying
  *         an initial offset value the relevant source offset can be reset
@@ -84,7 +85,7 @@ struct rockchip_iomux {
        int                             offset;
 };
 
-/**
+/*
  * enum type index corresponding to rockchip_perpin_drv_list arrays index.
  */
 enum rockchip_pin_drv_type {
@@ -96,7 +97,7 @@ enum rockchip_pin_drv_type {
        DRV_TYPE_MAX
 };
 
-/**
+/*
  * enum type index corresponding to rockchip_pull_list arrays index.
  */
 enum rockchip_pin_pull_type {
@@ -106,6 +107,7 @@ enum rockchip_pin_pull_type {
 };
 
 /**
+ * struct rockchip_drv
  * @drv_type: drive strength variant using rockchip_perpin_drv_type
  * @offset: if initialized to -1 it will be autocalculated, by specifying
  *         an initial offset value the relevant source offset can be reset
@@ -119,8 +121,9 @@ struct rockchip_drv {
 };
 
 /**
+ * struct rockchip_pin_bank
  * @reg_base: register base of the gpio bank
- * @reg_pull: optional separate register for additional pull settings
+ * @regmap_pull: optional separate register for additional pull settings
  * @clk: clock of the gpio bank
  * @irq: interrupt of the gpio bank
  * @saved_masks: Saved content of GPIO_INTEN at suspend time.
@@ -138,6 +141,8 @@ struct rockchip_drv {
  * @gpio_chip: gpiolib chip
  * @grange: gpio range
  * @slock: spinlock for the gpio bank
+ * @toggle_edge_mode: bit mask to toggle (falling/rising) edge mode
+ * @recalced_mask: bit mask to indicate a need to recalculate the mask
  * @route_mask: bits describing the routing pins of per bank
  */
 struct rockchip_pin_bank {
@@ -312,6 +317,7 @@ enum rockchip_mux_route_location {
  * @bank_num: bank number.
  * @pin: index at register or used to calc index.
  * @func: the min pin.
+ * @route_location: the mux route location (same, pmu, grf).
  * @route_offset: the max pin.
  * @route_val: the register offset.
  */
@@ -324,8 +330,6 @@ struct rockchip_mux_route_data {
        u32 route_val;
 };
 
-/**
- */
 struct rockchip_pin_ctrl {
        struct rockchip_pin_bank        *pin_banks;
        u32                             nr_banks;
@@ -363,9 +367,7 @@ struct rockchip_pin_config {
  * @name: name of the pin group, used to lookup the group.
  * @pins: the pins included in this group.
  * @npins: number of pins included in this group.
- * @func: the mux function number to be programmed when selected.
- * @configs: the config values to be set for each pin
- * @nconfigs: number of configs for each pin
+ * @data: local pin configuration
  */
 struct rockchip_pin_group {
        const char                      *name;
@@ -378,7 +380,7 @@ struct rockchip_pin_group {
  * struct rockchip_pmx_func: represent a pin function.
  * @name: name of the pin function, used to lookup the function.
  * @groups: one or more names of pin groups that provide this function.
- * @num_groups: number of groups included in @groups.
+ * @ngroups: number of groups included in @groups.
  */
 struct rockchip_pmx_func {
        const char              *name;
index 38a14bb..511f232 100644 (file)
@@ -75,7 +75,7 @@
  * RZ/A1 pinmux flags
  */
 
-/**
+/*
  * rza1_bidir_pin - describe a single pin that needs bidir flag applied.
  */
 struct rza1_bidir_pin {
@@ -83,7 +83,7 @@ struct rza1_bidir_pin {
        u8 func: 4;
 };
 
-/**
+/*
  * rza1_bidir_entry - describe a list of pins that needs bidir flag applied.
  *                   Each struct rza1_bidir_entry describes a port.
  */
@@ -92,7 +92,7 @@ struct rza1_bidir_entry {
        const struct rza1_bidir_pin *pins;
 };
 
-/**
+/*
  * rza1_swio_pin - describe a single pin that needs swio flag applied.
  */
 struct rza1_swio_pin {
@@ -102,7 +102,7 @@ struct rza1_swio_pin {
        u16 input: 1;
 };
 
-/**
+/*
  * rza1_swio_entry - describe a list of pins that needs swio flag applied
  */
 struct rza1_swio_entry {
@@ -110,7 +110,7 @@ struct rza1_swio_entry {
        const struct rza1_swio_pin *pins;
 };
 
-/**
+/*
  * rza1_pinmux_conf - group together bidir and swio pinmux flag tables
  */
 struct rza1_pinmux_conf {
@@ -431,7 +431,7 @@ static const struct rza1_pinmux_conf rza1l_pmx_conf = {
  * RZ/A1 types
  */
 /**
- * rza1_mux_conf - describes a pin multiplexing operation
+ * struct rza1_mux_conf - describes a pin multiplexing operation
  *
  * @id: the pin identifier from 0 to RZA1_NPINS
  * @port: the port where pin sits on
@@ -450,7 +450,7 @@ struct rza1_mux_conf {
 };
 
 /**
- * rza1_port - describes a pin port
+ * struct rza1_port - describes a pin port
  *
  * This is mostly useful to lock register writes per-bank and not globally.
  *
@@ -467,12 +467,12 @@ struct rza1_port {
 };
 
 /**
- * rza1_pinctrl - RZ pincontroller device
+ * struct rza1_pinctrl - RZ pincontroller device
  *
  * @dev: parent device structure
  * @mutex: protect [pinctrl|pinmux]_generic functions
  * @base: logical address base
- * @nports: number of pin controller ports
+ * @nport: number of pin controller ports
  * @ports: pin controller banks
  * @pins: pin array for pinctrl core
  * @desc: pincontroller desc for pinctrl core
@@ -536,7 +536,7 @@ static inline int rza1_pinmux_get_swio(unsigned int port,
        return -ENOENT;
 }
 
-/**
+/*
  * rza1_pinmux_get_flags() - return pinmux flags associated to a pin
  */
 static unsigned int rza1_pinmux_get_flags(unsigned int port, unsigned int pin,
@@ -566,7 +566,7 @@ static unsigned int rza1_pinmux_get_flags(unsigned int port, unsigned int pin,
  * RZ/A1 SoC operations
  */
 
-/**
+/*
  * rza1_set_bit() - un-locked set/clear a single bit in pin configuration
  *                 registers
  */
@@ -664,7 +664,7 @@ static inline int rza1_pin_get(struct rza1_port *port, unsigned int pin)
 /**
  * rza1_pin_mux_single() - configure pin multiplexing on a single pin
  *
- * @pinctrl: RZ/A1 pin controller device
+ * @rza1_pctl: RZ/A1 pin controller device
  * @mux_conf: pin multiplexing descriptor
  */
 static int rza1_pin_mux_single(struct rza1_pinctrl *rza1_pctl,
index f3a8a46..efe41ab 100644 (file)
@@ -42,6 +42,7 @@
  * struct pcs_func_vals - mux function register offset and value pair
  * @reg:       register virtual address
  * @val:       register value
+ * @mask:      mask
  */
 struct pcs_func_vals {
        void __iomem *reg;
@@ -83,6 +84,8 @@ struct pcs_conf_type {
  * @nvals:     number of entries in vals array
  * @pgnames:   array of pingroup names the function uses
  * @npgnames:  number of pingroup names the function uses
+ * @conf:      array of pin configurations
+ * @nconfs:    number of pin configurations available
  * @node:      list node
  */
 struct pcs_function {
@@ -560,7 +563,7 @@ static int pcs_pinconf_set(struct pinctrl_dev *pctldev,
                        case PIN_CONFIG_BIAS_PULL_UP:
                                if (arg)
                                        pcs_pinconf_clear_bias(pctldev, pin);
-                               /* fall through */
+                               fallthrough;
                        case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
                                data &= ~func->conf[i].mask;
                                if (arg)
@@ -653,6 +656,7 @@ static const struct pinconf_ops pcs_pinconf_ops = {
  * pcs_add_pin() - add a pin to the static per controller pin array
  * @pcs: pcs driver instance
  * @offset: register offset from base
+ * @pin_pos: unused
  */
 static int pcs_add_pin(struct pcs_device *pcs, unsigned offset,
                unsigned pin_pos)
@@ -916,7 +920,7 @@ static int pcs_parse_pinconf(struct pcs_device *pcs, struct device_node *np,
 
        /* If pinconf isn't supported, don't parse properties in below. */
        if (!PCS_HAS_PINCONF)
-               return 0;
+               return -ENOTSUPP;
 
        /* cacluate how much properties are supported in current node */
        for (i = 0; i < ARRAY_SIZE(prop2); i++) {
@@ -928,7 +932,7 @@ static int pcs_parse_pinconf(struct pcs_device *pcs, struct device_node *np,
                        nconfs++;
        }
        if (!nconfs)
-               return 0;
+               return -ENOTSUPP;
 
        func->conf = devm_kcalloc(pcs->dev,
                                  nconfs, sizeof(struct pcs_conf_vals),
@@ -959,7 +963,6 @@ static int pcs_parse_pinconf(struct pcs_device *pcs, struct device_node *np,
 
 /**
  * pcs_parse_one_pinctrl_entry() - parses a device tree mux entry
- * @pctldev: pin controller device
  * @pcs: pinctrl driver instance
  * @np: device node of the mux entry
  * @map: map entry
@@ -1017,10 +1020,17 @@ static int pcs_parse_one_pinctrl_entry(struct pcs_device *pcs,
                        break;
                }
 
-               /* Index plus one value cell */
                offset = pinctrl_spec.args[0];
                vals[found].reg = pcs->base + offset;
-               vals[found].val = pinctrl_spec.args[1];
+
+               switch (pinctrl_spec.args_count) {
+               case 2:
+                       vals[found].val = pinctrl_spec.args[1];
+                       break;
+               case 3:
+                       vals[found].val = (pinctrl_spec.args[1] | pinctrl_spec.args[2]);
+                       break;
+               }
 
                dev_dbg(pcs->dev, "%pOFn index: 0x%x value: 0x%x\n",
                        pinctrl_spec.np, offset, pinctrl_spec.args[1]);
@@ -1056,9 +1066,12 @@ static int pcs_parse_one_pinctrl_entry(struct pcs_device *pcs,
 
        if (PCS_HAS_PINCONF && function) {
                res = pcs_parse_pinconf(pcs, np, function, map);
-               if (res)
+               if (res == 0)
+                       *num_maps = 2;
+               else if (res == -ENOTSUPP)
+                       *num_maps = 1;
+               else
                        goto free_pingroups;
-               *num_maps = 2;
        } else {
                *num_maps = 1;
        }
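
The hunks above change pcs_parse_pinconf() to return -ENOTSUPP when a node carries no pinconf properties, and the caller now treats that value as "emit only the mux map" instead of silently reporting two maps. Below is a minimal standalone sketch of that return-value convention; the function and variable names are stand-ins for illustration, and ENOTSUPP is redefined locally only so the model compiles outside the kernel.

#include <stdio.h>

#define ENOTSUPP 524	/* kernel-internal errno, modelled here for the sketch */

/* Stand-in for pcs_parse_pinconf(): 0 = pinconf map filled in,
 * -ENOTSUPP = nothing to parse, any other negative value = hard error. */
static int parse_pinconf_stub(int has_pinconf, int nconfs)
{
	if (!has_pinconf || !nconfs)
		return -ENOTSUPP;
	return 0;
}

int main(void)
{
	int num_maps;
	int res = parse_pinconf_stub(1, 0);	/* pinconf capable, but no properties */

	if (res == 0)
		num_maps = 2;		/* mux map + config map */
	else if (res == -ENOTSUPP)
		num_maps = 1;		/* mux map only; not a failure */
	else
		return 1;		/* real error: propagate */

	printf("num_maps = %d\n", num_maps);
	return 0;
}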
@@ -1343,7 +1356,9 @@ static int pcs_add_gpio_func(struct device_node *node, struct pcs_device *pcs)
        }
        return ret;
 }
+
 /**
+ * struct pcs_interrupt
  * @reg:       virtual address of interrupt register
  * @hwirq:     hardware irq number
  * @irq:       virtual irq number
@@ -1358,6 +1373,9 @@ struct pcs_interrupt {
 
 /**
  * pcs_irq_set() - enables or disables an interrupt
+ * @pcs_soc: SoC specific settings
+ * @irq: interrupt
+ * @enable: enable or disable the interrupt
  *
  * Note that this currently assumes one interrupt per pinctrl
  * register that is typically used for wake-up events.
@@ -1438,7 +1456,7 @@ static int pcs_irq_set_wake(struct irq_data *d, unsigned int state)
 
 /**
  * pcs_irq_handle() - common interrupt handler
- * @pcs_irq: interrupt data
+ * @pcs_soc: SoC specific settings
  *
  * Note that this currently assumes we have one interrupt bit per
  * mux register. This interrupt is typically used for wake-up events.
@@ -1486,7 +1504,6 @@ static irqreturn_t pcs_irq_handler(int irq, void *d)
 
 /**
  * pcs_irq_handle() - handler for the dedicated chained interrupt case
- * @irq: interrupt
  * @desc: interrupt descriptor
  *
  * Use this if you have a separate interrupt for each
index 1aae803..008c831 100644 (file)
@@ -616,6 +616,7 @@ static int stmfx_pinctrl_probe(struct platform_device *pdev)
        struct stmfx *stmfx = dev_get_drvdata(pdev->dev.parent);
        struct device_node *np = pdev->dev.of_node;
        struct stmfx_pinctrl *pctl;
+       struct gpio_irq_chip *girq;
        int irq, ret;
 
        pctl = devm_kzalloc(stmfx->dev, sizeof(*pctl), GFP_KERNEL);
@@ -674,16 +675,6 @@ static int stmfx_pinctrl_probe(struct platform_device *pdev)
        pctl->gpio_chip.can_sleep = true;
        pctl->gpio_chip.of_node = np;
 
-       ret = devm_gpiochip_add_data(pctl->dev, &pctl->gpio_chip, pctl);
-       if (ret) {
-               dev_err(pctl->dev, "gpio_chip registration failed\n");
-               return ret;
-       }
-
-       ret = stmfx_pinctrl_gpio_function_enable(pctl);
-       if (ret)
-               return ret;
-
        pctl->irq_chip.name = dev_name(pctl->dev);
        pctl->irq_chip.irq_mask = stmfx_pinctrl_irq_mask;
        pctl->irq_chip.irq_unmask = stmfx_pinctrl_irq_unmask;
@@ -693,13 +684,26 @@ static int stmfx_pinctrl_probe(struct platform_device *pdev)
        pctl->irq_chip.irq_request_resources = stmfx_gpio_irq_request_resources;
        pctl->irq_chip.irq_release_resources = stmfx_gpio_irq_release_resources;
 
-       ret = gpiochip_irqchip_add_nested(&pctl->gpio_chip, &pctl->irq_chip,
-                                         0, handle_bad_irq, IRQ_TYPE_NONE);
+       girq = &pctl->gpio_chip.irq;
+       girq->chip = &pctl->irq_chip;
+       /* This will let us handle the parent IRQ in the driver */
+       girq->parent_handler = NULL;
+       girq->num_parents = 0;
+       girq->parents = NULL;
+       girq->default_type = IRQ_TYPE_NONE;
+       girq->handler = handle_bad_irq;
+       girq->threaded = true;
+
+       ret = devm_gpiochip_add_data(pctl->dev, &pctl->gpio_chip, pctl);
        if (ret) {
-               dev_err(pctl->dev, "cannot add irqchip to gpiochip\n");
+               dev_err(pctl->dev, "gpio_chip registration failed\n");
                return ret;
        }
 
+       ret = stmfx_pinctrl_gpio_function_enable(pctl);
+       if (ret)
+               return ret;
+
        ret = devm_request_threaded_irq(pctl->dev, irq, NULL,
                                        stmfx_pinctrl_irq_thread_fn,
                                        IRQF_ONESHOT,
@@ -709,8 +713,6 @@ static int stmfx_pinctrl_probe(struct platform_device *pdev)
                return ret;
        }
 
-       gpiochip_set_nested_irqchip(&pctl->gpio_chip, &pctl->irq_chip, irq);
-
        dev_info(pctl->dev,
                 "%ld GPIOs available\n", hweight_long(pctl->gpio_valid_mask));
 
index 708bc91..b325a13 100644 (file)
@@ -1187,17 +1187,10 @@ static int sx150x_probe(struct i2c_client *client,
        if (pctl->data->model != SX150X_789)
                pctl->gpio.set_multiple = sx150x_gpio_set_multiple;
 
-       ret = devm_gpiochip_add_data(dev, &pctl->gpio, pctl);
-       if (ret)
-               return ret;
-
-       ret = gpiochip_add_pin_range(&pctl->gpio, dev_name(dev),
-                                    0, 0, pctl->data->npins);
-       if (ret)
-               return ret;
-
        /* Add Interrupt support if an irq is specified */
        if (client->irq > 0) {
+               struct gpio_irq_chip *girq;
+
                pctl->irq_chip.irq_mask = sx150x_irq_mask;
                pctl->irq_chip.irq_unmask = sx150x_irq_unmask;
                pctl->irq_chip.irq_set_type = sx150x_irq_set_type;
@@ -1213,8 +1206,8 @@ static int sx150x_probe(struct i2c_client *client,
 
                /*
                 * Because sx150x_irq_threaded_fn invokes all of the
-                * nested interrrupt handlers via handle_nested_irq,
-                * any "handler" passed to gpiochip_irqchip_add()
+                * nested interrupt handlers via handle_nested_irq,
+                * any "handler" assigned to struct gpio_irq_chip
                 * below is going to be ignored, so the choice of the
                 * function does not matter that much.
                 *
@@ -1222,13 +1215,15 @@ static int sx150x_probe(struct i2c_client *client,
                 * plus it will be instantly noticeable if it is ever
                 * called (should not happen)
                 */
-               ret = gpiochip_irqchip_add_nested(&pctl->gpio,
-                                       &pctl->irq_chip, 0,
-                                       handle_bad_irq, IRQ_TYPE_NONE);
-               if (ret) {
-                       dev_err(dev, "could not connect irqchip to gpiochip\n");
-                       return ret;
-               }
+               girq = &pctl->gpio.irq;
+               girq->chip = &pctl->irq_chip;
+               /* This will let us handle the parent IRQ in the driver */
+               girq->parent_handler = NULL;
+               girq->num_parents = 0;
+               girq->parents = NULL;
+               girq->default_type = IRQ_TYPE_NONE;
+               girq->handler = handle_bad_irq;
+               girq->threaded = true;
 
                ret = devm_request_threaded_irq(dev, client->irq, NULL,
                                                sx150x_irq_thread_fn,
@@ -1237,12 +1232,17 @@ static int sx150x_probe(struct i2c_client *client,
                                                pctl->irq_chip.name, pctl);
                if (ret < 0)
                        return ret;
-
-               gpiochip_set_nested_irqchip(&pctl->gpio,
-                                           &pctl->irq_chip,
-                                           client->irq);
        }
 
+       ret = devm_gpiochip_add_data(dev, &pctl->gpio, pctl);
+       if (ret)
+               return ret;
+
+       ret = gpiochip_add_pin_range(&pctl->gpio, dev_name(dev),
+                                    0, 0, pctl->data->npins);
+       if (ret)
+               return ret;
+
        return 0;
 }
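
Both the stmfx and sx150x conversions above follow the same pattern: instead of registering the gpiochip first and then attaching a nested irqchip with gpiochip_irqchip_add_nested()/gpiochip_set_nested_irqchip(), the driver fills in the gpio_chip's embedded struct gpio_irq_chip before calling devm_gpiochip_add_data(), so the GPIO core registers the irqchip as part of chip registration. The kernel-style fragment below condenses that sequence from the two hunks; it is a sketch of the shared pattern, not compilable on its own, and "chip", "drv_irq_chip" and "priv" are placeholder names.

	struct gpio_irq_chip *girq = &chip->irq;

	girq->chip = &drv_irq_chip;	/* driver's struct irq_chip */
	girq->parent_handler = NULL;	/* no parent handler: driver handles the parent IRQ itself */
	girq->num_parents = 0;
	girq->parents = NULL;
	girq->default_type = IRQ_TYPE_NONE;
	girq->handler = handle_bad_irq;	/* ignored for nested/threaded chips, per the comment above */
	girq->threaded = true;		/* nested, threaded irqchip */

	ret = devm_gpiochip_add_data(dev, chip, priv);	/* irqchip is registered here as well */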
 
index 9503ddf..bab888f 100644 (file)
@@ -74,6 +74,7 @@ int pinmux_validate_map(const struct pinctrl_map *map, int i)
  * pinmux_can_be_used_for_gpio() - check if a specific pin
  *     is either muxed to a different function or used as gpio.
  *
+ * @pctldev: the associated pin controller device
  * @pin: the pin number in the global pin space
  *
  * Controllers not defined as strict will always return true,
@@ -96,6 +97,7 @@ bool pinmux_can_be_used_for_gpio(struct pinctrl_dev *pctldev, unsigned pin)
 
 /**
  * pin_request() - request a single pin to be muxed in, typically for GPIO
+ * @pctldev: the associated pin controller device
  * @pin: the pin number in the global pin space
  * @owner: a representation of the owner of this pin; typically the device
  *     name that controls its mux function, or the requested GPIO name
@@ -254,6 +256,7 @@ static const char *pin_free(struct pinctrl_dev *pctldev, int pin,
  * @pctldev: pin controller device affected
  * @pin: the pin to mux in for GPIO
  * @range: the applicable GPIO range
+ * @gpio: number of requested GPIO
  */
 int pinmux_request_gpio(struct pinctrl_dev *pctldev,
                        struct pinctrl_gpio_range *range,
@@ -744,7 +747,7 @@ EXPORT_SYMBOL_GPL(pinmux_generic_get_function_groups);
 /**
  * pinmux_generic_get_function() - returns a function based on the number
  * @pctldev: pin controller device
- * @group_selector: function number
+ * @selector: function number
  */
 struct function_desc *pinmux_generic_get_function(struct pinctrl_dev *pctldev,
                                                  unsigned int selector)
index 8bdb5bd..63915cb 100644 (file)
@@ -254,6 +254,7 @@ DECLARE_QCA_GPIO_PINS(99);
                .mux_bit = 2,                   \
                .pull_bit = 0,                  \
                .drv_bit = 6,                   \
+               .od_bit = 12,                   \
                .oe_bit = 9,                    \
                .in_bit = 0,                    \
                .out_bit = 1,                   \
index 0edd41c..aec68b1 100644 (file)
@@ -50,6 +50,7 @@
                .intr_enable_bit = 0,           \
                .intr_status_bit = 0,           \
                .intr_target_bit = 5,           \
+               .intr_target_kpss_val = 3,      \
                .intr_raw_status_bit = 4,       \
                .intr_polarity_bit = 1,         \
                .intr_detection_bit = 2,        \
index c322f30..a2567e7 100644 (file)
  * @dev:            device handle.
  * @pctrl:          pinctrl handle.
  * @chip:           gpiochip handle.
+ * @desc:           pin controller descriptor
  * @restart_nb:     restart notifier block.
+ * @irq_chip:       irq chip information
  * @irq:            parent irq for the TLMM irq_chip.
+ * @intr_target_use_scm: route irq to application cpu using scm calls
  * @lock:           Spinlock to protect register resources as well
  *                  as msm_pinctrl data structures.
  * @enabled_irqs:   Bitmap of currently enabled irqs.
  * @dual_edge_irqs: Bitmap of irqs that need sw emulated dual edge
  *                  detection.
  * @skip_wake_irqs: Skip IRQs that are handled by wakeup interrupt controller
- * @soc;            Reference to soc_data of platform specific data.
+ * @soc:            Reference to soc_data of platform specific data.
  * @regs:           Base addresses for the TLMM tiles.
+ * @phys_base:      Physical base address
  */
 struct msm_pinctrl {
        struct device *dev;
@@ -233,6 +237,10 @@ static int msm_config_reg(struct msm_pinctrl *pctrl,
                *bit = g->pull_bit;
                *mask = 3;
                break;
+       case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+               *bit = g->od_bit;
+               *mask = 1;
+               break;
        case PIN_CONFIG_DRIVE_STRENGTH:
                *bit = g->drv_bit;
                *mask = 7;
@@ -310,6 +318,12 @@ static int msm_config_group_get(struct pinctrl_dev *pctldev,
                if (!arg)
                        return -EINVAL;
                break;
+       case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+               /* Pin is not open-drain */
+               if (!arg)
+                       return -EINVAL;
+               arg = 1;
+               break;
        case PIN_CONFIG_DRIVE_STRENGTH:
                arg = msm_regval_to_drive(arg);
                break;
@@ -382,6 +396,9 @@ static int msm_config_group_set(struct pinctrl_dev *pctldev,
                        else
                                arg = MSM_PULL_UP;
                        break;
+               case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+                       arg = 1;
+                       break;
                case PIN_CONFIG_DRIVE_STRENGTH:
                        /* Check for invalid values */
                        if (arg > 16 || arg < 2 || (arg % 2) != 0)
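
The TLMM hunks above wire PIN_CONFIG_DRIVE_OPEN_DRAIN to a new single-bit od_bit field in the per-group control register, alongside the existing pull, drive-strength and output-enable fields. The standalone model below illustrates the underlying (bit, mask) field layout that msm_config_reg() resolves; the register value and most names are made up for illustration, with only the bit positions taken from the hunk that adds .od_bit = 12.

#include <stdio.h>

/* Modelled control-register layout: each parameter lives at (bit, mask). */
struct group_layout {
	unsigned int pull_bit, pull_mask;
	unsigned int drv_bit, drv_mask;
	unsigned int od_bit, od_mask;	/* new: open-drain enable, one bit wide */
};

static unsigned int get_field(unsigned int reg, unsigned int bit, unsigned int mask)
{
	return (reg >> bit) & mask;
}

static unsigned int set_field(unsigned int reg, unsigned int bit, unsigned int mask,
			      unsigned int val)
{
	reg &= ~(mask << bit);
	return reg | ((val & mask) << bit);
}

int main(void)
{
	/* Bit positions as in the hunk above adding .od_bit = 12. */
	struct group_layout g = { .pull_bit = 0, .pull_mask = 3,
				  .drv_bit = 6, .drv_mask = 7,
				  .od_bit = 12, .od_mask = 1 };
	unsigned int ctl = 0;

	ctl = set_field(ctl, g.od_bit, g.od_mask, 1);	/* enable open drain */
	printf("open-drain: %u, ctl: 0x%x\n",
	       get_field(ctl, g.od_bit, g.od_mask), ctl);
	return 0;
}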
index 7486fe0..333f992 100644 (file)
@@ -38,6 +38,7 @@ struct msm_function {
  * @mux_bit:              Offset in @ctl_reg for the pinmux function selection.
  * @pull_bit:             Offset in @ctl_reg for the bias configuration.
  * @drv_bit:              Offset in @ctl_reg for the drive strength configuration.
+ * @od_bit:               Offset in @ctl_reg for controlling open drain.
  * @oe_bit:               Offset in @ctl_reg for controlling output enable.
  * @in_bit:               Offset in @io_reg for the input bit value.
  * @out_bit:              Offset in @io_reg for the output bit value.
@@ -75,6 +76,7 @@ struct msm_pingroup {
        unsigned pull_bit:5;
        unsigned drv_bit:5;
 
+       unsigned od_bit:5;
        unsigned oe_bit:5;
        unsigned in_bit:5;
        unsigned out_bit:5;
index 183f0b2..ec43edf 100644 (file)
@@ -799,9 +799,6 @@ static const char * const pa_indicator_groups[] = {
 static const char * const modem_tsync_groups[] = {
        "gpio93",
 };
-static const char * const nav_tsync_groups[] = {
-       "gpio93",
-};
 static const char * const ssbi_wtr1_groups[] = {
        "gpio79", "gpio94",
 };
index 092a48e..1744138 100644 (file)
@@ -794,13 +794,13 @@ static int pmic_gpio_populate(struct pmic_gpio_state *state,
        switch (subtype) {
        case PMIC_GPIO_SUBTYPE_GPIO_4CH:
                pad->have_buffer = true;
-               /* Fall through */
+               fallthrough;
        case PMIC_GPIO_SUBTYPE_GPIOC_4CH:
                pad->num_sources = 4;
                break;
        case PMIC_GPIO_SUBTYPE_GPIO_8CH:
                pad->have_buffer = true;
-               /* Fall through */
+               fallthrough;
        case PMIC_GPIO_SUBTYPE_GPIOC_8CH:
                pad->num_sources = 8;
                break;
@@ -1117,6 +1117,10 @@ static const struct of_device_id pmic_gpio_of_match[] = {
        { .compatible = "qcom,pma8084-gpio", .data = (void *) 22 },
        /* pms405 has 12 GPIOs with holes on 1, 9, and 10 */
        { .compatible = "qcom,pms405-gpio", .data = (void *) 12 },
+       /* pm660 has 13 GPIOs with holes on 1, 5, 6, 7, 8 and 10 */
+       { .compatible = "qcom,pm660-gpio", .data = (void *) 13 },
+       /* pm660l has 12 GPIOs with holes on 1, 2, 10, 11 and 12 */
+       { .compatible = "qcom,pm660l-gpio", .data = (void *) 12 },
        /* pm8150 has 10 GPIOs with holes on 2, 5, 7 and 8 */
        { .compatible = "qcom,pm8150-gpio", .data = (void *) 10 },
        /* pm8150b has 12 GPIOs with holes on 3, r and 7 */
index 338a15d..b5949f7 100644 (file)
@@ -346,7 +346,7 @@ static int pm8xxx_pin_config_set(struct pinctrl_dev *pctldev,
                                return -EINVAL;
                        }
                        pin->pull_up_strength = arg;
-                       /* FALLTHROUGH */
+                       fallthrough;
                case PIN_CONFIG_BIAS_PULL_UP:
                        pin->bias = pin->pull_up_strength;
                        banks |= BIT(2);
index 84501c7..b9ea09f 100644 (file)
@@ -38,7 +38,7 @@ struct exynos_irq_chip {
        u32 eint_con;
        u32 eint_mask;
        u32 eint_pend;
-       u32 eint_wake_mask_value;
+       u32 *eint_wake_mask_value;
        u32 eint_wake_mask_reg;
        void (*set_eint_wakeup_mask)(struct samsung_pinctrl_drv_data *drvdata,
                                     struct exynos_irq_chip *irq_chip);
@@ -207,7 +207,7 @@ static void exynos_irq_release_resources(struct irq_data *irqd)
 /*
  * irq_chip for gpio interrupts.
  */
-static struct exynos_irq_chip exynos_gpio_irq_chip = {
+static const struct exynos_irq_chip exynos_gpio_irq_chip __initconst = {
        .chip = {
                .name = "exynos_gpio_irq_chip",
                .irq_unmask = exynos_irq_unmask,
@@ -274,7 +274,7 @@ struct exynos_eint_gpio_save {
  * exynos_eint_gpio_init() - setup handling of external gpio interrupts.
  * @d: driver data of samsung pinctrl driver.
  */
-int exynos_eint_gpio_init(struct samsung_pinctrl_drv_data *d)
+__init int exynos_eint_gpio_init(struct samsung_pinctrl_drv_data *d)
 {
        struct samsung_pin_bank *bank;
        struct device *dev = d->dev;
@@ -297,6 +297,15 @@ int exynos_eint_gpio_init(struct samsung_pinctrl_drv_data *d)
        for (i = 0; i < d->nr_banks; ++i, ++bank) {
                if (bank->eint_type != EINT_TYPE_GPIO)
                        continue;
+
+               bank->irq_chip = devm_kmemdup(dev, &exynos_gpio_irq_chip,
+                                          sizeof(*bank->irq_chip), GFP_KERNEL);
+               if (!bank->irq_chip) {
+                       ret = -ENOMEM;
+                       goto err_domains;
+               }
+               bank->irq_chip->chip.name = bank->name;
+
                bank->irq_domain = irq_domain_add_linear(bank->of_node,
                                bank->nr_pins, &exynos_eint_irqd_ops, bank);
                if (!bank->irq_domain) {
@@ -313,7 +322,6 @@ int exynos_eint_gpio_init(struct samsung_pinctrl_drv_data *d)
                        goto err_domains;
                }
 
-               bank->irq_chip = &exynos_gpio_irq_chip;
        }
 
        return 0;
@@ -338,9 +346,9 @@ static int exynos_wkup_irq_set_wake(struct irq_data *irqd, unsigned int on)
        pr_info("wake %s for irq %d\n", on ? "enabled" : "disabled", irqd->irq);
 
        if (!on)
-               our_chip->eint_wake_mask_value |= bit;
+               *our_chip->eint_wake_mask_value |= bit;
        else
-               our_chip->eint_wake_mask_value &= ~bit;
+               *our_chip->eint_wake_mask_value &= ~bit;
 
        return 0;
 }
@@ -360,10 +368,10 @@ exynos_pinctrl_set_eint_wakeup_mask(struct samsung_pinctrl_drv_data *drvdata,
        pmu_regs = drvdata->retention_ctrl->priv;
        dev_info(drvdata->dev,
                 "Setting external wakeup interrupt mask: 0x%x\n",
-                irq_chip->eint_wake_mask_value);
+                *irq_chip->eint_wake_mask_value);
 
        regmap_write(pmu_regs, irq_chip->eint_wake_mask_reg,
-                    irq_chip->eint_wake_mask_value);
+                    *irq_chip->eint_wake_mask_value);
 }
 
 static void
@@ -382,10 +390,11 @@ s5pv210_pinctrl_set_eint_wakeup_mask(struct samsung_pinctrl_drv_data *drvdata,
 
        clk_base = (void __iomem *) drvdata->retention_ctrl->priv;
 
-       __raw_writel(irq_chip->eint_wake_mask_value,
+       __raw_writel(*irq_chip->eint_wake_mask_value,
                     clk_base + irq_chip->eint_wake_mask_reg);
 }
 
+static u32 eint_wake_mask_value = EXYNOS_EINT_WAKEUP_MASK_DISABLED;
 /*
  * irq_chip for wakeup interrupts
  */
@@ -403,7 +412,7 @@ static const struct exynos_irq_chip s5pv210_wkup_irq_chip __initconst = {
        .eint_con = EXYNOS_WKUP_ECON_OFFSET,
        .eint_mask = EXYNOS_WKUP_EMASK_OFFSET,
        .eint_pend = EXYNOS_WKUP_EPEND_OFFSET,
-       .eint_wake_mask_value = EXYNOS_EINT_WAKEUP_MASK_DISABLED,
+       .eint_wake_mask_value = &eint_wake_mask_value,
        /* Only differences with exynos4210_wkup_irq_chip: */
        .eint_wake_mask_reg = S5PV210_EINT_WAKEUP_MASK,
        .set_eint_wakeup_mask = s5pv210_pinctrl_set_eint_wakeup_mask,
@@ -423,7 +432,7 @@ static const struct exynos_irq_chip exynos4210_wkup_irq_chip __initconst = {
        .eint_con = EXYNOS_WKUP_ECON_OFFSET,
        .eint_mask = EXYNOS_WKUP_EMASK_OFFSET,
        .eint_pend = EXYNOS_WKUP_EPEND_OFFSET,
-       .eint_wake_mask_value = EXYNOS_EINT_WAKEUP_MASK_DISABLED,
+       .eint_wake_mask_value = &eint_wake_mask_value,
        .eint_wake_mask_reg = EXYNOS_EINT_WAKEUP_MASK,
        .set_eint_wakeup_mask = exynos_pinctrl_set_eint_wakeup_mask,
 };
@@ -442,7 +451,7 @@ static const struct exynos_irq_chip exynos7_wkup_irq_chip __initconst = {
        .eint_con = EXYNOS7_WKUP_ECON_OFFSET,
        .eint_mask = EXYNOS7_WKUP_EMASK_OFFSET,
        .eint_pend = EXYNOS7_WKUP_EPEND_OFFSET,
-       .eint_wake_mask_value = EXYNOS_EINT_WAKEUP_MASK_DISABLED,
+       .eint_wake_mask_value = &eint_wake_mask_value,
        .eint_wake_mask_reg = EXYNOS5433_EINT_WAKEUP_MASK,
        .set_eint_wakeup_mask = exynos_pinctrl_set_eint_wakeup_mask,
 };
@@ -513,7 +522,7 @@ static void exynos_irq_demux_eint16_31(struct irq_desc *desc)
  * exynos_eint_wkup_init() - setup handling of external wakeup interrupts.
  * @d: driver data of samsung pinctrl driver.
  */
-int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d)
+__init int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d)
 {
        struct device *dev = d->dev;
        struct device_node *wkup_np = NULL;
@@ -521,7 +530,7 @@ int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d)
        struct samsung_pin_bank *bank;
        struct exynos_weint_data *weint_data;
        struct exynos_muxed_weint_data *muxed_data;
-       struct exynos_irq_chip *irq_chip;
+       const struct exynos_irq_chip *irq_chip;
        unsigned int muxed_banks = 0;
        unsigned int i;
        int idx, irq;
@@ -531,12 +540,7 @@ int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d)
 
                match = of_match_node(exynos_wkup_irq_ids, np);
                if (match) {
-                       irq_chip = kmemdup(match->data,
-                               sizeof(*irq_chip), GFP_KERNEL);
-                       if (!irq_chip) {
-                               of_node_put(np);
-                               return -ENOMEM;
-                       }
+                       irq_chip = match->data;
                        wkup_np = np;
                        break;
                }
@@ -549,6 +553,14 @@ int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d)
                if (bank->eint_type != EINT_TYPE_WKUP)
                        continue;
 
+               bank->irq_chip = devm_kmemdup(dev, irq_chip, sizeof(*irq_chip),
+                                             GFP_KERNEL);
+               if (!bank->irq_chip) {
+                       of_node_put(wkup_np);
+                       return -ENOMEM;
+               }
+               bank->irq_chip->chip.name = bank->name;
+
                bank->irq_domain = irq_domain_add_linear(bank->of_node,
                                bank->nr_pins, &exynos_eint_irqd_ops, bank);
                if (!bank->irq_domain) {
@@ -557,8 +569,6 @@ int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d)
                        return -ENXIO;
                }
 
-               bank->irq_chip = irq_chip;
-
                if (!of_find_property(bank->of_node, "interrupts", NULL)) {
                        bank->eint_type = EINT_TYPE_WKUP_MUX;
                        ++muxed_banks;
@@ -657,10 +667,6 @@ void exynos_pinctrl_suspend(struct samsung_pinctrl_drv_data *drvdata)
                                irq_chip = bank->irq_chip;
                                irq_chip->set_eint_wakeup_mask(drvdata,
                                                               irq_chip);
-                       } else if (bank->irq_chip != irq_chip) {
-                               dev_warn(drvdata->dev,
-                                        "More than one external wakeup interrupt chip configured (bank: %s). This is not supported by hardware nor by driver.\n",
-                                        bank->name);
                        }
                }
        }
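
The Samsung/Exynos changes above replace the per-chip u32 eint_wake_mask_value with a pointer into one shared static u32: the irq_chip is now devm_kmemdup()'d per bank so each bank can carry its own chip name, yet every copy must still accumulate into the same wakeup mask, which is also why the "more than one external wakeup interrupt chip" warning can be dropped. The standalone model below sketches that shared-mask-through-a-pointer idea with made-up names, assuming (as the set_wake hunk suggests) a mask register where a set bit disables the wakeup.

#include <stdio.h>

static unsigned int shared_wake_mask = ~0U;	/* all wakeups masked initially */

struct bank_chip {
	const char *name;
	unsigned int *wake_mask;	/* points at the shared mask word */
};

static void chip_set_wake(struct bank_chip *c, unsigned int bit, int on)
{
	if (on)
		*c->wake_mask &= ~(1U << bit);	/* clear mask bit: allow wakeup */
	else
		*c->wake_mask |= (1U << bit);
}

int main(void)
{
	/* Per-bank copies of the chip, all sharing one mask word. */
	struct bank_chip gpa0 = { "gpa0", &shared_wake_mask };
	struct bank_chip gpa1 = { "gpa1", &shared_wake_mask };

	chip_set_wake(&gpa0, 0, 1);
	chip_set_wake(&gpa1, 3, 1);
	printf("wake mask: 0x%x\n", shared_wake_mask);	/* both updates visible */
	return 0;
}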
index 9bd0a3d..5e24838 100644 (file)
@@ -80,7 +80,7 @@ static const struct samsung_pin_bank_type bank_type_2bit = {
        }
 
 /**
- * struct s3c24xx_eint_data: EINT common data
+ * struct s3c24xx_eint_data - EINT common data
  * @drvdata: pin controller driver data
  * @domains: IRQ domains of particular EINT interrupts
  * @parents: mapped parent irqs in the main interrupt controller
@@ -92,10 +92,10 @@ struct s3c24xx_eint_data {
 };
 
 /**
- * struct s3c24xx_eint_domain_data: per irq-domain data
+ * struct s3c24xx_eint_domain_data - per irq-domain data
  * @bank: pin bank related to the domain
  * @eint_data: common data
- * eint0_3_parent_only: live eints 0-3 only in the main intc
+ * @eint0_3_parent_only: live eints 0-3 only in the main intc
  */
 struct s3c24xx_eint_domain_data {
        struct samsung_pin_bank *bank;
index f97f817..b8166e3 100644 (file)
@@ -193,7 +193,7 @@ static const struct samsung_pin_bank_type bank_type_2bit_alive = {
        }
 
 /**
- * struct s3c64xx_eint0_data: EINT0 common data
+ * struct s3c64xx_eint0_data - EINT0 common data
  * @drvdata: pin controller driver data
  * @domains: IRQ domains of particular EINT0 interrupts
  * @pins: pin offsets inside of banks of particular EINT0 interrupts
@@ -205,7 +205,7 @@ struct s3c64xx_eint0_data {
 };
 
 /**
- * struct s3c64xx_eint0_domain_data: EINT0 per-domain data
+ * struct s3c64xx_eint0_domain_data - EINT0 per-domain data
  * @bank: pin bank related to the domain
  * @eints: EINT0 interrupts related to the domain
  */
@@ -215,7 +215,7 @@ struct s3c64xx_eint0_domain_data {
 };
 
 /**
- * struct s3c64xx_eint_gpio_data: GPIO EINT data
+ * struct s3c64xx_eint_gpio_data - GPIO EINT data
  * @drvdata: pin controller driver data
  * @domains: array of domains related to EINT interrupt groups
  */
index f26574e..608eb5a 100644 (file)
@@ -1140,7 +1140,7 @@ static int samsung_pinctrl_probe(struct platform_device *pdev)
        return 0;
 }
 
-/**
+/*
  * samsung_pinctrl_suspend - save pinctrl state for suspend
  *
  * Save data for all banks handled by this device.
@@ -1187,7 +1187,7 @@ static int __maybe_unused samsung_pinctrl_suspend(struct device *dev)
        return 0;
 }
 
-/**
+/*
  * samsung_pinctrl_resume - restore pinctrl state from suspend
  *
  * Restore one of the banks that was saved during suspend.
index c461a2f..7fdc7ed 100644 (file)
@@ -20,6 +20,7 @@ config PINCTRL_SH_PFC
        select PINCTRL_PFC_R8A774A1 if ARCH_R8A774A1
        select PINCTRL_PFC_R8A774B1 if ARCH_R8A774B1
        select PINCTRL_PFC_R8A774C0 if ARCH_R8A774C0
+       select PINCTRL_PFC_R8A774E1 if ARCH_R8A774E1
        select PINCTRL_PFC_R8A7778 if ARCH_R8A7778
        select PINCTRL_PFC_R8A7779 if ARCH_R8A7779
        select PINCTRL_PFC_R8A7790 if ARCH_R8A7790
@@ -99,6 +100,9 @@ config PINCTRL_PFC_R8A774B1
 config PINCTRL_PFC_R8A774C0
        bool "RZ/G2E pin control support" if COMPILE_TEST
 
+config PINCTRL_PFC_R8A774E1
+       bool "RZ/G2H pin control support" if COMPILE_TEST
+
 config PINCTRL_PFC_R8A7778
        bool "R-Car M1A pin control support" if COMPILE_TEST
 
index 3855d82..7bb9918 100644 (file)
@@ -12,6 +12,7 @@ obj-$(CONFIG_PINCTRL_PFC_R8A77470)    += pfc-r8a77470.o
 obj-$(CONFIG_PINCTRL_PFC_R8A774A1)     += pfc-r8a7796.o
 obj-$(CONFIG_PINCTRL_PFC_R8A774B1)     += pfc-r8a77965.o
 obj-$(CONFIG_PINCTRL_PFC_R8A774C0)     += pfc-r8a77990.o
+obj-$(CONFIG_PINCTRL_PFC_R8A774E1)     += pfc-r8a77951.o
 obj-$(CONFIG_PINCTRL_PFC_R8A7778)      += pfc-r8a7778.o
 obj-$(CONFIG_PINCTRL_PFC_R8A7779)      += pfc-r8a7779.o
 obj-$(CONFIG_PINCTRL_PFC_R8A7790)      += pfc-r8a7790.o
index f368383..c528c12 100644 (file)
@@ -533,6 +533,12 @@ static const struct of_device_id sh_pfc_of_table[] = {
                .data = &r8a774c0_pinmux_info,
        },
 #endif
+#ifdef CONFIG_PINCTRL_PFC_R8A774E1
+       {
+               .compatible = "renesas,pfc-r8a774e1",
+               .data = &r8a774e1_pinmux_info,
+       },
+#endif
 #ifdef CONFIG_PINCTRL_PFC_R8A7778
        {
                .compatible = "renesas,pfc-r8a7778",
index 256fab4..a94ebe0 100644 (file)
@@ -4157,357 +4157,365 @@ static const unsigned int vin5_clk_mux[] = {
        VI5_CLK_MARK,
 };
 
-static const struct sh_pfc_pin_group pinmux_groups[] = {
-       SH_PFC_PIN_GROUP(audio_clk_a_a),
-       SH_PFC_PIN_GROUP(audio_clk_a_b),
-       SH_PFC_PIN_GROUP(audio_clk_a_c),
-       SH_PFC_PIN_GROUP(audio_clk_b_a),
-       SH_PFC_PIN_GROUP(audio_clk_b_b),
-       SH_PFC_PIN_GROUP(audio_clk_c_a),
-       SH_PFC_PIN_GROUP(audio_clk_c_b),
-       SH_PFC_PIN_GROUP(audio_clkout_a),
-       SH_PFC_PIN_GROUP(audio_clkout_b),
-       SH_PFC_PIN_GROUP(audio_clkout_c),
-       SH_PFC_PIN_GROUP(audio_clkout_d),
-       SH_PFC_PIN_GROUP(audio_clkout1_a),
-       SH_PFC_PIN_GROUP(audio_clkout1_b),
-       SH_PFC_PIN_GROUP(audio_clkout2_a),
-       SH_PFC_PIN_GROUP(audio_clkout2_b),
-       SH_PFC_PIN_GROUP(audio_clkout3_a),
-       SH_PFC_PIN_GROUP(audio_clkout3_b),
-       SH_PFC_PIN_GROUP(avb_link),
-       SH_PFC_PIN_GROUP(avb_magic),
-       SH_PFC_PIN_GROUP(avb_phy_int),
-       SH_PFC_PIN_GROUP_ALIAS(avb_mdc, avb_mdio),      /* Deprecated */
-       SH_PFC_PIN_GROUP(avb_mdio),
-       SH_PFC_PIN_GROUP(avb_mii),
-       SH_PFC_PIN_GROUP(avb_avtp_pps),
-       SH_PFC_PIN_GROUP(avb_avtp_match_a),
-       SH_PFC_PIN_GROUP(avb_avtp_capture_a),
-       SH_PFC_PIN_GROUP(avb_avtp_match_b),
-       SH_PFC_PIN_GROUP(avb_avtp_capture_b),
-       SH_PFC_PIN_GROUP(can0_data_a),
-       SH_PFC_PIN_GROUP(can0_data_b),
-       SH_PFC_PIN_GROUP(can1_data),
-       SH_PFC_PIN_GROUP(can_clk),
-       SH_PFC_PIN_GROUP(canfd0_data_a),
-       SH_PFC_PIN_GROUP(canfd0_data_b),
-       SH_PFC_PIN_GROUP(canfd1_data),
-       SH_PFC_PIN_GROUP(drif0_ctrl_a),
-       SH_PFC_PIN_GROUP(drif0_data0_a),
-       SH_PFC_PIN_GROUP(drif0_data1_a),
-       SH_PFC_PIN_GROUP(drif0_ctrl_b),
-       SH_PFC_PIN_GROUP(drif0_data0_b),
-       SH_PFC_PIN_GROUP(drif0_data1_b),
-       SH_PFC_PIN_GROUP(drif0_ctrl_c),
-       SH_PFC_PIN_GROUP(drif0_data0_c),
-       SH_PFC_PIN_GROUP(drif0_data1_c),
-       SH_PFC_PIN_GROUP(drif1_ctrl_a),
-       SH_PFC_PIN_GROUP(drif1_data0_a),
-       SH_PFC_PIN_GROUP(drif1_data1_a),
-       SH_PFC_PIN_GROUP(drif1_ctrl_b),
-       SH_PFC_PIN_GROUP(drif1_data0_b),
-       SH_PFC_PIN_GROUP(drif1_data1_b),
-       SH_PFC_PIN_GROUP(drif1_ctrl_c),
-       SH_PFC_PIN_GROUP(drif1_data0_c),
-       SH_PFC_PIN_GROUP(drif1_data1_c),
-       SH_PFC_PIN_GROUP(drif2_ctrl_a),
-       SH_PFC_PIN_GROUP(drif2_data0_a),
-       SH_PFC_PIN_GROUP(drif2_data1_a),
-       SH_PFC_PIN_GROUP(drif2_ctrl_b),
-       SH_PFC_PIN_GROUP(drif2_data0_b),
-       SH_PFC_PIN_GROUP(drif2_data1_b),
-       SH_PFC_PIN_GROUP(drif3_ctrl_a),
-       SH_PFC_PIN_GROUP(drif3_data0_a),
-       SH_PFC_PIN_GROUP(drif3_data1_a),
-       SH_PFC_PIN_GROUP(drif3_ctrl_b),
-       SH_PFC_PIN_GROUP(drif3_data0_b),
-       SH_PFC_PIN_GROUP(drif3_data1_b),
-       SH_PFC_PIN_GROUP(du_rgb666),
-       SH_PFC_PIN_GROUP(du_rgb888),
-       SH_PFC_PIN_GROUP(du_clk_out_0),
-       SH_PFC_PIN_GROUP(du_clk_out_1),
-       SH_PFC_PIN_GROUP(du_sync),
-       SH_PFC_PIN_GROUP(du_oddf),
-       SH_PFC_PIN_GROUP(du_cde),
-       SH_PFC_PIN_GROUP(du_disp),
-       SH_PFC_PIN_GROUP(hscif0_data),
-       SH_PFC_PIN_GROUP(hscif0_clk),
-       SH_PFC_PIN_GROUP(hscif0_ctrl),
-       SH_PFC_PIN_GROUP(hscif1_data_a),
-       SH_PFC_PIN_GROUP(hscif1_clk_a),
-       SH_PFC_PIN_GROUP(hscif1_ctrl_a),
-       SH_PFC_PIN_GROUP(hscif1_data_b),
-       SH_PFC_PIN_GROUP(hscif1_clk_b),
-       SH_PFC_PIN_GROUP(hscif1_ctrl_b),
-       SH_PFC_PIN_GROUP(hscif2_data_a),
-       SH_PFC_PIN_GROUP(hscif2_clk_a),
-       SH_PFC_PIN_GROUP(hscif2_ctrl_a),
-       SH_PFC_PIN_GROUP(hscif2_data_b),
-       SH_PFC_PIN_GROUP(hscif2_clk_b),
-       SH_PFC_PIN_GROUP(hscif2_ctrl_b),
-       SH_PFC_PIN_GROUP(hscif2_data_c),
-       SH_PFC_PIN_GROUP(hscif2_clk_c),
-       SH_PFC_PIN_GROUP(hscif2_ctrl_c),
-       SH_PFC_PIN_GROUP(hscif3_data_a),
-       SH_PFC_PIN_GROUP(hscif3_clk),
-       SH_PFC_PIN_GROUP(hscif3_ctrl),
-       SH_PFC_PIN_GROUP(hscif3_data_b),
-       SH_PFC_PIN_GROUP(hscif3_data_c),
-       SH_PFC_PIN_GROUP(hscif3_data_d),
-       SH_PFC_PIN_GROUP(hscif4_data_a),
-       SH_PFC_PIN_GROUP(hscif4_clk),
-       SH_PFC_PIN_GROUP(hscif4_ctrl),
-       SH_PFC_PIN_GROUP(hscif4_data_b),
-       SH_PFC_PIN_GROUP(i2c0),
-       SH_PFC_PIN_GROUP(i2c1_a),
-       SH_PFC_PIN_GROUP(i2c1_b),
-       SH_PFC_PIN_GROUP(i2c2_a),
-       SH_PFC_PIN_GROUP(i2c2_b),
-       SH_PFC_PIN_GROUP(i2c3),
-       SH_PFC_PIN_GROUP(i2c5),
-       SH_PFC_PIN_GROUP(i2c6_a),
-       SH_PFC_PIN_GROUP(i2c6_b),
-       SH_PFC_PIN_GROUP(i2c6_c),
-       SH_PFC_PIN_GROUP(intc_ex_irq0),
-       SH_PFC_PIN_GROUP(intc_ex_irq1),
-       SH_PFC_PIN_GROUP(intc_ex_irq2),
-       SH_PFC_PIN_GROUP(intc_ex_irq3),
-       SH_PFC_PIN_GROUP(intc_ex_irq4),
-       SH_PFC_PIN_GROUP(intc_ex_irq5),
-       SH_PFC_PIN_GROUP(msiof0_clk),
-       SH_PFC_PIN_GROUP(msiof0_sync),
-       SH_PFC_PIN_GROUP(msiof0_ss1),
-       SH_PFC_PIN_GROUP(msiof0_ss2),
-       SH_PFC_PIN_GROUP(msiof0_txd),
-       SH_PFC_PIN_GROUP(msiof0_rxd),
-       SH_PFC_PIN_GROUP(msiof1_clk_a),
-       SH_PFC_PIN_GROUP(msiof1_sync_a),
-       SH_PFC_PIN_GROUP(msiof1_ss1_a),
-       SH_PFC_PIN_GROUP(msiof1_ss2_a),
-       SH_PFC_PIN_GROUP(msiof1_txd_a),
-       SH_PFC_PIN_GROUP(msiof1_rxd_a),
-       SH_PFC_PIN_GROUP(msiof1_clk_b),
-       SH_PFC_PIN_GROUP(msiof1_sync_b),
-       SH_PFC_PIN_GROUP(msiof1_ss1_b),
-       SH_PFC_PIN_GROUP(msiof1_ss2_b),
-       SH_PFC_PIN_GROUP(msiof1_txd_b),
-       SH_PFC_PIN_GROUP(msiof1_rxd_b),
-       SH_PFC_PIN_GROUP(msiof1_clk_c),
-       SH_PFC_PIN_GROUP(msiof1_sync_c),
-       SH_PFC_PIN_GROUP(msiof1_ss1_c),
-       SH_PFC_PIN_GROUP(msiof1_ss2_c),
-       SH_PFC_PIN_GROUP(msiof1_txd_c),
-       SH_PFC_PIN_GROUP(msiof1_rxd_c),
-       SH_PFC_PIN_GROUP(msiof1_clk_d),
-       SH_PFC_PIN_GROUP(msiof1_sync_d),
-       SH_PFC_PIN_GROUP(msiof1_ss1_d),
-       SH_PFC_PIN_GROUP(msiof1_ss2_d),
-       SH_PFC_PIN_GROUP(msiof1_txd_d),
-       SH_PFC_PIN_GROUP(msiof1_rxd_d),
-       SH_PFC_PIN_GROUP(msiof1_clk_e),
-       SH_PFC_PIN_GROUP(msiof1_sync_e),
-       SH_PFC_PIN_GROUP(msiof1_ss1_e),
-       SH_PFC_PIN_GROUP(msiof1_ss2_e),
-       SH_PFC_PIN_GROUP(msiof1_txd_e),
-       SH_PFC_PIN_GROUP(msiof1_rxd_e),
-       SH_PFC_PIN_GROUP(msiof1_clk_f),
-       SH_PFC_PIN_GROUP(msiof1_sync_f),
-       SH_PFC_PIN_GROUP(msiof1_ss1_f),
-       SH_PFC_PIN_GROUP(msiof1_ss2_f),
-       SH_PFC_PIN_GROUP(msiof1_txd_f),
-       SH_PFC_PIN_GROUP(msiof1_rxd_f),
-       SH_PFC_PIN_GROUP(msiof1_clk_g),
-       SH_PFC_PIN_GROUP(msiof1_sync_g),
-       SH_PFC_PIN_GROUP(msiof1_ss1_g),
-       SH_PFC_PIN_GROUP(msiof1_ss2_g),
-       SH_PFC_PIN_GROUP(msiof1_txd_g),
-       SH_PFC_PIN_GROUP(msiof1_rxd_g),
-       SH_PFC_PIN_GROUP(msiof2_clk_a),
-       SH_PFC_PIN_GROUP(msiof2_sync_a),
-       SH_PFC_PIN_GROUP(msiof2_ss1_a),
-       SH_PFC_PIN_GROUP(msiof2_ss2_a),
-       SH_PFC_PIN_GROUP(msiof2_txd_a),
-       SH_PFC_PIN_GROUP(msiof2_rxd_a),
-       SH_PFC_PIN_GROUP(msiof2_clk_b),
-       SH_PFC_PIN_GROUP(msiof2_sync_b),
-       SH_PFC_PIN_GROUP(msiof2_ss1_b),
-       SH_PFC_PIN_GROUP(msiof2_ss2_b),
-       SH_PFC_PIN_GROUP(msiof2_txd_b),
-       SH_PFC_PIN_GROUP(msiof2_rxd_b),
-       SH_PFC_PIN_GROUP(msiof2_clk_c),
-       SH_PFC_PIN_GROUP(msiof2_sync_c),
-       SH_PFC_PIN_GROUP(msiof2_ss1_c),
-       SH_PFC_PIN_GROUP(msiof2_ss2_c),
-       SH_PFC_PIN_GROUP(msiof2_txd_c),
-       SH_PFC_PIN_GROUP(msiof2_rxd_c),
-       SH_PFC_PIN_GROUP(msiof2_clk_d),
-       SH_PFC_PIN_GROUP(msiof2_sync_d),
-       SH_PFC_PIN_GROUP(msiof2_ss1_d),
-       SH_PFC_PIN_GROUP(msiof2_ss2_d),
-       SH_PFC_PIN_GROUP(msiof2_txd_d),
-       SH_PFC_PIN_GROUP(msiof2_rxd_d),
-       SH_PFC_PIN_GROUP(msiof3_clk_a),
-       SH_PFC_PIN_GROUP(msiof3_sync_a),
-       SH_PFC_PIN_GROUP(msiof3_ss1_a),
-       SH_PFC_PIN_GROUP(msiof3_ss2_a),
-       SH_PFC_PIN_GROUP(msiof3_txd_a),
-       SH_PFC_PIN_GROUP(msiof3_rxd_a),
-       SH_PFC_PIN_GROUP(msiof3_clk_b),
-       SH_PFC_PIN_GROUP(msiof3_sync_b),
-       SH_PFC_PIN_GROUP(msiof3_ss1_b),
-       SH_PFC_PIN_GROUP(msiof3_ss2_b),
-       SH_PFC_PIN_GROUP(msiof3_txd_b),
-       SH_PFC_PIN_GROUP(msiof3_rxd_b),
-       SH_PFC_PIN_GROUP(msiof3_clk_c),
-       SH_PFC_PIN_GROUP(msiof3_sync_c),
-       SH_PFC_PIN_GROUP(msiof3_txd_c),
-       SH_PFC_PIN_GROUP(msiof3_rxd_c),
-       SH_PFC_PIN_GROUP(msiof3_clk_d),
-       SH_PFC_PIN_GROUP(msiof3_sync_d),
-       SH_PFC_PIN_GROUP(msiof3_ss1_d),
-       SH_PFC_PIN_GROUP(msiof3_txd_d),
-       SH_PFC_PIN_GROUP(msiof3_rxd_d),
-       SH_PFC_PIN_GROUP(msiof3_clk_e),
-       SH_PFC_PIN_GROUP(msiof3_sync_e),
-       SH_PFC_PIN_GROUP(msiof3_ss1_e),
-       SH_PFC_PIN_GROUP(msiof3_ss2_e),
-       SH_PFC_PIN_GROUP(msiof3_txd_e),
-       SH_PFC_PIN_GROUP(msiof3_rxd_e),
-       SH_PFC_PIN_GROUP(pwm0),
-       SH_PFC_PIN_GROUP(pwm1_a),
-       SH_PFC_PIN_GROUP(pwm1_b),
-       SH_PFC_PIN_GROUP(pwm2_a),
-       SH_PFC_PIN_GROUP(pwm2_b),
-       SH_PFC_PIN_GROUP(pwm3_a),
-       SH_PFC_PIN_GROUP(pwm3_b),
-       SH_PFC_PIN_GROUP(pwm4_a),
-       SH_PFC_PIN_GROUP(pwm4_b),
-       SH_PFC_PIN_GROUP(pwm5_a),
-       SH_PFC_PIN_GROUP(pwm5_b),
-       SH_PFC_PIN_GROUP(pwm6_a),
-       SH_PFC_PIN_GROUP(pwm6_b),
-       SH_PFC_PIN_GROUP(sata0_devslp_a),
-       SH_PFC_PIN_GROUP(sata0_devslp_b),
-       SH_PFC_PIN_GROUP(scif0_data),
-       SH_PFC_PIN_GROUP(scif0_clk),
-       SH_PFC_PIN_GROUP(scif0_ctrl),
-       SH_PFC_PIN_GROUP(scif1_data_a),
-       SH_PFC_PIN_GROUP(scif1_clk),
-       SH_PFC_PIN_GROUP(scif1_ctrl),
-       SH_PFC_PIN_GROUP(scif1_data_b),
-       SH_PFC_PIN_GROUP(scif2_data_a),
-       SH_PFC_PIN_GROUP(scif2_clk),
-       SH_PFC_PIN_GROUP(scif2_data_b),
-       SH_PFC_PIN_GROUP(scif3_data_a),
-       SH_PFC_PIN_GROUP(scif3_clk),
-       SH_PFC_PIN_GROUP(scif3_ctrl),
-       SH_PFC_PIN_GROUP(scif3_data_b),
-       SH_PFC_PIN_GROUP(scif4_data_a),
-       SH_PFC_PIN_GROUP(scif4_clk_a),
-       SH_PFC_PIN_GROUP(scif4_ctrl_a),
-       SH_PFC_PIN_GROUP(scif4_data_b),
-       SH_PFC_PIN_GROUP(scif4_clk_b),
-       SH_PFC_PIN_GROUP(scif4_ctrl_b),
-       SH_PFC_PIN_GROUP(scif4_data_c),
-       SH_PFC_PIN_GROUP(scif4_clk_c),
-       SH_PFC_PIN_GROUP(scif4_ctrl_c),
-       SH_PFC_PIN_GROUP(scif5_data_a),
-       SH_PFC_PIN_GROUP(scif5_clk_a),
-       SH_PFC_PIN_GROUP(scif5_data_b),
-       SH_PFC_PIN_GROUP(scif5_clk_b),
-       SH_PFC_PIN_GROUP(scif_clk_a),
-       SH_PFC_PIN_GROUP(scif_clk_b),
-       SH_PFC_PIN_GROUP(sdhi0_data1),
-       SH_PFC_PIN_GROUP(sdhi0_data4),
-       SH_PFC_PIN_GROUP(sdhi0_ctrl),
-       SH_PFC_PIN_GROUP(sdhi0_cd),
-       SH_PFC_PIN_GROUP(sdhi0_wp),
-       SH_PFC_PIN_GROUP(sdhi1_data1),
-       SH_PFC_PIN_GROUP(sdhi1_data4),
-       SH_PFC_PIN_GROUP(sdhi1_ctrl),
-       SH_PFC_PIN_GROUP(sdhi1_cd),
-       SH_PFC_PIN_GROUP(sdhi1_wp),
-       SH_PFC_PIN_GROUP(sdhi2_data1),
-       SH_PFC_PIN_GROUP(sdhi2_data4),
-       SH_PFC_PIN_GROUP(sdhi2_data8),
-       SH_PFC_PIN_GROUP(sdhi2_ctrl),
-       SH_PFC_PIN_GROUP(sdhi2_cd_a),
-       SH_PFC_PIN_GROUP(sdhi2_wp_a),
-       SH_PFC_PIN_GROUP(sdhi2_cd_b),
-       SH_PFC_PIN_GROUP(sdhi2_wp_b),
-       SH_PFC_PIN_GROUP(sdhi2_ds),
-       SH_PFC_PIN_GROUP(sdhi3_data1),
-       SH_PFC_PIN_GROUP(sdhi3_data4),
-       SH_PFC_PIN_GROUP(sdhi3_data8),
-       SH_PFC_PIN_GROUP(sdhi3_ctrl),
-       SH_PFC_PIN_GROUP(sdhi3_cd),
-       SH_PFC_PIN_GROUP(sdhi3_wp),
-       SH_PFC_PIN_GROUP(sdhi3_ds),
-       SH_PFC_PIN_GROUP(ssi0_data),
-       SH_PFC_PIN_GROUP(ssi01239_ctrl),
-       SH_PFC_PIN_GROUP(ssi1_data_a),
-       SH_PFC_PIN_GROUP(ssi1_data_b),
-       SH_PFC_PIN_GROUP(ssi1_ctrl_a),
-       SH_PFC_PIN_GROUP(ssi1_ctrl_b),
-       SH_PFC_PIN_GROUP(ssi2_data_a),
-       SH_PFC_PIN_GROUP(ssi2_data_b),
-       SH_PFC_PIN_GROUP(ssi2_ctrl_a),
-       SH_PFC_PIN_GROUP(ssi2_ctrl_b),
-       SH_PFC_PIN_GROUP(ssi3_data),
-       SH_PFC_PIN_GROUP(ssi349_ctrl),
-       SH_PFC_PIN_GROUP(ssi4_data),
-       SH_PFC_PIN_GROUP(ssi4_ctrl),
-       SH_PFC_PIN_GROUP(ssi5_data),
-       SH_PFC_PIN_GROUP(ssi5_ctrl),
-       SH_PFC_PIN_GROUP(ssi6_data),
-       SH_PFC_PIN_GROUP(ssi6_ctrl),
-       SH_PFC_PIN_GROUP(ssi7_data),
-       SH_PFC_PIN_GROUP(ssi78_ctrl),
-       SH_PFC_PIN_GROUP(ssi8_data),
-       SH_PFC_PIN_GROUP(ssi9_data_a),
-       SH_PFC_PIN_GROUP(ssi9_data_b),
-       SH_PFC_PIN_GROUP(ssi9_ctrl_a),
-       SH_PFC_PIN_GROUP(ssi9_ctrl_b),
-       SH_PFC_PIN_GROUP(tmu_tclk1_a),
-       SH_PFC_PIN_GROUP(tmu_tclk1_b),
-       SH_PFC_PIN_GROUP(tmu_tclk2_a),
-       SH_PFC_PIN_GROUP(tmu_tclk2_b),
-       SH_PFC_PIN_GROUP(tpu_to0),
-       SH_PFC_PIN_GROUP(tpu_to1),
-       SH_PFC_PIN_GROUP(tpu_to2),
-       SH_PFC_PIN_GROUP(tpu_to3),
-       SH_PFC_PIN_GROUP(usb0),
-       SH_PFC_PIN_GROUP(usb1),
-       SH_PFC_PIN_GROUP(usb2),
-       SH_PFC_PIN_GROUP(usb2_ch3),
-       SH_PFC_PIN_GROUP(usb30),
-       VIN_DATA_PIN_GROUP(vin4_data, 8, _a),
-       VIN_DATA_PIN_GROUP(vin4_data, 10, _a),
-       VIN_DATA_PIN_GROUP(vin4_data, 12, _a),
-       VIN_DATA_PIN_GROUP(vin4_data, 16, _a),
-       SH_PFC_PIN_GROUP(vin4_data18_a),
-       VIN_DATA_PIN_GROUP(vin4_data, 20, _a),
-       VIN_DATA_PIN_GROUP(vin4_data, 24, _a),
-       VIN_DATA_PIN_GROUP(vin4_data, 8, _b),
-       VIN_DATA_PIN_GROUP(vin4_data, 10, _b),
-       VIN_DATA_PIN_GROUP(vin4_data, 12, _b),
-       VIN_DATA_PIN_GROUP(vin4_data, 16, _b),
-       SH_PFC_PIN_GROUP(vin4_data18_b),
-       VIN_DATA_PIN_GROUP(vin4_data, 20, _b),
-       VIN_DATA_PIN_GROUP(vin4_data, 24, _b),
-       SH_PFC_PIN_GROUP(vin4_sync),
-       SH_PFC_PIN_GROUP(vin4_field),
-       SH_PFC_PIN_GROUP(vin4_clkenb),
-       SH_PFC_PIN_GROUP(vin4_clk),
-       VIN_DATA_PIN_GROUP(vin5_data, 8),
-       VIN_DATA_PIN_GROUP(vin5_data, 10),
-       VIN_DATA_PIN_GROUP(vin5_data, 12),
-       VIN_DATA_PIN_GROUP(vin5_data, 16),
-       SH_PFC_PIN_GROUP(vin5_sync),
-       SH_PFC_PIN_GROUP(vin5_field),
-       SH_PFC_PIN_GROUP(vin5_clkenb),
-       SH_PFC_PIN_GROUP(vin5_clk),
+static const struct {
+       struct sh_pfc_pin_group common[320];
+       struct sh_pfc_pin_group automotive[30];
+} pinmux_groups = {
+       .common = {
+               SH_PFC_PIN_GROUP(audio_clk_a_a),
+               SH_PFC_PIN_GROUP(audio_clk_a_b),
+               SH_PFC_PIN_GROUP(audio_clk_a_c),
+               SH_PFC_PIN_GROUP(audio_clk_b_a),
+               SH_PFC_PIN_GROUP(audio_clk_b_b),
+               SH_PFC_PIN_GROUP(audio_clk_c_a),
+               SH_PFC_PIN_GROUP(audio_clk_c_b),
+               SH_PFC_PIN_GROUP(audio_clkout_a),
+               SH_PFC_PIN_GROUP(audio_clkout_b),
+               SH_PFC_PIN_GROUP(audio_clkout_c),
+               SH_PFC_PIN_GROUP(audio_clkout_d),
+               SH_PFC_PIN_GROUP(audio_clkout1_a),
+               SH_PFC_PIN_GROUP(audio_clkout1_b),
+               SH_PFC_PIN_GROUP(audio_clkout2_a),
+               SH_PFC_PIN_GROUP(audio_clkout2_b),
+               SH_PFC_PIN_GROUP(audio_clkout3_a),
+               SH_PFC_PIN_GROUP(audio_clkout3_b),
+               SH_PFC_PIN_GROUP(avb_link),
+               SH_PFC_PIN_GROUP(avb_magic),
+               SH_PFC_PIN_GROUP(avb_phy_int),
+               SH_PFC_PIN_GROUP_ALIAS(avb_mdc, avb_mdio),      /* Deprecated */
+               SH_PFC_PIN_GROUP(avb_mdio),
+               SH_PFC_PIN_GROUP(avb_mii),
+               SH_PFC_PIN_GROUP(avb_avtp_pps),
+               SH_PFC_PIN_GROUP(avb_avtp_match_a),
+               SH_PFC_PIN_GROUP(avb_avtp_capture_a),
+               SH_PFC_PIN_GROUP(avb_avtp_match_b),
+               SH_PFC_PIN_GROUP(avb_avtp_capture_b),
+               SH_PFC_PIN_GROUP(can0_data_a),
+               SH_PFC_PIN_GROUP(can0_data_b),
+               SH_PFC_PIN_GROUP(can1_data),
+               SH_PFC_PIN_GROUP(can_clk),
+               SH_PFC_PIN_GROUP(canfd0_data_a),
+               SH_PFC_PIN_GROUP(canfd0_data_b),
+               SH_PFC_PIN_GROUP(canfd1_data),
+               SH_PFC_PIN_GROUP(du_rgb666),
+               SH_PFC_PIN_GROUP(du_rgb888),
+               SH_PFC_PIN_GROUP(du_clk_out_0),
+               SH_PFC_PIN_GROUP(du_clk_out_1),
+               SH_PFC_PIN_GROUP(du_sync),
+               SH_PFC_PIN_GROUP(du_oddf),
+               SH_PFC_PIN_GROUP(du_cde),
+               SH_PFC_PIN_GROUP(du_disp),
+               SH_PFC_PIN_GROUP(hscif0_data),
+               SH_PFC_PIN_GROUP(hscif0_clk),
+               SH_PFC_PIN_GROUP(hscif0_ctrl),
+               SH_PFC_PIN_GROUP(hscif1_data_a),
+               SH_PFC_PIN_GROUP(hscif1_clk_a),
+               SH_PFC_PIN_GROUP(hscif1_ctrl_a),
+               SH_PFC_PIN_GROUP(hscif1_data_b),
+               SH_PFC_PIN_GROUP(hscif1_clk_b),
+               SH_PFC_PIN_GROUP(hscif1_ctrl_b),
+               SH_PFC_PIN_GROUP(hscif2_data_a),
+               SH_PFC_PIN_GROUP(hscif2_clk_a),
+               SH_PFC_PIN_GROUP(hscif2_ctrl_a),
+               SH_PFC_PIN_GROUP(hscif2_data_b),
+               SH_PFC_PIN_GROUP(hscif2_clk_b),
+               SH_PFC_PIN_GROUP(hscif2_ctrl_b),
+               SH_PFC_PIN_GROUP(hscif2_data_c),
+               SH_PFC_PIN_GROUP(hscif2_clk_c),
+               SH_PFC_PIN_GROUP(hscif2_ctrl_c),
+               SH_PFC_PIN_GROUP(hscif3_data_a),
+               SH_PFC_PIN_GROUP(hscif3_clk),
+               SH_PFC_PIN_GROUP(hscif3_ctrl),
+               SH_PFC_PIN_GROUP(hscif3_data_b),
+               SH_PFC_PIN_GROUP(hscif3_data_c),
+               SH_PFC_PIN_GROUP(hscif3_data_d),
+               SH_PFC_PIN_GROUP(hscif4_data_a),
+               SH_PFC_PIN_GROUP(hscif4_clk),
+               SH_PFC_PIN_GROUP(hscif4_ctrl),
+               SH_PFC_PIN_GROUP(hscif4_data_b),
+               SH_PFC_PIN_GROUP(i2c0),
+               SH_PFC_PIN_GROUP(i2c1_a),
+               SH_PFC_PIN_GROUP(i2c1_b),
+               SH_PFC_PIN_GROUP(i2c2_a),
+               SH_PFC_PIN_GROUP(i2c2_b),
+               SH_PFC_PIN_GROUP(i2c3),
+               SH_PFC_PIN_GROUP(i2c5),
+               SH_PFC_PIN_GROUP(i2c6_a),
+               SH_PFC_PIN_GROUP(i2c6_b),
+               SH_PFC_PIN_GROUP(i2c6_c),
+               SH_PFC_PIN_GROUP(intc_ex_irq0),
+               SH_PFC_PIN_GROUP(intc_ex_irq1),
+               SH_PFC_PIN_GROUP(intc_ex_irq2),
+               SH_PFC_PIN_GROUP(intc_ex_irq3),
+               SH_PFC_PIN_GROUP(intc_ex_irq4),
+               SH_PFC_PIN_GROUP(intc_ex_irq5),
+               SH_PFC_PIN_GROUP(msiof0_clk),
+               SH_PFC_PIN_GROUP(msiof0_sync),
+               SH_PFC_PIN_GROUP(msiof0_ss1),
+               SH_PFC_PIN_GROUP(msiof0_ss2),
+               SH_PFC_PIN_GROUP(msiof0_txd),
+               SH_PFC_PIN_GROUP(msiof0_rxd),
+               SH_PFC_PIN_GROUP(msiof1_clk_a),
+               SH_PFC_PIN_GROUP(msiof1_sync_a),
+               SH_PFC_PIN_GROUP(msiof1_ss1_a),
+               SH_PFC_PIN_GROUP(msiof1_ss2_a),
+               SH_PFC_PIN_GROUP(msiof1_txd_a),
+               SH_PFC_PIN_GROUP(msiof1_rxd_a),
+               SH_PFC_PIN_GROUP(msiof1_clk_b),
+               SH_PFC_PIN_GROUP(msiof1_sync_b),
+               SH_PFC_PIN_GROUP(msiof1_ss1_b),
+               SH_PFC_PIN_GROUP(msiof1_ss2_b),
+               SH_PFC_PIN_GROUP(msiof1_txd_b),
+               SH_PFC_PIN_GROUP(msiof1_rxd_b),
+               SH_PFC_PIN_GROUP(msiof1_clk_c),
+               SH_PFC_PIN_GROUP(msiof1_sync_c),
+               SH_PFC_PIN_GROUP(msiof1_ss1_c),
+               SH_PFC_PIN_GROUP(msiof1_ss2_c),
+               SH_PFC_PIN_GROUP(msiof1_txd_c),
+               SH_PFC_PIN_GROUP(msiof1_rxd_c),
+               SH_PFC_PIN_GROUP(msiof1_clk_d),
+               SH_PFC_PIN_GROUP(msiof1_sync_d),
+               SH_PFC_PIN_GROUP(msiof1_ss1_d),
+               SH_PFC_PIN_GROUP(msiof1_ss2_d),
+               SH_PFC_PIN_GROUP(msiof1_txd_d),
+               SH_PFC_PIN_GROUP(msiof1_rxd_d),
+               SH_PFC_PIN_GROUP(msiof1_clk_e),
+               SH_PFC_PIN_GROUP(msiof1_sync_e),
+               SH_PFC_PIN_GROUP(msiof1_ss1_e),
+               SH_PFC_PIN_GROUP(msiof1_ss2_e),
+               SH_PFC_PIN_GROUP(msiof1_txd_e),
+               SH_PFC_PIN_GROUP(msiof1_rxd_e),
+               SH_PFC_PIN_GROUP(msiof1_clk_f),
+               SH_PFC_PIN_GROUP(msiof1_sync_f),
+               SH_PFC_PIN_GROUP(msiof1_ss1_f),
+               SH_PFC_PIN_GROUP(msiof1_ss2_f),
+               SH_PFC_PIN_GROUP(msiof1_txd_f),
+               SH_PFC_PIN_GROUP(msiof1_rxd_f),
+               SH_PFC_PIN_GROUP(msiof1_clk_g),
+               SH_PFC_PIN_GROUP(msiof1_sync_g),
+               SH_PFC_PIN_GROUP(msiof1_ss1_g),
+               SH_PFC_PIN_GROUP(msiof1_ss2_g),
+               SH_PFC_PIN_GROUP(msiof1_txd_g),
+               SH_PFC_PIN_GROUP(msiof1_rxd_g),
+               SH_PFC_PIN_GROUP(msiof2_clk_a),
+               SH_PFC_PIN_GROUP(msiof2_sync_a),
+               SH_PFC_PIN_GROUP(msiof2_ss1_a),
+               SH_PFC_PIN_GROUP(msiof2_ss2_a),
+               SH_PFC_PIN_GROUP(msiof2_txd_a),
+               SH_PFC_PIN_GROUP(msiof2_rxd_a),
+               SH_PFC_PIN_GROUP(msiof2_clk_b),
+               SH_PFC_PIN_GROUP(msiof2_sync_b),
+               SH_PFC_PIN_GROUP(msiof2_ss1_b),
+               SH_PFC_PIN_GROUP(msiof2_ss2_b),
+               SH_PFC_PIN_GROUP(msiof2_txd_b),
+               SH_PFC_PIN_GROUP(msiof2_rxd_b),
+               SH_PFC_PIN_GROUP(msiof2_clk_c),
+               SH_PFC_PIN_GROUP(msiof2_sync_c),
+               SH_PFC_PIN_GROUP(msiof2_ss1_c),
+               SH_PFC_PIN_GROUP(msiof2_ss2_c),
+               SH_PFC_PIN_GROUP(msiof2_txd_c),
+               SH_PFC_PIN_GROUP(msiof2_rxd_c),
+               SH_PFC_PIN_GROUP(msiof2_clk_d),
+               SH_PFC_PIN_GROUP(msiof2_sync_d),
+               SH_PFC_PIN_GROUP(msiof2_ss1_d),
+               SH_PFC_PIN_GROUP(msiof2_ss2_d),
+               SH_PFC_PIN_GROUP(msiof2_txd_d),
+               SH_PFC_PIN_GROUP(msiof2_rxd_d),
+               SH_PFC_PIN_GROUP(msiof3_clk_a),
+               SH_PFC_PIN_GROUP(msiof3_sync_a),
+               SH_PFC_PIN_GROUP(msiof3_ss1_a),
+               SH_PFC_PIN_GROUP(msiof3_ss2_a),
+               SH_PFC_PIN_GROUP(msiof3_txd_a),
+               SH_PFC_PIN_GROUP(msiof3_rxd_a),
+               SH_PFC_PIN_GROUP(msiof3_clk_b),
+               SH_PFC_PIN_GROUP(msiof3_sync_b),
+               SH_PFC_PIN_GROUP(msiof3_ss1_b),
+               SH_PFC_PIN_GROUP(msiof3_ss2_b),
+               SH_PFC_PIN_GROUP(msiof3_txd_b),
+               SH_PFC_PIN_GROUP(msiof3_rxd_b),
+               SH_PFC_PIN_GROUP(msiof3_clk_c),
+               SH_PFC_PIN_GROUP(msiof3_sync_c),
+               SH_PFC_PIN_GROUP(msiof3_txd_c),
+               SH_PFC_PIN_GROUP(msiof3_rxd_c),
+               SH_PFC_PIN_GROUP(msiof3_clk_d),
+               SH_PFC_PIN_GROUP(msiof3_sync_d),
+               SH_PFC_PIN_GROUP(msiof3_ss1_d),
+               SH_PFC_PIN_GROUP(msiof3_txd_d),
+               SH_PFC_PIN_GROUP(msiof3_rxd_d),
+               SH_PFC_PIN_GROUP(msiof3_clk_e),
+               SH_PFC_PIN_GROUP(msiof3_sync_e),
+               SH_PFC_PIN_GROUP(msiof3_ss1_e),
+               SH_PFC_PIN_GROUP(msiof3_ss2_e),
+               SH_PFC_PIN_GROUP(msiof3_txd_e),
+               SH_PFC_PIN_GROUP(msiof3_rxd_e),
+               SH_PFC_PIN_GROUP(pwm0),
+               SH_PFC_PIN_GROUP(pwm1_a),
+               SH_PFC_PIN_GROUP(pwm1_b),
+               SH_PFC_PIN_GROUP(pwm2_a),
+               SH_PFC_PIN_GROUP(pwm2_b),
+               SH_PFC_PIN_GROUP(pwm3_a),
+               SH_PFC_PIN_GROUP(pwm3_b),
+               SH_PFC_PIN_GROUP(pwm4_a),
+               SH_PFC_PIN_GROUP(pwm4_b),
+               SH_PFC_PIN_GROUP(pwm5_a),
+               SH_PFC_PIN_GROUP(pwm5_b),
+               SH_PFC_PIN_GROUP(pwm6_a),
+               SH_PFC_PIN_GROUP(pwm6_b),
+               SH_PFC_PIN_GROUP(sata0_devslp_a),
+               SH_PFC_PIN_GROUP(sata0_devslp_b),
+               SH_PFC_PIN_GROUP(scif0_data),
+               SH_PFC_PIN_GROUP(scif0_clk),
+               SH_PFC_PIN_GROUP(scif0_ctrl),
+               SH_PFC_PIN_GROUP(scif1_data_a),
+               SH_PFC_PIN_GROUP(scif1_clk),
+               SH_PFC_PIN_GROUP(scif1_ctrl),
+               SH_PFC_PIN_GROUP(scif1_data_b),
+               SH_PFC_PIN_GROUP(scif2_data_a),
+               SH_PFC_PIN_GROUP(scif2_clk),
+               SH_PFC_PIN_GROUP(scif2_data_b),
+               SH_PFC_PIN_GROUP(scif3_data_a),
+               SH_PFC_PIN_GROUP(scif3_clk),
+               SH_PFC_PIN_GROUP(scif3_ctrl),
+               SH_PFC_PIN_GROUP(scif3_data_b),
+               SH_PFC_PIN_GROUP(scif4_data_a),
+               SH_PFC_PIN_GROUP(scif4_clk_a),
+               SH_PFC_PIN_GROUP(scif4_ctrl_a),
+               SH_PFC_PIN_GROUP(scif4_data_b),
+               SH_PFC_PIN_GROUP(scif4_clk_b),
+               SH_PFC_PIN_GROUP(scif4_ctrl_b),
+               SH_PFC_PIN_GROUP(scif4_data_c),
+               SH_PFC_PIN_GROUP(scif4_clk_c),
+               SH_PFC_PIN_GROUP(scif4_ctrl_c),
+               SH_PFC_PIN_GROUP(scif5_data_a),
+               SH_PFC_PIN_GROUP(scif5_clk_a),
+               SH_PFC_PIN_GROUP(scif5_data_b),
+               SH_PFC_PIN_GROUP(scif5_clk_b),
+               SH_PFC_PIN_GROUP(scif_clk_a),
+               SH_PFC_PIN_GROUP(scif_clk_b),
+               SH_PFC_PIN_GROUP(sdhi0_data1),
+               SH_PFC_PIN_GROUP(sdhi0_data4),
+               SH_PFC_PIN_GROUP(sdhi0_ctrl),
+               SH_PFC_PIN_GROUP(sdhi0_cd),
+               SH_PFC_PIN_GROUP(sdhi0_wp),
+               SH_PFC_PIN_GROUP(sdhi1_data1),
+               SH_PFC_PIN_GROUP(sdhi1_data4),
+               SH_PFC_PIN_GROUP(sdhi1_ctrl),
+               SH_PFC_PIN_GROUP(sdhi1_cd),
+               SH_PFC_PIN_GROUP(sdhi1_wp),
+               SH_PFC_PIN_GROUP(sdhi2_data1),
+               SH_PFC_PIN_GROUP(sdhi2_data4),
+               SH_PFC_PIN_GROUP(sdhi2_data8),
+               SH_PFC_PIN_GROUP(sdhi2_ctrl),
+               SH_PFC_PIN_GROUP(sdhi2_cd_a),
+               SH_PFC_PIN_GROUP(sdhi2_wp_a),
+               SH_PFC_PIN_GROUP(sdhi2_cd_b),
+               SH_PFC_PIN_GROUP(sdhi2_wp_b),
+               SH_PFC_PIN_GROUP(sdhi2_ds),
+               SH_PFC_PIN_GROUP(sdhi3_data1),
+               SH_PFC_PIN_GROUP(sdhi3_data4),
+               SH_PFC_PIN_GROUP(sdhi3_data8),
+               SH_PFC_PIN_GROUP(sdhi3_ctrl),
+               SH_PFC_PIN_GROUP(sdhi3_cd),
+               SH_PFC_PIN_GROUP(sdhi3_wp),
+               SH_PFC_PIN_GROUP(sdhi3_ds),
+               SH_PFC_PIN_GROUP(ssi0_data),
+               SH_PFC_PIN_GROUP(ssi01239_ctrl),
+               SH_PFC_PIN_GROUP(ssi1_data_a),
+               SH_PFC_PIN_GROUP(ssi1_data_b),
+               SH_PFC_PIN_GROUP(ssi1_ctrl_a),
+               SH_PFC_PIN_GROUP(ssi1_ctrl_b),
+               SH_PFC_PIN_GROUP(ssi2_data_a),
+               SH_PFC_PIN_GROUP(ssi2_data_b),
+               SH_PFC_PIN_GROUP(ssi2_ctrl_a),
+               SH_PFC_PIN_GROUP(ssi2_ctrl_b),
+               SH_PFC_PIN_GROUP(ssi3_data),
+               SH_PFC_PIN_GROUP(ssi349_ctrl),
+               SH_PFC_PIN_GROUP(ssi4_data),
+               SH_PFC_PIN_GROUP(ssi4_ctrl),
+               SH_PFC_PIN_GROUP(ssi5_data),
+               SH_PFC_PIN_GROUP(ssi5_ctrl),
+               SH_PFC_PIN_GROUP(ssi6_data),
+               SH_PFC_PIN_GROUP(ssi6_ctrl),
+               SH_PFC_PIN_GROUP(ssi7_data),
+               SH_PFC_PIN_GROUP(ssi78_ctrl),
+               SH_PFC_PIN_GROUP(ssi8_data),
+               SH_PFC_PIN_GROUP(ssi9_data_a),
+               SH_PFC_PIN_GROUP(ssi9_data_b),
+               SH_PFC_PIN_GROUP(ssi9_ctrl_a),
+               SH_PFC_PIN_GROUP(ssi9_ctrl_b),
+               SH_PFC_PIN_GROUP(tmu_tclk1_a),
+               SH_PFC_PIN_GROUP(tmu_tclk1_b),
+               SH_PFC_PIN_GROUP(tmu_tclk2_a),
+               SH_PFC_PIN_GROUP(tmu_tclk2_b),
+               SH_PFC_PIN_GROUP(tpu_to0),
+               SH_PFC_PIN_GROUP(tpu_to1),
+               SH_PFC_PIN_GROUP(tpu_to2),
+               SH_PFC_PIN_GROUP(tpu_to3),
+               SH_PFC_PIN_GROUP(usb0),
+               SH_PFC_PIN_GROUP(usb1),
+               SH_PFC_PIN_GROUP(usb2),
+               SH_PFC_PIN_GROUP(usb2_ch3),
+               SH_PFC_PIN_GROUP(usb30),
+               VIN_DATA_PIN_GROUP(vin4_data, 8, _a),
+               VIN_DATA_PIN_GROUP(vin4_data, 10, _a),
+               VIN_DATA_PIN_GROUP(vin4_data, 12, _a),
+               VIN_DATA_PIN_GROUP(vin4_data, 16, _a),
+               SH_PFC_PIN_GROUP(vin4_data18_a),
+               VIN_DATA_PIN_GROUP(vin4_data, 20, _a),
+               VIN_DATA_PIN_GROUP(vin4_data, 24, _a),
+               VIN_DATA_PIN_GROUP(vin4_data, 8, _b),
+               VIN_DATA_PIN_GROUP(vin4_data, 10, _b),
+               VIN_DATA_PIN_GROUP(vin4_data, 12, _b),
+               VIN_DATA_PIN_GROUP(vin4_data, 16, _b),
+               SH_PFC_PIN_GROUP(vin4_data18_b),
+               VIN_DATA_PIN_GROUP(vin4_data, 20, _b),
+               VIN_DATA_PIN_GROUP(vin4_data, 24, _b),
+               SH_PFC_PIN_GROUP(vin4_sync),
+               SH_PFC_PIN_GROUP(vin4_field),
+               SH_PFC_PIN_GROUP(vin4_clkenb),
+               SH_PFC_PIN_GROUP(vin4_clk),
+               VIN_DATA_PIN_GROUP(vin5_data, 8),
+               VIN_DATA_PIN_GROUP(vin5_data, 10),
+               VIN_DATA_PIN_GROUP(vin5_data, 12),
+               VIN_DATA_PIN_GROUP(vin5_data, 16),
+               SH_PFC_PIN_GROUP(vin5_sync),
+               SH_PFC_PIN_GROUP(vin5_field),
+               SH_PFC_PIN_GROUP(vin5_clkenb),
+               SH_PFC_PIN_GROUP(vin5_clk),
+       },
+       .automotive = {
+               SH_PFC_PIN_GROUP(drif0_ctrl_a),
+               SH_PFC_PIN_GROUP(drif0_data0_a),
+               SH_PFC_PIN_GROUP(drif0_data1_a),
+               SH_PFC_PIN_GROUP(drif0_ctrl_b),
+               SH_PFC_PIN_GROUP(drif0_data0_b),
+               SH_PFC_PIN_GROUP(drif0_data1_b),
+               SH_PFC_PIN_GROUP(drif0_ctrl_c),
+               SH_PFC_PIN_GROUP(drif0_data0_c),
+               SH_PFC_PIN_GROUP(drif0_data1_c),
+               SH_PFC_PIN_GROUP(drif1_ctrl_a),
+               SH_PFC_PIN_GROUP(drif1_data0_a),
+               SH_PFC_PIN_GROUP(drif1_data1_a),
+               SH_PFC_PIN_GROUP(drif1_ctrl_b),
+               SH_PFC_PIN_GROUP(drif1_data0_b),
+               SH_PFC_PIN_GROUP(drif1_data1_b),
+               SH_PFC_PIN_GROUP(drif1_ctrl_c),
+               SH_PFC_PIN_GROUP(drif1_data0_c),
+               SH_PFC_PIN_GROUP(drif1_data1_c),
+               SH_PFC_PIN_GROUP(drif2_ctrl_a),
+               SH_PFC_PIN_GROUP(drif2_data0_a),
+               SH_PFC_PIN_GROUP(drif2_data1_a),
+               SH_PFC_PIN_GROUP(drif2_ctrl_b),
+               SH_PFC_PIN_GROUP(drif2_data0_b),
+               SH_PFC_PIN_GROUP(drif2_data1_b),
+               SH_PFC_PIN_GROUP(drif3_ctrl_a),
+               SH_PFC_PIN_GROUP(drif3_data0_a),
+               SH_PFC_PIN_GROUP(drif3_data1_a),
+               SH_PFC_PIN_GROUP(drif3_ctrl_b),
+               SH_PFC_PIN_GROUP(drif3_data0_b),
+               SH_PFC_PIN_GROUP(drif3_data1_b),
+       }
+
 };
 
 static const char * const audio_clk_groups[] = {
@@ -5031,64 +5039,72 @@ static const char * const vin5_groups[] = {
        "vin5_clk",
 };
 
-static const struct sh_pfc_function pinmux_functions[] = {
-       SH_PFC_FUNCTION(audio_clk),
-       SH_PFC_FUNCTION(avb),
-       SH_PFC_FUNCTION(can0),
-       SH_PFC_FUNCTION(can1),
-       SH_PFC_FUNCTION(can_clk),
-       SH_PFC_FUNCTION(canfd0),
-       SH_PFC_FUNCTION(canfd1),
-       SH_PFC_FUNCTION(drif0),
-       SH_PFC_FUNCTION(drif1),
-       SH_PFC_FUNCTION(drif2),
-       SH_PFC_FUNCTION(drif3),
-       SH_PFC_FUNCTION(du),
-       SH_PFC_FUNCTION(hscif0),
-       SH_PFC_FUNCTION(hscif1),
-       SH_PFC_FUNCTION(hscif2),
-       SH_PFC_FUNCTION(hscif3),
-       SH_PFC_FUNCTION(hscif4),
-       SH_PFC_FUNCTION(i2c0),
-       SH_PFC_FUNCTION(i2c1),
-       SH_PFC_FUNCTION(i2c2),
-       SH_PFC_FUNCTION(i2c3),
-       SH_PFC_FUNCTION(i2c5),
-       SH_PFC_FUNCTION(i2c6),
-       SH_PFC_FUNCTION(intc_ex),
-       SH_PFC_FUNCTION(msiof0),
-       SH_PFC_FUNCTION(msiof1),
-       SH_PFC_FUNCTION(msiof2),
-       SH_PFC_FUNCTION(msiof3),
-       SH_PFC_FUNCTION(pwm0),
-       SH_PFC_FUNCTION(pwm1),
-       SH_PFC_FUNCTION(pwm2),
-       SH_PFC_FUNCTION(pwm3),
-       SH_PFC_FUNCTION(pwm4),
-       SH_PFC_FUNCTION(pwm5),
-       SH_PFC_FUNCTION(pwm6),
-       SH_PFC_FUNCTION(sata0),
-       SH_PFC_FUNCTION(scif0),
-       SH_PFC_FUNCTION(scif1),
-       SH_PFC_FUNCTION(scif2),
-       SH_PFC_FUNCTION(scif3),
-       SH_PFC_FUNCTION(scif4),
-       SH_PFC_FUNCTION(scif5),
-       SH_PFC_FUNCTION(scif_clk),
-       SH_PFC_FUNCTION(sdhi0),
-       SH_PFC_FUNCTION(sdhi1),
-       SH_PFC_FUNCTION(sdhi2),
-       SH_PFC_FUNCTION(sdhi3),
-       SH_PFC_FUNCTION(ssi),
-       SH_PFC_FUNCTION(tmu),
-       SH_PFC_FUNCTION(tpu),
-       SH_PFC_FUNCTION(usb0),
-       SH_PFC_FUNCTION(usb1),
-       SH_PFC_FUNCTION(usb2),
-       SH_PFC_FUNCTION(usb2_ch3),
-       SH_PFC_FUNCTION(usb30),
-       SH_PFC_FUNCTION(vin4),
-       SH_PFC_FUNCTION(vin5),
+static const struct {
+       struct sh_pfc_function common[53];
+       struct sh_pfc_function automotive[4];
+} pinmux_functions = {
+       .common = {
+               SH_PFC_FUNCTION(audio_clk),
+               SH_PFC_FUNCTION(avb),
+               SH_PFC_FUNCTION(can0),
+               SH_PFC_FUNCTION(can1),
+               SH_PFC_FUNCTION(can_clk),
+               SH_PFC_FUNCTION(canfd0),
+               SH_PFC_FUNCTION(canfd1),
+               SH_PFC_FUNCTION(du),
+               SH_PFC_FUNCTION(hscif0),
+               SH_PFC_FUNCTION(hscif1),
+               SH_PFC_FUNCTION(hscif2),
+               SH_PFC_FUNCTION(hscif3),
+               SH_PFC_FUNCTION(hscif4),
+               SH_PFC_FUNCTION(i2c0),
+               SH_PFC_FUNCTION(i2c1),
+               SH_PFC_FUNCTION(i2c2),
+               SH_PFC_FUNCTION(i2c3),
+               SH_PFC_FUNCTION(i2c5),
+               SH_PFC_FUNCTION(i2c6),
+               SH_PFC_FUNCTION(intc_ex),
+               SH_PFC_FUNCTION(msiof0),
+               SH_PFC_FUNCTION(msiof1),
+               SH_PFC_FUNCTION(msiof2),
+               SH_PFC_FUNCTION(msiof3),
+               SH_PFC_FUNCTION(pwm0),
+               SH_PFC_FUNCTION(pwm1),
+               SH_PFC_FUNCTION(pwm2),
+               SH_PFC_FUNCTION(pwm3),
+               SH_PFC_FUNCTION(pwm4),
+               SH_PFC_FUNCTION(pwm5),
+               SH_PFC_FUNCTION(pwm6),
+               SH_PFC_FUNCTION(sata0),
+               SH_PFC_FUNCTION(scif0),
+               SH_PFC_FUNCTION(scif1),
+               SH_PFC_FUNCTION(scif2),
+               SH_PFC_FUNCTION(scif3),
+               SH_PFC_FUNCTION(scif4),
+               SH_PFC_FUNCTION(scif5),
+               SH_PFC_FUNCTION(scif_clk),
+               SH_PFC_FUNCTION(sdhi0),
+               SH_PFC_FUNCTION(sdhi1),
+               SH_PFC_FUNCTION(sdhi2),
+               SH_PFC_FUNCTION(sdhi3),
+               SH_PFC_FUNCTION(ssi),
+               SH_PFC_FUNCTION(tmu),
+               SH_PFC_FUNCTION(tpu),
+               SH_PFC_FUNCTION(usb0),
+               SH_PFC_FUNCTION(usb1),
+               SH_PFC_FUNCTION(usb2),
+               SH_PFC_FUNCTION(usb2_ch3),
+               SH_PFC_FUNCTION(usb30),
+               SH_PFC_FUNCTION(vin4),
+               SH_PFC_FUNCTION(vin5),
+       },
+       .automotive = {
+               SH_PFC_FUNCTION(drif0),
+               SH_PFC_FUNCTION(drif1),
+               SH_PFC_FUNCTION(drif2),
+               SH_PFC_FUNCTION(drif3),
+       }
+
 };
 
 static const struct pinmux_cfg_reg pinmux_config_regs[] = {
@@ -5777,7 +5793,9 @@ static const struct pinmux_drive_reg pinmux_drive_regs[] = {
                { PIN_DU_DOTCLKIN1,    0, 2 },  /* DU_DOTCLKIN1 */
        } },
        { PINMUX_DRIVE_REG("DRVCTRL12", 0xe6060330) {
+#ifdef CONFIG_PINCTRL_PFC_R8A77951
                { PIN_DU_DOTCLKIN2,   28, 2 },  /* DU_DOTCLKIN2 */
+#endif
                { PIN_DU_DOTCLKIN3,   24, 2 },  /* DU_DOTCLKIN3 */
                { PIN_FSCLKST_N,      20, 2 },  /* FSCLKST# */
                { PIN_TMS,             4, 2 },  /* TMS */
@@ -5898,8 +5916,8 @@ static const struct pinmux_drive_reg pinmux_drive_regs[] = {
                { RCAR_GP_PIN(6, 27), 20, 3 },  /* USB1_OVC */
                { RCAR_GP_PIN(6, 28), 16, 3 },  /* USB30_PWEN */
                { RCAR_GP_PIN(6, 29), 12, 3 },  /* USB30_OVC */
-               { RCAR_GP_PIN(6, 30),  8, 3 },  /* USB2_CH3_PWEN */
-               { RCAR_GP_PIN(6, 31),  4, 3 },  /* USB2_CH3_OVC */
+               { RCAR_GP_PIN(6, 30),  8, 3 },  /* GP6_30/USB2_CH3_PWEN */
+               { RCAR_GP_PIN(6, 31),  4, 3 },  /* GP6_31/USB2_CH3_OVC */
        } },
        { },
 };
@@ -6220,6 +6238,32 @@ static const struct sh_pfc_soc_operations r8a77951_pinmux_ops = {
        .set_bias = r8a77951_pinmux_set_bias,
 };
 
+#ifdef CONFIG_PINCTRL_PFC_R8A774E1
+const struct sh_pfc_soc_info r8a774e1_pinmux_info = {
+       .name = "r8a774e1_pfc",
+       .ops = &r8a77951_pinmux_ops,
+       .unlock_reg = 0xe6060000, /* PMMR */
+
+       .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+       .pins = pinmux_pins,
+       .nr_pins = ARRAY_SIZE(pinmux_pins),
+       .groups = pinmux_groups.common,
+       .nr_groups = ARRAY_SIZE(pinmux_groups.common),
+       .functions = pinmux_functions.common,
+       .nr_functions = ARRAY_SIZE(pinmux_functions.common),
+
+       .cfg_regs = pinmux_config_regs,
+       .drive_regs = pinmux_drive_regs,
+       .bias_regs = pinmux_bias_regs,
+       .ioctrl_regs = pinmux_ioctrl_regs,
+
+       .pinmux_data = pinmux_data,
+       .pinmux_data_size = ARRAY_SIZE(pinmux_data),
+};
+#endif
+
+#ifdef CONFIG_PINCTRL_PFC_R8A77951
 const struct sh_pfc_soc_info r8a77951_pinmux_info = {
        .name = "r8a77951_pfc",
        .ops = &r8a77951_pinmux_ops,
@@ -6229,10 +6273,12 @@ const struct sh_pfc_soc_info r8a77951_pinmux_info = {
 
        .pins = pinmux_pins,
        .nr_pins = ARRAY_SIZE(pinmux_pins),
-       .groups = pinmux_groups,
-       .nr_groups = ARRAY_SIZE(pinmux_groups),
-       .functions = pinmux_functions,
-       .nr_functions = ARRAY_SIZE(pinmux_functions),
+       .groups = pinmux_groups.common,
+       .nr_groups = ARRAY_SIZE(pinmux_groups.common) +
+                       ARRAY_SIZE(pinmux_groups.automotive),
+       .functions = pinmux_functions.common,
+       .nr_functions = ARRAY_SIZE(pinmux_functions.common) +
+                       ARRAY_SIZE(pinmux_functions.automotive),
 
        .cfg_regs = pinmux_config_regs,
        .drive_regs = pinmux_drive_regs,
@@ -6242,3 +6288,4 @@ const struct sh_pfc_soc_info r8a77951_pinmux_info = {
        .pinmux_data = pinmux_data,
        .pinmux_data_size = ARRAY_SIZE(pinmux_data),
 };
+#endif
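
The hunk above splits the R8A77951 tables into a common part and an automotive-only part (DRIF), so the new R8A774E1 entry reuses just the common groups and functions while R8A77951 keeps advertising both by reporting ARRAY_SIZE(common) + ARRAY_SIZE(automotive) against a pointer to .common. That only works because the two member arrays sit back to back in the wrapper struct. Below is a minimal standalone sketch of the trick with purely illustrative names, not the driver's types; like the driver, it relies on the two arrays being laid out contiguously.

#include <stdio.h>

struct item { const char *name; };

static const struct {
	struct item common[2];
	struct item automotive[1];
} table = {
	.common     = { { "scif0" }, { "sdhi0" } },
	.automotive = { { "drif0" } },
};

int main(void)
{
	/* One pointer covers both arrays because they are adjacent. */
	const struct item *all = table.common;
	size_t n_common = sizeof(table.common) / sizeof(table.common[0]);
	size_t n_all = n_common +
		       sizeof(table.automotive) / sizeof(table.automotive[0]);

	for (size_t i = 0; i < n_all; i++)	/* prints scif0, sdhi0, drif0 */
		printf("%s\n", all[i].name);
	return 0;
}

A reduced SoC simply reports n_common instead of n_all against the same pointer, which is exactly what the R8A774E1 info structure does above.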
index 25e27b6..9f7d9c9 100644 (file)
@@ -1416,6 +1416,64 @@ static const unsigned int qspi1_data4_mux[] = {
        QSPI1_IO2_MARK, QSPI1_IO3_MARK
 };
 
+/* - RPC -------------------------------------------------------------------- */
+static const unsigned int rpc_clk1_pins[] = {
+       /* Octal-SPI flash: C/SCLK */
+       RCAR_GP_PIN(5, 0),
+};
+static const unsigned int rpc_clk1_mux[] = {
+       QSPI0_SPCLK_MARK,
+};
+static const unsigned int rpc_clk2_pins[] = {
+       /* HyperFlash: CK, CK# */
+       RCAR_GP_PIN(5, 0), RCAR_GP_PIN(5, 6),
+};
+static const unsigned int rpc_clk2_mux[] = {
+       QSPI0_SPCLK_MARK, QSPI1_SPCLK_MARK,
+};
+static const unsigned int rpc_ctrl_pins[] = {
+       /* Octal-SPI flash: S#/CS, DQS */
+       /* HyperFlash: CS#, RDS */
+       RCAR_GP_PIN(5, 5), RCAR_GP_PIN(5, 11),
+};
+static const unsigned int rpc_ctrl_mux[] = {
+       QSPI0_SSL_MARK, QSPI1_SSL_MARK,
+};
+static const unsigned int rpc_data_pins[] = {
+       /* DQ[0:7] */
+       RCAR_GP_PIN(5, 1), RCAR_GP_PIN(5, 2),
+       RCAR_GP_PIN(5, 3), RCAR_GP_PIN(5, 4),
+       RCAR_GP_PIN(5, 7), RCAR_GP_PIN(5, 8),
+       RCAR_GP_PIN(5, 9), RCAR_GP_PIN(5, 10),
+};
+static const unsigned int rpc_data_mux[] = {
+       QSPI0_MOSI_IO0_MARK, QSPI0_MISO_IO1_MARK,
+       QSPI0_IO2_MARK, QSPI0_IO3_MARK,
+       QSPI1_MOSI_IO0_MARK, QSPI1_MISO_IO1_MARK,
+       QSPI1_IO2_MARK, QSPI1_IO3_MARK,
+};
+static const unsigned int rpc_reset_pins[] = {
+       /* RPC_RESET# */
+       RCAR_GP_PIN(5, 12),
+};
+static const unsigned int rpc_reset_mux[] = {
+       RPC_RESET_N_MARK,
+};
+static const unsigned int rpc_int_pins[] = {
+       /* RPC_INT# */
+       RCAR_GP_PIN(5, 14),
+};
+static const unsigned int rpc_int_mux[] = {
+       RPC_INT_N_MARK,
+};
+static const unsigned int rpc_wp_pins[] = {
+       /* RPC_WP# */
+       RCAR_GP_PIN(5, 13),
+};
+static const unsigned int rpc_wp_mux[] = {
+       RPC_WP_N_MARK,
+};
+
 /* - SCIF Clock ------------------------------------------------------------- */
 static const unsigned int scif_clk_a_pins[] = {
        /* SCIF_CLK */
@@ -1750,6 +1808,13 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
        SH_PFC_PIN_GROUP(qspi1_ctrl),
        SH_PFC_PIN_GROUP(qspi1_data2),
        SH_PFC_PIN_GROUP(qspi1_data4),
+       SH_PFC_PIN_GROUP(rpc_clk1),
+       SH_PFC_PIN_GROUP(rpc_clk2),
+       SH_PFC_PIN_GROUP(rpc_ctrl),
+       SH_PFC_PIN_GROUP(rpc_data),
+       SH_PFC_PIN_GROUP(rpc_reset),
+       SH_PFC_PIN_GROUP(rpc_int),
+       SH_PFC_PIN_GROUP(rpc_wp),
        SH_PFC_PIN_GROUP(scif_clk_a),
        SH_PFC_PIN_GROUP(scif_clk_b),
        SH_PFC_PIN_GROUP(scif0_data),
@@ -1954,6 +2019,16 @@ static const char * const qspi1_groups[] = {
        "qspi1_data4",
 };
 
+static const char * const rpc_groups[] = {
+       "rpc_clk1",
+       "rpc_clk2",
+       "rpc_ctrl",
+       "rpc_data",
+       "rpc_reset",
+       "rpc_int",
+       "rpc_wp",
+};
+
 static const char * const scif_clk_groups[] = {
        "scif_clk_a",
        "scif_clk_b",
@@ -2039,6 +2114,7 @@ static const struct sh_pfc_function pinmux_functions[] = {
        SH_PFC_FUNCTION(pwm4),
        SH_PFC_FUNCTION(qspi0),
        SH_PFC_FUNCTION(qspi1),
+       SH_PFC_FUNCTION(rpc),
        SH_PFC_FUNCTION(scif_clk),
        SH_PFC_FUNCTION(scif0),
        SH_PFC_FUNCTION(scif1),
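
For context on the hunks above: registering the "rpc" function with rpc_groups[] lets a board description later select, for example, "rpc_clk2", "rpc_ctrl" and "rpc_data" under function "rpc" for HyperFlash, reusing the QSPI0/QSPI1 pins. The following is only a hedged sketch of what SH_PFC_FUNCTION() is expected to expand to, based on the usual sh-pfc convention rather than anything shown in this patch:

/*
 * Assumed expansion (sketch only): the macro ties the function name to the
 * matching <name>_groups[] array declared above.
 */
#define SH_PFC_FUNCTION_SKETCH(n)			\
	{						\
		.name = #n,				\
		.groups = n##_groups,			\
		.nr_groups = ARRAY_SIZE(n##_groups),	\
	}

/* ...so pinmux_functions[] would gain { "rpc", rpc_groups, 7 } here. */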
index 14fe403..1055f98 100644 (file)
@@ -1710,6 +1710,64 @@ static const unsigned int qspi1_data4_mux[] = {
        QSPI1_IO2_MARK, QSPI1_IO3_MARK
 };
 
+/* - RPC -------------------------------------------------------------------- */
+static const unsigned int rpc_clk1_pins[] = {
+       /* Octal-SPI flash: C/SCLK */
+       RCAR_GP_PIN(5, 0),
+};
+static const unsigned int rpc_clk1_mux[] = {
+       QSPI0_SPCLK_MARK,
+};
+static const unsigned int rpc_clk2_pins[] = {
+       /* HyperFlash: CK, CK# */
+       RCAR_GP_PIN(5, 0), RCAR_GP_PIN(5, 6),
+};
+static const unsigned int rpc_clk2_mux[] = {
+       QSPI0_SPCLK_MARK, QSPI1_SPCLK_MARK,
+};
+static const unsigned int rpc_ctrl_pins[] = {
+       /* Octal-SPI flash: S#/CS, DQS */
+       /* HyperFlash: CS#, RDS */
+       RCAR_GP_PIN(5, 5), RCAR_GP_PIN(5, 11),
+};
+static const unsigned int rpc_ctrl_mux[] = {
+       QSPI0_SSL_MARK, QSPI1_SSL_MARK,
+};
+static const unsigned int rpc_data_pins[] = {
+       /* DQ[0:7] */
+       RCAR_GP_PIN(5, 1), RCAR_GP_PIN(5, 2),
+       RCAR_GP_PIN(5, 3), RCAR_GP_PIN(5, 4),
+       RCAR_GP_PIN(5, 7), RCAR_GP_PIN(5, 8),
+       RCAR_GP_PIN(5, 9), RCAR_GP_PIN(5, 10),
+};
+static const unsigned int rpc_data_mux[] = {
+       QSPI0_MOSI_IO0_MARK, QSPI0_MISO_IO1_MARK,
+       QSPI0_IO2_MARK, QSPI0_IO3_MARK,
+       QSPI1_MOSI_IO0_MARK, QSPI1_MISO_IO1_MARK,
+       QSPI1_IO2_MARK, QSPI1_IO3_MARK,
+};
+static const unsigned int rpc_reset_pins[] = {
+       /* RPC_RESET# */
+       RCAR_GP_PIN(5, 12),
+};
+static const unsigned int rpc_reset_mux[] = {
+       RPC_RESET_N_MARK,
+};
+static const unsigned int rpc_int_pins[] = {
+       /* RPC_INT# */
+       RCAR_GP_PIN(5, 14),
+};
+static const unsigned int rpc_int_mux[] = {
+       RPC_INT_N_MARK,
+};
+static const unsigned int rpc_wp_pins[] = {
+       /* RPC_WP# */
+       RCAR_GP_PIN(5, 13),
+};
+static const unsigned int rpc_wp_mux[] = {
+       RPC_WP_N_MARK,
+};
+
 /* - SCIF0 ------------------------------------------------------------------ */
 static const unsigned int scif0_data_pins[] = {
        /* RX0, TX0 */
@@ -2126,6 +2184,13 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
        SH_PFC_PIN_GROUP(qspi1_ctrl),
        SH_PFC_PIN_GROUP(qspi1_data2),
        SH_PFC_PIN_GROUP(qspi1_data4),
+       SH_PFC_PIN_GROUP(rpc_clk1),
+       SH_PFC_PIN_GROUP(rpc_clk2),
+       SH_PFC_PIN_GROUP(rpc_ctrl),
+       SH_PFC_PIN_GROUP(rpc_data),
+       SH_PFC_PIN_GROUP(rpc_reset),
+       SH_PFC_PIN_GROUP(rpc_int),
+       SH_PFC_PIN_GROUP(rpc_wp),
        SH_PFC_PIN_GROUP(scif0_data),
        SH_PFC_PIN_GROUP(scif0_clk),
        SH_PFC_PIN_GROUP(scif0_ctrl),
@@ -2362,6 +2427,16 @@ static const char * const qspi1_groups[] = {
        "qspi1_data4",
 };
 
+static const char * const rpc_groups[] = {
+       "rpc_clk1",
+       "rpc_clk2",
+       "rpc_ctrl",
+       "rpc_data",
+       "rpc_reset",
+       "rpc_int",
+       "rpc_wp",
+};
+
 static const char * const scif0_groups[] = {
        "scif0_data",
        "scif0_clk",
@@ -2460,6 +2535,7 @@ static const struct sh_pfc_function pinmux_functions[] = {
        SH_PFC_FUNCTION(pwm4),
        SH_PFC_FUNCTION(qspi0),
        SH_PFC_FUNCTION(qspi1),
+       SH_PFC_FUNCTION(rpc),
        SH_PFC_FUNCTION(scif0),
        SH_PFC_FUNCTION(scif1),
        SH_PFC_FUNCTION(scif3),
index 0f01382..eff1bb8 100644 (file)
@@ -312,6 +312,7 @@ extern const struct sh_pfc_soc_info r8a77470_pinmux_info;
 extern const struct sh_pfc_soc_info r8a774a1_pinmux_info;
 extern const struct sh_pfc_soc_info r8a774b1_pinmux_info;
 extern const struct sh_pfc_soc_info r8a774c0_pinmux_info;
+extern const struct sh_pfc_soc_info r8a774e1_pinmux_info;
 extern const struct sh_pfc_soc_info r8a7778_pinmux_info;
 extern const struct sh_pfc_soc_info r8a7779_pinmux_info;
 extern const struct sh_pfc_soc_info r8a7790_pinmux_info;
index 50df9e0..e54a6e3 100644 (file)
@@ -169,7 +169,7 @@ struct dt_params {
 
 /**
  * struct atlas7_pad_conf - Atlas7 Pad Configuration
- * @id                 The ID of this Pad.
+ * @id:                        The ID of this Pad.
  * @type:              The type of this Pad.
  * @mux_reg:           The mux register offset.
  *                     This register contains the mux.
@@ -210,7 +210,7 @@ struct atlas7_pad_config {
                .ad_ctrl_bit = adb,                             \
        }
 
-/**
+/*
  * struct atlas7_pad_status - Atlas7 Pad status
  */
 struct atlas7_pad_status {
@@ -355,10 +355,6 @@ struct atlas7_gpio_chip {
        struct atlas7_gpio_bank banks[];
 };
 
-/**
- * @dev: a pointer back to containing device
- * @virtbase: the offset to the controller in virtual memory
- */
 struct atlas7_pmx {
        struct device *dev;
        struct pinctrl_dev *pctl;
@@ -376,7 +372,7 @@ struct atlas7_pmx {
  * refer to A7DA IO Summary - CS-314158-DD-4E.xls
  */
 
-/*Pads in IOC RTC & TOP */
+/* Pads in IOC RTC & TOP */
 static const struct pinctrl_pin_desc atlas7_ioc_pads[] = {
        /* RTC PADs */
        PINCTRL_PIN(0, "rtc_gpio_0"),
@@ -4781,10 +4777,10 @@ struct map_data {
 
 /**
  * struct atlas7_pull_info - Atlas7 Pad pull info
- * @type:The type of this Pad.
- * @mask:The mas value of this pin's pull bits.
- * @v2s: The map of pull register value to pull status.
- * @s2v: The map of pull status to pull register value.
+ * @pad_type:  The type of this Pad.
+ * @mask:      The mask value of this pin's pull bits.
+ * @v2s:       The map of pull register value to pull status.
+ * @s2v:       The map of pull status to pull register value.
  */
 struct atlas7_pull_info {
        u8 pad_type;
@@ -4908,6 +4904,7 @@ static const struct atlas7_ds_ma_info atlas7_ma2ds_map[] = {
  * @type:              The type of this Pad.
  * @mask:              The mask value of this pin's pull bits.
  * @imval:             The immediate value of drive strength register.
+ * @reserved:          Reserved space
  */
 struct atlas7_ds_info {
        u8 type;
@@ -5609,7 +5606,7 @@ static int __init atlas7_pinmux_init(void)
 arch_initcall(atlas7_pinmux_init);
 
 
-/**
+/*
  * The Following is GPIO Code
  */
 static inline struct
index a657cd8..7d9bded 100644 (file)
@@ -64,7 +64,7 @@
 #define gpio_range_to_bank(chip) \
                container_of(chip, struct stm32_gpio_bank, range)
 
-#define HWSPINLOCK_TIMEOUT     5 /* msec */
+#define HWSPNLCK_TIMEOUT       1000 /* usec */
 
 static const char * const stm32_gpio_functions[] = {
        "gpio", "af0", "af1",
@@ -84,6 +84,7 @@ struct stm32_pinctrl_group {
 struct stm32_gpio_bank {
        void __iomem *base;
        struct clk *clk;
+       struct reset_control *rstc;
        spinlock_t lock;
        struct gpio_chip gpio_chip;
        struct pinctrl_gpio_range range;
@@ -302,6 +303,7 @@ static const struct gpio_chip stm32_gpio_template = {
        .direction_output       = stm32_gpio_direction_output,
        .to_irq                 = stm32_gpio_to_irq,
        .get_direction          = stm32_gpio_get_direction,
+       .set_config             = gpiochip_generic_config,
 };
 
 static void stm32_gpio_irq_trigger(struct irq_data *d)
@@ -420,12 +422,14 @@ static int stm32_gpio_domain_activate(struct irq_domain *d,
         * to avoid overriding.
         */
        spin_lock_irqsave(&pctl->irqmux_lock, flags);
-       if (pctl->hwlock)
-               ret = hwspin_lock_timeout(pctl->hwlock, HWSPINLOCK_TIMEOUT);
 
-       if (ret) {
-               dev_err(pctl->dev, "Can't get hwspinlock\n");
-               goto unlock;
+       if (pctl->hwlock) {
+               ret = hwspin_lock_timeout_in_atomic(pctl->hwlock,
+                                                   HWSPNLCK_TIMEOUT);
+               if (ret) {
+                       dev_err(pctl->dev, "Can't get hwspinlock\n");
+                       goto unlock;
+               }
        }
 
        if (pctl->irqmux_map & BIT(irq_data->hwirq)) {
@@ -433,7 +437,7 @@ static int stm32_gpio_domain_activate(struct irq_domain *d,
                        irq_data->hwirq);
                ret = -EBUSY;
                if (pctl->hwlock)
-                       hwspin_unlock(pctl->hwlock);
+                       hwspin_unlock_in_atomic(pctl->hwlock);
                goto unlock;
        } else {
                pctl->irqmux_map |= BIT(irq_data->hwirq);
@@ -442,7 +446,7 @@ static int stm32_gpio_domain_activate(struct irq_domain *d,
        regmap_field_write(pctl->irqmux[irq_data->hwirq], bank->bank_ioport_nr);
 
        if (pctl->hwlock)
-               hwspin_unlock(pctl->hwlock);
+               hwspin_unlock_in_atomic(pctl->hwlock);
 
 unlock:
        spin_unlock_irqrestore(&pctl->irqmux_lock, flags);
@@ -750,12 +754,13 @@ static int stm32_pmx_set_mode(struct stm32_gpio_bank *bank,
        clk_enable(bank->clk);
        spin_lock_irqsave(&bank->lock, flags);
 
-       if (pctl->hwlock)
-               err = hwspin_lock_timeout(pctl->hwlock, HWSPINLOCK_TIMEOUT);
-
-       if (err) {
-               dev_err(pctl->dev, "Can't get hwspinlock\n");
-               goto unlock;
+       if (pctl->hwlock) {
+               err = hwspin_lock_timeout_in_atomic(pctl->hwlock,
+                                                   HWSPNLCK_TIMEOUT);
+               if (err) {
+                       dev_err(pctl->dev, "Can't get hwspinlock\n");
+                       goto unlock;
+               }
        }
 
        val = readl_relaxed(bank->base + alt_offset);
@@ -769,7 +774,7 @@ static int stm32_pmx_set_mode(struct stm32_gpio_bank *bank,
        writel_relaxed(val, bank->base + STM32_GPIO_MODER);
 
        if (pctl->hwlock)
-               hwspin_unlock(pctl->hwlock);
+               hwspin_unlock_in_atomic(pctl->hwlock);
 
        stm32_gpio_backup_mode(bank, pin, mode, alt);
 
@@ -869,12 +874,13 @@ static int stm32_pconf_set_driving(struct stm32_gpio_bank *bank,
        clk_enable(bank->clk);
        spin_lock_irqsave(&bank->lock, flags);
 
-       if (pctl->hwlock)
-               err = hwspin_lock_timeout(pctl->hwlock, HWSPINLOCK_TIMEOUT);
-
-       if (err) {
-               dev_err(pctl->dev, "Can't get hwspinlock\n");
-               goto unlock;
+       if (pctl->hwlock) {
+               err = hwspin_lock_timeout_in_atomic(pctl->hwlock,
+                                                   HWSPNLCK_TIMEOUT);
+               if (err) {
+                       dev_err(pctl->dev, "Can't get hwspinlock\n");
+                       goto unlock;
+               }
        }
 
        val = readl_relaxed(bank->base + STM32_GPIO_TYPER);
@@ -883,7 +889,7 @@ static int stm32_pconf_set_driving(struct stm32_gpio_bank *bank,
        writel_relaxed(val, bank->base + STM32_GPIO_TYPER);
 
        if (pctl->hwlock)
-               hwspin_unlock(pctl->hwlock);
+               hwspin_unlock_in_atomic(pctl->hwlock);
 
        stm32_gpio_backup_driving(bank, offset, drive);
 
@@ -923,12 +929,13 @@ static int stm32_pconf_set_speed(struct stm32_gpio_bank *bank,
        clk_enable(bank->clk);
        spin_lock_irqsave(&bank->lock, flags);
 
-       if (pctl->hwlock)
-               err = hwspin_lock_timeout(pctl->hwlock, HWSPINLOCK_TIMEOUT);
-
-       if (err) {
-               dev_err(pctl->dev, "Can't get hwspinlock\n");
-               goto unlock;
+       if (pctl->hwlock) {
+               err = hwspin_lock_timeout_in_atomic(pctl->hwlock,
+                                                   HWSPNLCK_TIMEOUT);
+               if (err) {
+                       dev_err(pctl->dev, "Can't get hwspinlock\n");
+                       goto unlock;
+               }
        }
 
        val = readl_relaxed(bank->base + STM32_GPIO_SPEEDR);
@@ -937,7 +944,7 @@ static int stm32_pconf_set_speed(struct stm32_gpio_bank *bank,
        writel_relaxed(val, bank->base + STM32_GPIO_SPEEDR);
 
        if (pctl->hwlock)
-               hwspin_unlock(pctl->hwlock);
+               hwspin_unlock_in_atomic(pctl->hwlock);
 
        stm32_gpio_backup_speed(bank, offset, speed);
 
@@ -977,12 +984,13 @@ static int stm32_pconf_set_bias(struct stm32_gpio_bank *bank,
        clk_enable(bank->clk);
        spin_lock_irqsave(&bank->lock, flags);
 
-       if (pctl->hwlock)
-               err = hwspin_lock_timeout(pctl->hwlock, HWSPINLOCK_TIMEOUT);
-
-       if (err) {
-               dev_err(pctl->dev, "Can't get hwspinlock\n");
-               goto unlock;
+       if (pctl->hwlock) {
+               err = hwspin_lock_timeout_in_atomic(pctl->hwlock,
+                                                   HWSPNLCK_TIMEOUT);
+               if (err) {
+                       dev_err(pctl->dev, "Can't get hwspinlock\n");
+                       goto unlock;
+               }
        }
 
        val = readl_relaxed(bank->base + STM32_GPIO_PUPDR);
@@ -991,7 +999,7 @@ static int stm32_pconf_set_bias(struct stm32_gpio_bank *bank,
        writel_relaxed(val, bank->base + STM32_GPIO_PUPDR);
 
        if (pctl->hwlock)
-               hwspin_unlock(pctl->hwlock);
+               hwspin_unlock_in_atomic(pctl->hwlock);
 
        stm32_gpio_backup_bias(bank, offset, bias);
 
@@ -1051,7 +1059,7 @@ static int stm32_pconf_parse_conf(struct pinctrl_dev *pctldev,
        struct stm32_gpio_bank *bank;
        int offset, ret = 0;
 
-       range = pinctrl_find_gpio_range_from_pin(pctldev, pin);
+       range = pinctrl_find_gpio_range_from_pin_nolock(pctldev, pin);
        if (!range) {
                dev_err(pctl->dev, "No gpio range defined.\n");
                return -EINVAL;
@@ -1084,7 +1092,7 @@ static int stm32_pconf_parse_conf(struct pinctrl_dev *pctldev,
                ret = stm32_pmx_gpio_set_direction(pctldev, range, pin, false);
                break;
        default:
-               ret = -EINVAL;
+               ret = -ENOTSUPP;
        }
 
        return ret;
@@ -1109,9 +1117,11 @@ static int stm32_pconf_group_set(struct pinctrl_dev *pctldev, unsigned group,
        int i, ret;
 
        for (i = 0; i < num_configs; i++) {
+               mutex_lock(&pctldev->mutex);
                ret = stm32_pconf_parse_conf(pctldev, g->pin,
                        pinconf_to_config_param(configs[i]),
                        pinconf_to_config_argument(configs[i]));
+               mutex_unlock(&pctldev->mutex);
                if (ret < 0)
                        return ret;
 
@@ -1121,6 +1131,22 @@ static int stm32_pconf_group_set(struct pinctrl_dev *pctldev, unsigned group,
        return 0;
 }
 
+static int stm32_pconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
+                          unsigned long *configs, unsigned int num_configs)
+{
+       int i, ret;
+
+       for (i = 0; i < num_configs; i++) {
+               ret = stm32_pconf_parse_conf(pctldev, pin,
+                               pinconf_to_config_param(configs[i]),
+                               pinconf_to_config_argument(configs[i]));
+               if (ret < 0)
+                       return ret;
+       }
+
+       return 0;
+}
+
 static void stm32_pconf_dbg_show(struct pinctrl_dev *pctldev,
                                 struct seq_file *s,
                                 unsigned int pin)
@@ -1186,10 +1212,10 @@ static void stm32_pconf_dbg_show(struct pinctrl_dev *pctldev,
        }
 }
 
-
 static const struct pinconf_ops stm32_pconf_ops = {
        .pin_config_group_get   = stm32_pconf_group_get,
        .pin_config_group_set   = stm32_pconf_group_set,
+       .pin_config_set         = stm32_pconf_set,
        .pin_config_dbg_show    = stm32_pconf_dbg_show,
 };
 
@@ -1202,13 +1228,11 @@ static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl,
        struct of_phandle_args args;
        struct device *dev = pctl->dev;
        struct resource res;
-       struct reset_control *rstc;
        int npins = STM32_GPIO_PINS_PER_BANK;
        int bank_nr, err;
 
-       rstc = of_reset_control_get_exclusive(np, NULL);
-       if (!IS_ERR(rstc))
-               reset_control_deassert(rstc);
+       if (!IS_ERR(bank->rstc))
+               reset_control_deassert(bank->rstc);
 
        if (of_address_to_resource(np, 0, &res))
                return -ENODEV;
@@ -1217,12 +1241,6 @@ static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl,
        if (IS_ERR(bank->base))
                return PTR_ERR(bank->base);
 
-       bank->clk = of_clk_get_by_name(np, NULL);
-       if (IS_ERR(bank->clk)) {
-               dev_err(dev, "failed to get clk (%ld)\n", PTR_ERR(bank->clk));
-               return PTR_ERR(bank->clk);
-       }
-
        err = clk_prepare(bank->clk);
        if (err) {
                dev_err(dev, "failed to prepare clk (%d)\n", err);
@@ -1517,6 +1535,28 @@ int stm32_pctl_probe(struct platform_device *pdev)
        if (!pctl->banks)
                return -ENOMEM;
 
+       i = 0;
+       for_each_available_child_of_node(np, child) {
+               struct stm32_gpio_bank *bank = &pctl->banks[i];
+
+               if (of_property_read_bool(child, "gpio-controller")) {
+                       bank->rstc = of_reset_control_get_exclusive(child,
+                                                                   NULL);
+                       if (PTR_ERR(bank->rstc) == -EPROBE_DEFER)
+                               return -EPROBE_DEFER;
+
+                       bank->clk = of_clk_get_by_name(child, NULL);
+                       if (IS_ERR(bank->clk)) {
+                               if (PTR_ERR(bank->clk) != -EPROBE_DEFER)
+                                       dev_err(dev,
+                                               "failed to get clk (%ld)\n",
+                                               PTR_ERR(bank->clk));
+                               return PTR_ERR(bank->clk);
+                       }
+                       i++;
+               }
+       }
+
        for_each_available_child_of_node(np, child) {
                if (of_property_read_bool(child, "gpio-controller")) {
                        ret = stm32_gpiolib_register_bank(pctl, child);
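
The STM32 hunks above replace hwspin_lock_timeout() with hwspin_lock_timeout_in_atomic() because every call site already holds a spinlock with interrupts disabled, so the hwspinlock attempt must busy-wait instead of sleep; accordingly the timeout constant moves from 5 ms to 1000 us. A condensed, illustrative sketch of the resulting pattern follows; it is not the driver code, and the register update is a stand-in:

#include <linux/hwspinlock.h>
#include <linux/io.h>
#include <linux/types.h>

/* Caller is assumed to hold a spinlock with IRQs off, as in the driver. */
static int stm32_set_bits_hwlocked(struct hwspinlock *hwlock,
				   void __iomem *reg, u32 set)
{
	u32 val;
	int err;

	if (hwlock) {
		/* Busy-wait up to 1000 us; never sleeps in atomic context. */
		err = hwspin_lock_timeout_in_atomic(hwlock, 1000);
		if (err)
			return err;	/* coprocessor still holds the lock */
	}

	val = readl_relaxed(reg);
	writel_relaxed(val | set, reg);

	if (hwlock)
		hwspin_unlock_in_atomic(hwlock);

	return 0;
}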
index 2e0b5f7..c94ba17 100644 (file)
@@ -98,7 +98,6 @@ static struct tegra_function tegra194_functions[] = {
                .sfsel_bit = 10,                                \
                .schmitt_bit = schmitt_b,                       \
                .drvtype_bit = 13,                              \
-               .drv_reg = -1,                                  \
                .parked_bitmask = 0
 
 #define drive_pex_l5_clkreq_n_pgg0                             \
index b522ca0..cfb9242 100644 (file)
@@ -2,7 +2,7 @@
  * Support for configuration of IO Delay module found on Texas Instruments SoCs
  * such as DRA7
  *
- * Copyright (C) 2015-2017 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2015-2017 Texas Instruments Incorporated - https://www.ti.com/
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index cf07215..a056031 100644 (file)
@@ -218,6 +218,7 @@ config CROS_EC_TYPEC
        tristate "ChromeOS EC Type-C Connector Control"
        depends on MFD_CROS_EC_DEV && TYPEC
        depends on CROS_USBPD_NOTIFY
+       depends on USB_ROLE_SWITCH
        default MFD_CROS_EC_DEV
        help
          If you say Y here, you get support for accessing Type C connector
index ecfada0..272c898 100644 (file)
@@ -242,6 +242,25 @@ static ssize_t cros_ec_pdinfo_read(struct file *file,
                                       read_buf, p - read_buf);
 }
 
+static bool cros_ec_uptime_is_supported(struct cros_ec_device *ec_dev)
+{
+       struct {
+               struct cros_ec_command cmd;
+               struct ec_response_uptime_info resp;
+       } __packed msg = {};
+       int ret;
+
+       msg.cmd.command = EC_CMD_GET_UPTIME_INFO;
+       msg.cmd.insize = sizeof(msg.resp);
+
+       ret = cros_ec_cmd_xfer_status(ec_dev, &msg.cmd);
+       if (ret == -EPROTO && msg.cmd.result == EC_RES_INVALID_COMMAND)
+               return false;
+
+       /* Other errors may be transient, so do not rule out support. */
+       return true;
+}
+
 static ssize_t cros_ec_uptime_read(struct file *file, char __user *user_buf,
                                   size_t count, loff_t *ppos)
 {
@@ -444,8 +463,9 @@ static int cros_ec_debugfs_probe(struct platform_device *pd)
        debugfs_create_file("pdinfo", 0444, debug_info->dir, debug_info,
                            &cros_ec_pdinfo_fops);
 
-       debugfs_create_file("uptime", 0444, debug_info->dir, debug_info,
-                           &cros_ec_uptime_fops);
+       if (cros_ec_uptime_is_supported(ec->ec_dev))
+               debugfs_create_file("uptime", 0444, debug_info->dir, debug_info,
+                                   &cros_ec_uptime_fops);
 
        debugfs_create_x32("last_resume_result", 0444, debug_info->dir,
                           &ec->ec_dev->last_resume_result);
index ed794a7..8136402 100644 (file)
@@ -681,8 +681,10 @@ static int cros_ec_ishtp_probe(struct ishtp_cl_device *cl_device)
 
        /* Register cros_ec_dev mfd */
        rv = cros_ec_dev_init(client_data);
-       if (rv)
+       if (rv) {
+               down_write(&init_lock);
                goto end_cros_ec_dev_init_error;
+       }
 
        return 0;
 
index 3e745e0..8d52b3b 100644 (file)
@@ -208,6 +208,12 @@ static int cros_ec_get_host_event_wake_mask(struct cros_ec_device *ec_dev,
        msg->insize = sizeof(*r);
 
        ret = send_command(ec_dev, msg);
+       if (ret >= 0) {
+               if (msg->result == EC_RES_INVALID_COMMAND)
+                       return -EOPNOTSUPP;
+               if (msg->result != EC_RES_SUCCESS)
+                       return -EPROTO;
+       }
        if (ret > 0) {
                r = (struct ec_response_host_event_mask *)msg->data;
                *mask = r->mask;
@@ -469,14 +475,33 @@ int cros_ec_query_all(struct cros_ec_device *ec_dev)
                                                    &ver_mask);
        ec_dev->host_sleep_v1 = (ret >= 0 && (ver_mask & EC_VER_MASK(1)));
 
-       /*
-        * Get host event wake mask, assume all events are wake events
-        * if unavailable.
-        */
+       /* Get host event wake mask. */
        ret = cros_ec_get_host_event_wake_mask(ec_dev, proto_msg,
                                               &ec_dev->host_event_wake_mask);
-       if (ret < 0)
-               ec_dev->host_event_wake_mask = U32_MAX;
+       if (ret < 0) {
+               /*
+                * If the EC doesn't support EC_CMD_HOST_EVENT_GET_WAKE_MASK,
+                * use a reasonable default. Note that we ignore various
+                * battery, AC status, and power-state events, because (a)
+                * those can be quite common (e.g., when sitting at full
+                * charge, on AC) and (b) these are not actionable wake events;
+                * if anything, we'd like to continue suspending (to save
+                * power), not wake up.
+                */
+               ec_dev->host_event_wake_mask = U32_MAX &
+                       ~(BIT(EC_HOST_EVENT_AC_DISCONNECTED) |
+                         BIT(EC_HOST_EVENT_BATTERY_LOW) |
+                         BIT(EC_HOST_EVENT_BATTERY_CRITICAL) |
+                         BIT(EC_HOST_EVENT_PD_MCU) |
+                         BIT(EC_HOST_EVENT_BATTERY_STATUS));
+               /*
+                * Old ECs may not support this command. Complain about all
+                * other errors.
+                */
+               if (ret != -EOPNOTSUPP)
+                       dev_err(ec_dev->dev,
+                               "failed to retrieve wake mask: %d\n", ret);
+       }
 
        ret = 0;
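
A small aside on the fallback above: when the EC predates EC_CMD_HOST_EVENT_GET_WAKE_MASK, the driver now starts from "everything wakes" and clears only the chatty, non-actionable events. A standalone sketch of the same computation, with placeholder bit positions instead of the real EC_HOST_EVENT_* values:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

enum {					/* placeholder bit positions only */
	EV_AC_DISCONNECTED = 1,
	EV_BATTERY_LOW,
	EV_BATTERY_CRITICAL,
	EV_PD_MCU,
	EV_BATTERY_STATUS,
};

#define BIT32(n)	(UINT32_C(1) << (n))

int main(void)
{
	/* Wake on everything except the frequent, non-actionable events. */
	uint32_t wake_mask = UINT32_MAX &
		~(BIT32(EV_AC_DISCONNECTED) |
		  BIT32(EV_BATTERY_LOW) |
		  BIT32(EV_BATTERY_CRITICAL) |
		  BIT32(EV_PD_MCU) |
		  BIT32(EV_BATTERY_STATUS));

	printf("default wake mask: 0x%08" PRIx32 "\n", wake_mask);
	return 0;
}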
 
@@ -496,8 +521,8 @@ EXPORT_SYMBOL(cros_ec_query_all);
  *
  * Return: 0 on success or negative error code.
  */
-int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev,
-                    struct cros_ec_command *msg)
+static int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev,
+                           struct cros_ec_command *msg)
 {
        int ret;
 
@@ -541,7 +566,6 @@ int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev,
 
        return ret;
 }
-EXPORT_SYMBOL(cros_ec_cmd_xfer);
 
 /**
  * cros_ec_cmd_xfer_status() - Send a command to the ChromeOS EC.
index 7e8629e..30d0ba3 100644 (file)
@@ -38,6 +38,9 @@ struct cros_ec_rpmsg_response {
  * @rpdev:     rpmsg device we are connected to
  * @xfer_ack:  completion for host command transfer.
  * @host_event_work:   Work struct for pending host event.
+ * @ept: The rpmsg endpoint of this channel.
+ * @has_pending_host_event: Boolean used to check if there is a pending event.
+ * @probe_done: Flag to indicate that probe is done.
  */
 struct cros_ec_rpmsg {
        struct rpmsg_device *rpdev;
index 24e48d9..8921f24 100644 (file)
@@ -419,9 +419,7 @@ cros_ec_sensor_ring_process_event(struct cros_ec_sensorhub *sensorhub,
                         * Disable filtering since we might add more jitter
                         * if b is in a random point in time.
                         */
-                       new_timestamp = fifo_timestamp -
-                                       fifo_info->timestamp  * 1000 +
-                                       in->timestamp * 1000;
+                       new_timestamp = c - b * 1000 + a * 1000;
                        /*
                         * The timestamp can be stale if we had to use the fifo
                         * info timestamp.
@@ -675,29 +673,22 @@ done_with_this_batch:
  * cros_ec_sensor_ring_spread_add_legacy: Calculate proper timestamps then
  * add to ringbuffer (legacy).
  *
- * Note: This assumes we're running old firmware, where every sample's timestamp
- * is after the sample. Run if tight_timestamps == false.
- *
- * If there is a sample with a proper timestamp
+ * Note: This assumes we're running old firmware, where timestamp
+ * is inserted after its sample(s). There can be several samples between
+ * timestamps, so several samples can have the same timestamp.
  *
  *                        timestamp | count
  *                        -----------------
- * older_unprocess_out --> TS1      | 1
- *                         TS1      | 2
- *                out -->  TS1      | 3
- *           next_out -->  TS2      |
- *
- * We spread time for the samples [older_unprocess_out .. out]
- * between TS1 and TS2: [TS1+1/4, TS1+2/4, TS1+3/4, TS2].
+ *          1st sample --> TS1      | 1
+ *                         TS2      | 2
+ *                         TS2      | 3
+ *                         TS3      | 4
+ *           last_out -->
  *
- * If we reach the end of the samples, we compare with the
- * current timestamp:
  *
- * older_unprocess_out --> TS1      | 1
- *                         TS1      | 2
- *                 out --> TS1      | 3
+ * We spread time for the samples using the period p = (current - TS1)/count
+ * (4 in this example), giving [TS1+p, TS1+2p, TS1+3p, current_timestamp].
  *
- * We know have [TS1+1/3, TS1+2/3, current timestamp]
  */
 static void
 cros_ec_sensor_ring_spread_add_legacy(struct cros_ec_sensorhub *sensorhub,
@@ -710,58 +701,37 @@ cros_ec_sensor_ring_spread_add_legacy(struct cros_ec_sensorhub *sensorhub,
        int i;
 
        for_each_set_bit(i, &sensor_mask, sensorhub->sensor_num) {
-               s64 older_timestamp;
                s64 timestamp;
-               struct cros_ec_sensors_ring_sample *older_unprocess_out =
-                       sensorhub->ring;
-               struct cros_ec_sensors_ring_sample *next_out;
-               int count = 1;
-
-               for (out = sensorhub->ring; out < last_out; out = next_out) {
-                       s64 time_period;
+               int count = 0;
+               s64 time_period;
 
-                       next_out = out + 1;
+               for (out = sensorhub->ring; out < last_out; out++) {
                        if (out->sensor_id != i)
                                continue;
 
                        /* Timestamp to start with */
-                       older_timestamp = out->timestamp;
-
-                       /* Find next sample. */
-                       while (next_out < last_out && next_out->sensor_id != i)
-                               next_out++;
+                       timestamp = out->timestamp;
+                       out++;
+                       count = 1;
+                       break;
+               }
+               for (; out < last_out; out++) {
+                       /* Find last sample. */
+                       if (out->sensor_id != i)
+                               continue;
+                       count++;
+               }
+               if (count == 0)
+                       continue;
 
-                       if (next_out >= last_out) {
-                               timestamp = current_timestamp;
-                       } else {
-                               timestamp = next_out->timestamp;
-                               if (timestamp == older_timestamp) {
-                                       count++;
-                                       continue;
-                               }
-                       }
+               /* Spread uniformly between the first and last samples. */
+               time_period = div_s64(current_timestamp - timestamp, count);
 
-                       /*
-                        * The next sample has a new timestamp, spread the
-                        * unprocessed samples.
-                        */
-                       if (next_out < last_out)
-                               count++;
-                       time_period = div_s64(timestamp - older_timestamp,
-                                             count);
-
-                       for (; older_unprocess_out <= out;
-                                       older_unprocess_out++) {
-                               if (older_unprocess_out->sensor_id != i)
-                                       continue;
-                               older_timestamp += time_period;
-                               older_unprocess_out->timestamp =
-                                       older_timestamp;
-                       }
-                       count = 1;
-                       /* The next_out sample has a valid timestamp, skip. */
-                       next_out++;
-                       older_unprocess_out = next_out;
+               for (out = sensorhub->ring; out < last_out; out++) {
+                       if (out->sensor_id != i)
+                               continue;
+                       timestamp += time_period;
+                       out->timestamp = timestamp;
                }
        }
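
To make the legacy spreading above concrete: if one sensor contributed four samples that all carry the stale timestamp TS1 and the current host timestamp is C, the period is p = (C - TS1)/4 and the samples are rewritten to TS1+p, TS1+2p, TS1+3p, C. A minimal sketch of that arithmetic on plain integers (the driver operates on struct cros_ec_sensors_ring_sample instead):

#include <stdint.h>
#include <stdio.h>

static void spread_legacy(int64_t *ts, int count, int64_t current_timestamp)
{
	int64_t t = ts[0];			/* timestamp to start with */
	int64_t period = (current_timestamp - t) / count;

	for (int i = 0; i < count; i++) {
		t += period;
		ts[i] = t;			/* last sample lands on "current" */
	}
}

int main(void)
{
	int64_t ts[4] = { 1000, 1000, 1000, 1000 };	/* identical stale stamps */

	spread_legacy(ts, 4, 2000);
	for (int i = 0; i < 4; i++)
		printf("%lld\n", (long long)ts[i]);	/* 1250 1500 1750 2000 */
	return 0;
}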
 
index d092603..dfa1f81 100644 (file)
@@ -148,6 +148,10 @@ static int terminate_request(struct cros_ec_device *ec_dev)
  * receive_n_bytes - receive n bytes from the EC.
  *
  * Assumes buf is a pointer into the ec_dev->din buffer
+ *
+ * @ec_dev: ChromeOS EC device.
+ * @buf: Pointer to the buffer receiving the data.
+ * @n: Number of bytes to receive.
  */
 static int receive_n_bytes(struct cros_ec_device *ec_dev, u8 *buf, int n)
 {
index 66b8d21..3fcd27e 100644 (file)
 #include <linux/platform_data/cros_usbpd_notify.h>
 #include <linux/platform_device.h>
 #include <linux/usb/typec.h>
+#include <linux/usb/typec_altmode.h>
+#include <linux/usb/typec_dp.h>
+#include <linux/usb/typec_mux.h>
+#include <linux/usb/typec_tbt.h>
+#include <linux/usb/role.h>
 
 #define DRV_NAME "cros-ec-typec"
 
+/* Supported alt modes. */
+enum {
+       CROS_EC_ALTMODE_DP = 0,
+       CROS_EC_ALTMODE_TBT,
+       CROS_EC_ALTMODE_MAX,
+};
+
 /* Per port data. */
 struct cros_typec_port {
        struct typec_port *port;
@@ -25,6 +37,16 @@ struct cros_typec_port {
        struct typec_partner *partner;
        /* Port partner PD identity info. */
        struct usb_pd_identity p_identity;
+       struct typec_switch *ori_sw;
+       struct typec_mux *mux;
+       struct usb_role_switch *role_sw;
+
+       /* Variables keeping track of switch state. */
+       struct typec_mux_state state;
+       uint8_t mux_flags;
+
+       /* Port alt modes. */
+       struct typec_altmode p_altmode[CROS_EC_ALTMODE_MAX];
 };
 
 /* Platform-specific data for the Chrome OS EC Type C controller. */
@@ -32,10 +54,11 @@ struct cros_typec_data {
        struct device *dev;
        struct cros_ec_device *ec;
        int num_ports;
-       unsigned int cmd_ver;
+       unsigned int pd_ctrl_ver;
        /* Array of ports, indexed by port number. */
        struct cros_typec_port *ports[EC_USB_PD_MAX_PORTS];
        struct notifier_block nb;
+       struct work_struct port_work;
 };
 
 static int cros_typec_parse_port_props(struct typec_capability *cap,
@@ -84,6 +107,81 @@ static int cros_typec_parse_port_props(struct typec_capability *cap,
        return 0;
 }
 
+static int cros_typec_get_switch_handles(struct cros_typec_port *port,
+                                        struct fwnode_handle *fwnode,
+                                        struct device *dev)
+{
+       port->mux = fwnode_typec_mux_get(fwnode, NULL);
+       if (IS_ERR(port->mux)) {
+               dev_dbg(dev, "Mux handle not found.\n");
+               goto mux_err;
+       }
+
+       port->ori_sw = fwnode_typec_switch_get(fwnode);
+       if (IS_ERR(port->ori_sw)) {
+               dev_dbg(dev, "Orientation switch handle not found.\n");
+               goto ori_sw_err;
+       }
+
+       port->role_sw = fwnode_usb_role_switch_get(fwnode);
+       if (IS_ERR(port->role_sw)) {
+               dev_dbg(dev, "USB role switch handle not found.\n");
+               goto role_sw_err;
+       }
+
+       return 0;
+
+role_sw_err:
+       usb_role_switch_put(port->role_sw);
+ori_sw_err:
+       typec_switch_put(port->ori_sw);
+mux_err:
+       typec_mux_put(port->mux);
+
+       return -ENODEV;
+}
+
+static int cros_typec_add_partner(struct cros_typec_data *typec, int port_num,
+                                 bool pd_en)
+{
+       struct cros_typec_port *port = typec->ports[port_num];
+       struct typec_partner_desc p_desc = {
+               .usb_pd = pd_en,
+       };
+       int ret = 0;
+
+       /*
+        * Fill an initial PD identity, which will then be updated with info
+        * from the EC.
+        */
+       p_desc.identity = &port->p_identity;
+
+       port->partner = typec_register_partner(port->port, &p_desc);
+       if (IS_ERR(port->partner)) {
+               ret = PTR_ERR(port->partner);
+               port->partner = NULL;
+       }
+
+       return ret;
+}
+
+static void cros_typec_remove_partner(struct cros_typec_data *typec,
+                                    int port_num)
+{
+       struct cros_typec_port *port = typec->ports[port_num];
+
+       port->state.alt = NULL;
+       port->state.mode = TYPEC_STATE_USB;
+       port->state.data = NULL;
+
+       usb_role_switch_set_role(port->role_sw, USB_ROLE_NONE);
+       typec_switch_set(port->ori_sw, TYPEC_ORIENTATION_NONE);
+       typec_mux_set(port->mux, &port->state);
+
+       typec_unregister_partner(port->partner);
+       port->partner = NULL;
+}
+
 static void cros_unregister_ports(struct cros_typec_data *typec)
 {
        int i;
@@ -91,10 +189,40 @@ static void cros_unregister_ports(struct cros_typec_data *typec)
        for (i = 0; i < typec->num_ports; i++) {
                if (!typec->ports[i])
                        continue;
+               cros_typec_remove_partner(typec, i);
+               usb_role_switch_put(typec->ports[i]->role_sw);
+               typec_switch_put(typec->ports[i]->ori_sw);
+               typec_mux_put(typec->ports[i]->mux);
                typec_unregister_port(typec->ports[i]->port);
        }
 }
 
+/*
+ * Fake the alt mode structs until we actually start registering Type C port
+ * and partner alt modes.
+ */
+static void cros_typec_register_port_altmodes(struct cros_typec_data *typec,
+                                             int port_num)
+{
+       struct cros_typec_port *port = typec->ports[port_num];
+
+       /* All PD capable CrOS devices are assumed to support DP altmode. */
+       port->p_altmode[CROS_EC_ALTMODE_DP].svid = USB_TYPEC_DP_SID;
+       port->p_altmode[CROS_EC_ALTMODE_DP].mode = USB_TYPEC_DP_MODE;
+
+       /*
+        * Register TBT compatibility alt mode. The EC will not enter the mode
+        * if it doesn't support it, so it's safe to register it unconditionally
+        * here for now.
+        */
+       port->p_altmode[CROS_EC_ALTMODE_TBT].svid = USB_TYPEC_TBT_SID;
+       port->p_altmode[CROS_EC_ALTMODE_TBT].mode = TYPEC_ANY_MODE;
+
+       port->state.alt = NULL;
+       port->state.mode = TYPEC_STATE_USB;
+       port->state.data = NULL;
+}
+
 static int cros_typec_init_ports(struct cros_typec_data *typec)
 {
        struct device *dev = typec->dev;
@@ -153,6 +281,13 @@ static int cros_typec_init_ports(struct cros_typec_data *typec)
                        ret = PTR_ERR(cros_port->port);
                        goto unregister_ports;
                }
+
+               ret = cros_typec_get_switch_handles(cros_port, fwnode, dev);
+               if (ret)
+                       dev_dbg(dev, "No switch control for port %d\n",
+                               port_num);
+
+               cros_typec_register_port_altmodes(typec, port_num);
        }
 
        return 0;
@@ -193,30 +328,6 @@ static int cros_typec_ec_command(struct cros_typec_data *typec,
        return ret;
 }
 
-static int cros_typec_add_partner(struct cros_typec_data *typec, int port_num,
-                                 bool pd_en)
-{
-       struct cros_typec_port *port = typec->ports[port_num];
-       struct typec_partner_desc p_desc = {
-               .usb_pd = pd_en,
-       };
-       int ret = 0;
-
-       /*
-        * Fill an initial PD identity, which will then be updated with info
-        * from the EC.
-        */
-       p_desc.identity = &port->p_identity;
-
-       port->partner = typec_register_partner(port->port, &p_desc);
-       if (IS_ERR(port->partner)) {
-               ret = PTR_ERR(port->partner);
-               port->partner = NULL;
-       }
-
-       return ret;
-}
-
 static void cros_typec_set_port_params_v0(struct cros_typec_data *typec,
                int port_num, struct ec_response_usb_pd_control *resp)
 {
@@ -270,16 +381,166 @@ static void cros_typec_set_port_params_v1(struct cros_typec_data *typec,
        } else {
                if (!typec->ports[port_num]->partner)
                        return;
+               cros_typec_remove_partner(typec, port_num);
+       }
+}
 
-               typec_unregister_partner(typec->ports[port_num]->partner);
-               typec->ports[port_num]->partner = NULL;
+static int cros_typec_get_mux_info(struct cros_typec_data *typec, int port_num,
+                                  struct ec_response_usb_pd_mux_info *resp)
+{
+       struct ec_params_usb_pd_mux_info req = {
+               .port = port_num,
+       };
+
+       return cros_typec_ec_command(typec, 0, EC_CMD_USB_PD_MUX_INFO, &req,
+                                    sizeof(req), resp, sizeof(*resp));
+}
+
+static int cros_typec_usb_safe_state(struct cros_typec_port *port)
+{
+       port->state.mode = TYPEC_STATE_SAFE;
+
+       return typec_mux_set(port->mux, &port->state);
+}
+
+/*
+ * Spoof the VDOs that were likely communicated by the partner for TBT alt
+ * mode.
+ */
+static int cros_typec_enable_tbt(struct cros_typec_data *typec,
+                                int port_num,
+                                struct ec_response_usb_pd_control_v2 *pd_ctrl)
+{
+       struct cros_typec_port *port = typec->ports[port_num];
+       struct typec_thunderbolt_data data;
+       int ret;
+
+       if (typec->pd_ctrl_ver < 2) {
+               dev_err(typec->dev,
+                       "PD_CTRL version too old: %d\n", typec->pd_ctrl_ver);
+               return -ENOTSUPP;
+       }
+
+       /* Device Discover Mode VDO */
+       data.device_mode = TBT_MODE;
+
+       if (pd_ctrl->control_flags & USB_PD_CTRL_TBT_LEGACY_ADAPTER)
+               data.device_mode = TBT_SET_ADAPTER(TBT_ADAPTER_TBT3);
+
+       /* Cable Discover Mode VDO */
+       data.cable_mode = TBT_MODE;
+       data.cable_mode |= TBT_SET_CABLE_SPEED(pd_ctrl->cable_speed);
+
+       if (pd_ctrl->control_flags & USB_PD_CTRL_OPTICAL_CABLE)
+               data.cable_mode |= TBT_CABLE_OPTICAL;
+
+       if (pd_ctrl->control_flags & USB_PD_CTRL_ACTIVE_LINK_UNIDIR)
+               data.cable_mode |= TBT_CABLE_LINK_TRAINING;
+
+       if (pd_ctrl->cable_gen)
+               data.cable_mode |= TBT_CABLE_ROUNDED;
+
+       /* Enter Mode VDO */
+       data.enter_vdo = TBT_SET_CABLE_SPEED(pd_ctrl->cable_speed);
+
+       if (pd_ctrl->control_flags & USB_PD_CTRL_ACTIVE_CABLE)
+               data.enter_vdo |= TBT_ENTER_MODE_ACTIVE_CABLE;
+
+       if (!port->state.alt) {
+               port->state.alt = &port->p_altmode[CROS_EC_ALTMODE_TBT];
+               ret = cros_typec_usb_safe_state(port);
+               if (ret)
+                       return ret;
+       }
+
+       port->state.data = &data;
+       port->state.mode = TYPEC_TBT_MODE;
+
+       return typec_mux_set(port->mux, &port->state);
+}
+
+/* Spoof the VDOs that were likely communicated by the partner. */
+static int cros_typec_enable_dp(struct cros_typec_data *typec,
+                               int port_num,
+                               struct ec_response_usb_pd_control_v2 *pd_ctrl)
+{
+       struct cros_typec_port *port = typec->ports[port_num];
+       struct typec_displayport_data dp_data;
+       int ret;
+
+       if (typec->pd_ctrl_ver < 2) {
+               dev_err(typec->dev,
+                       "PD_CTRL version too old: %d\n", typec->pd_ctrl_ver);
+               return -ENOTSUPP;
+       }
+
+       /* Status VDO. */
+       dp_data.status = DP_STATUS_ENABLED;
+       if (port->mux_flags & USB_PD_MUX_HPD_IRQ)
+               dp_data.status |= DP_STATUS_IRQ_HPD;
+       if (port->mux_flags & USB_PD_MUX_HPD_LVL)
+               dp_data.status |= DP_STATUS_HPD_STATE;
+
+       /* Configuration VDO. */
+       dp_data.conf = DP_CONF_SET_PIN_ASSIGN(pd_ctrl->dp_mode);
+       if (!port->state.alt) {
+               port->state.alt = &port->p_altmode[CROS_EC_ALTMODE_DP];
+               ret = cros_typec_usb_safe_state(port);
+               if (ret)
+                       return ret;
        }
+
+       port->state.data = &dp_data;
+       port->state.mode = TYPEC_MODAL_STATE(ffs(pd_ctrl->dp_mode));
+
+       return typec_mux_set(port->mux, &port->state);
+}
+
+static int cros_typec_configure_mux(struct cros_typec_data *typec, int port_num,
+                               uint8_t mux_flags,
+                               struct ec_response_usb_pd_control_v2 *pd_ctrl)
+{
+       struct cros_typec_port *port = typec->ports[port_num];
+       enum typec_orientation orientation;
+       int ret;
+
+       if (!port->partner)
+               return 0;
+
+       if (mux_flags & USB_PD_MUX_POLARITY_INVERTED)
+               orientation = TYPEC_ORIENTATION_REVERSE;
+       else
+               orientation = TYPEC_ORIENTATION_NORMAL;
+
+       ret = typec_switch_set(port->ori_sw, orientation);
+       if (ret)
+               return ret;
+
+       if (mux_flags & USB_PD_MUX_TBT_COMPAT_ENABLED) {
+               ret = cros_typec_enable_tbt(typec, port_num, pd_ctrl);
+       } else if (mux_flags & USB_PD_MUX_DP_ENABLED) {
+               ret = cros_typec_enable_dp(typec, port_num, pd_ctrl);
+       } else if (mux_flags & USB_PD_MUX_SAFE_MODE) {
+               ret = cros_typec_usb_safe_state(port);
+       } else if (mux_flags & USB_PD_MUX_USB_ENABLED) {
+               port->state.alt = NULL;
+               port->state.mode = TYPEC_STATE_USB;
+               ret = typec_mux_set(port->mux, &port->state);
+       } else {
+               dev_info(typec->dev,
+                        "Unsupported mode requested, mux flags: %x\n",
+                        mux_flags);
+               ret = -ENOTSUPP;
+       }
+
+       return ret;
 }
 
 static int cros_typec_port_update(struct cros_typec_data *typec, int port_num)
 {
        struct ec_params_usb_pd_control req;
-       struct ec_response_usb_pd_control_v1 resp;
+       struct ec_response_usb_pd_control_v2 resp;
+       struct ec_response_usb_pd_mux_info mux_resp;
        int ret;
 
        if (port_num < 0 || port_num >= typec->num_ports) {
@@ -293,7 +554,7 @@ static int cros_typec_port_update(struct cros_typec_data *typec, int port_num)
        req.mux = USB_PD_CTRL_MUX_NO_CHANGE;
        req.swap = USB_PD_CTRL_SWAP_NONE;
 
-       ret = cros_typec_ec_command(typec, typec->cmd_ver,
+       ret = cros_typec_ec_command(typec, typec->pd_ctrl_ver,
                                    EC_CMD_USB_PD_CONTROL, &req, sizeof(req),
                                    &resp, sizeof(resp));
        if (ret < 0)
@@ -304,13 +565,33 @@ static int cros_typec_port_update(struct cros_typec_data *typec, int port_num)
        dev_dbg(typec->dev, "Polarity %d: 0x%hhx\n", port_num, resp.polarity);
        dev_dbg(typec->dev, "State %d: %s\n", port_num, resp.state);
 
-       if (typec->cmd_ver == 1)
-               cros_typec_set_port_params_v1(typec, port_num, &resp);
+       if (typec->pd_ctrl_ver != 0)
+               cros_typec_set_port_params_v1(typec, port_num,
+                       (struct ec_response_usb_pd_control_v1 *)&resp);
        else
                cros_typec_set_port_params_v0(typec, port_num,
                        (struct ec_response_usb_pd_control *) &resp);
 
-       return 0;
+       /* Update the switches if they exist, according to requested state */
+       ret = cros_typec_get_mux_info(typec, port_num, &mux_resp);
+       if (ret < 0) {
+               dev_warn(typec->dev,
+                        "Failed to get mux info for port: %d, err = %d\n",
+                        port_num, ret);
+               return 0;
+       }
+
+       /* No change needs to be made, let's exit early. */
+       if (typec->ports[port_num]->mux_flags == mux_resp.flags)
+               return 0;
+
+       typec->ports[port_num]->mux_flags = mux_resp.flags;
+       ret = cros_typec_configure_mux(typec, port_num, mux_resp.flags, &resp);
+       if (ret)
+               dev_warn(typec->dev, "Configure muxes failed, err = %d\n", ret);
+
+       return usb_role_switch_set_role(typec->ports[port_num]->role_sw,
+                                       !!(resp.role & PD_CTRL_RESP_ROLE_DATA));
 }
 
 static int cros_typec_get_cmd_version(struct cros_typec_data *typec)
@@ -327,22 +608,22 @@ static int cros_typec_get_cmd_version(struct cros_typec_data *typec)
        if (ret < 0)
                return ret;
 
-       if (resp.version_mask & EC_VER_MASK(1))
-               typec->cmd_ver = 1;
+       if (resp.version_mask & EC_VER_MASK(2))
+               typec->pd_ctrl_ver = 2;
+       else if (resp.version_mask & EC_VER_MASK(1))
+               typec->pd_ctrl_ver = 1;
        else
-               typec->cmd_ver = 0;
+               typec->pd_ctrl_ver = 0;
 
        dev_dbg(typec->dev, "PD Control has version mask 0x%hhx\n",
-               typec->cmd_ver);
+               typec->pd_ctrl_ver);
 
        return 0;
 }
 
-static int cros_ec_typec_event(struct notifier_block *nb,
-                              unsigned long host_event, void *_notify)
+static void cros_typec_port_work(struct work_struct *work)
 {
-       struct cros_typec_data *typec = container_of(nb, struct cros_typec_data,
-                                                    nb);
+       struct cros_typec_data *typec = container_of(work, struct cros_typec_data, port_work);
        int ret, i;
 
        for (i = 0; i < typec->num_ports; i++) {
@@ -350,6 +631,14 @@ static int cros_ec_typec_event(struct notifier_block *nb,
                if (ret < 0)
                        dev_warn(typec->dev, "Update failed for port: %d\n", i);
        }
+}
+
+static int cros_ec_typec_event(struct notifier_block *nb,
+                              unsigned long host_event, void *_notify)
+{
+       struct cros_typec_data *typec = container_of(nb, struct cros_typec_data, nb);
+
+       schedule_work(&typec->port_work);
 
        return NOTIFY_OK;
 }
@@ -408,6 +697,12 @@ static int cros_typec_probe(struct platform_device *pdev)
        if (ret < 0)
                return ret;
 
+       INIT_WORK(&typec->port_work, cros_typec_port_work);
+
+       /*
+        * Safe to call port update here, since we haven't registered the
+        * PD notifier yet.
+        */
        for (i = 0; i < typec->num_ports; i++) {
                ret = cros_typec_port_update(typec, i);
                if (ret < 0)
@@ -426,11 +721,35 @@ unregister_ports:
        return ret;
 }
 
+static int __maybe_unused cros_typec_suspend(struct device *dev)
+{
+       struct cros_typec_data *typec = dev_get_drvdata(dev);
+
+       cancel_work_sync(&typec->port_work);
+
+       return 0;
+}
+
+static int __maybe_unused cros_typec_resume(struct device *dev)
+{
+       struct cros_typec_data *typec = dev_get_drvdata(dev);
+
+       /* Refresh port state. */
+       schedule_work(&typec->port_work);
+
+       return 0;
+}
+
+static const struct dev_pm_ops cros_typec_pm_ops = {
+       SET_SYSTEM_SLEEP_PM_OPS(cros_typec_suspend, cros_typec_resume)
+};
+
 static struct platform_driver cros_typec_driver = {
        .driver = {
                .name = DRV_NAME,
                .acpi_match_table = ACPI_PTR(cros_typec_acpi_id),
                .of_match_table = of_match_ptr(cros_typec_of_match),
+               .pm = &cros_typec_pm_ops,
        },
        .probe = cros_typec_probe,
 };
index 5739a96..bbc4e71 100644 (file)
@@ -625,7 +625,10 @@ static void mlxbf_tmfifo_rxtx_header(struct mlxbf_tmfifo_vring *vring,
                        vdev_id = VIRTIO_ID_NET;
                        hdr_len = sizeof(struct virtio_net_hdr);
                        config = &fifo->vdev[vdev_id]->config.net;
-                       if (ntohs(hdr.len) > config->mtu +
+                       /* A legacy-only interface for now. */
+                       if (ntohs(hdr.len) >
+                           __virtio16_to_cpu(virtio_legacy_is_little_endian(),
+                                             config->mtu) +
                            MLXBF_TMFIFO_NET_L2_OVERHEAD)
                                return;
                } else {
@@ -1231,8 +1234,12 @@ static int mlxbf_tmfifo_probe(struct platform_device *pdev)
 
        /* Create the network vdev. */
        memset(&net_config, 0, sizeof(net_config));
-       net_config.mtu = ETH_DATA_LEN;
-       net_config.status = VIRTIO_NET_S_LINK_UP;
+
+       /* A legacy-only interface for now. */
+       net_config.mtu = __cpu_to_virtio16(virtio_legacy_is_little_endian(),
+                                          ETH_DATA_LEN);
+       net_config.status = __cpu_to_virtio16(virtio_legacy_is_little_endian(),
+                                             VIRTIO_NET_S_LINK_UP);
        mlxbf_tmfifo_get_cfg_mac(net_config.mac);
        rc = mlxbf_tmfifo_create_vdev(dev, fifo, VIRTIO_ID_NET,
                                      MLXBF_TMFIFO_NET_FEATURES, &net_config,
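
The two tmfifo hunks above keep the virtio-net config fields (mtu, status) in legacy virtio byte order, since the interface is legacy-only for now. As a hedged, minimal sketch of the helpers involved (names come from <linux/virtio_byteorder.h>; the function below is illustrative and not part of the patch):

#include <linux/if_ether.h>
#include <linux/virtio_byteorder.h>

static void tmfifo_mtu_byteorder_example(void)
{
        /* Legacy virtio config space uses guest-native byte order. */
        __virtio16 wire_mtu = __cpu_to_virtio16(virtio_legacy_is_little_endian(),
                                                ETH_DATA_LEN);
        /* Convert back to CPU order before comparing against packet lengths. */
        u16 mtu = __virtio16_to_cpu(virtio_legacy_is_little_endian(), wire_mtu);

        (void)mtu;
}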
index 90bc796..8cf8c1b 100644 (file)
 #define MLXPLAT_CPLD_WD_RESET_ACT_MASK GENMASK(7, 1)
 #define MLXPLAT_CPLD_WD_FAN_ACT_MASK   (GENMASK(7, 0) & ~BIT(4))
 #define MLXPLAT_CPLD_WD_COUNT_ACT_MASK (GENMASK(7, 0) & ~BIT(7))
+#define MLXPLAT_CPLD_WD_CPBLTY_MASK    (GENMASK(7, 0) & ~BIT(6))
 #define MLXPLAT_CPLD_WD_DFLT_TIMEOUT   30
+#define MLXPLAT_CPLD_WD3_DFLT_TIMEOUT  600
 #define MLXPLAT_CPLD_WD_MAX_DEVS       2
 
 /* mlxplat_priv - platform private data
@@ -2084,6 +2086,84 @@ static struct mlxreg_core_platform_data mlxplat_mlxcpld_wd_set_type2[] = {
        },
 };
 
+/* Watchdog type3: hardware implementation version 3
+ * Can be present on all systems; differentiated by the WD capability bit.
+ * Old systems (MSN2700, MSN2410, MSN2740, MSN2100 and MSN2140)
+ * still have only one main watchdog.
+ */
+static struct mlxreg_core_data mlxplat_mlxcpld_wd_main_regs_type3[] = {
+       {
+               .label = "action",
+               .reg = MLXPLAT_CPLD_LPC_REG_WD2_ACT_OFFSET,
+               .mask = MLXPLAT_CPLD_WD_RESET_ACT_MASK,
+               .bit = 0,
+       },
+       {
+               .label = "timeout",
+               .reg = MLXPLAT_CPLD_LPC_REG_WD2_TMR_OFFSET,
+               .mask = MLXPLAT_CPLD_WD_TYPE2_TO_MASK,
+               .health_cntr = MLXPLAT_CPLD_WD3_DFLT_TIMEOUT,
+       },
+       {
+               .label = "timeleft",
+               .reg = MLXPLAT_CPLD_LPC_REG_WD2_TMR_OFFSET,
+               .mask = MLXPLAT_CPLD_WD_TYPE2_TO_MASK,
+       },
+       {
+               .label = "ping",
+               .reg = MLXPLAT_CPLD_LPC_REG_WD2_ACT_OFFSET,
+               .mask = MLXPLAT_CPLD_WD_RESET_ACT_MASK,
+               .bit = 0,
+       },
+       {
+               .label = "reset",
+               .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+               .mask = GENMASK(7, 0) & ~BIT(6),
+               .bit = 6,
+       },
+};
+
+static struct mlxreg_core_data mlxplat_mlxcpld_wd_aux_regs_type3[] = {
+       {
+               .label = "action",
+               .reg = MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET,
+               .mask = MLXPLAT_CPLD_WD_FAN_ACT_MASK,
+               .bit = 4,
+       },
+       {
+               .label = "timeout",
+               .reg = MLXPLAT_CPLD_LPC_REG_WD3_TMR_OFFSET,
+               .mask = MLXPLAT_CPLD_WD_TYPE2_TO_MASK,
+               .health_cntr = MLXPLAT_CPLD_WD3_DFLT_TIMEOUT,
+       },
+       {
+               .label = "timeleft",
+               .reg = MLXPLAT_CPLD_LPC_REG_WD3_TMR_OFFSET,
+               .mask = MLXPLAT_CPLD_WD_TYPE2_TO_MASK,
+       },
+       {
+               .label = "ping",
+               .reg = MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET,
+               .mask = MLXPLAT_CPLD_WD_FAN_ACT_MASK,
+               .bit = 4,
+       },
+};
+
+static struct mlxreg_core_platform_data mlxplat_mlxcpld_wd_set_type3[] = {
+       {
+               .data = mlxplat_mlxcpld_wd_main_regs_type3,
+               .counter = ARRAY_SIZE(mlxplat_mlxcpld_wd_main_regs_type3),
+               .version = MLX_WDT_TYPE3,
+               .identity = "mlx-wdt-main",
+       },
+       {
+               .data = mlxplat_mlxcpld_wd_aux_regs_type3,
+               .counter = ARRAY_SIZE(mlxplat_mlxcpld_wd_aux_regs_type3),
+               .version = MLX_WDT_TYPE3,
+               .identity = "mlx-wdt-aux",
+       },
+};
+
 static bool mlxplat_mlxcpld_writeable_reg(struct device *dev, unsigned int reg)
 {
        switch (reg) {
@@ -2114,8 +2194,10 @@ static bool mlxplat_mlxcpld_writeable_reg(struct device *dev, unsigned int reg)
        case MLXPLAT_CPLD_LPC_REG_WD1_TMR_OFFSET:
        case MLXPLAT_CPLD_LPC_REG_WD1_ACT_OFFSET:
        case MLXPLAT_CPLD_LPC_REG_WD2_TMR_OFFSET:
+       case MLXPLAT_CPLD_LPC_REG_WD2_TLEFT_OFFSET:
        case MLXPLAT_CPLD_LPC_REG_WD2_ACT_OFFSET:
        case MLXPLAT_CPLD_LPC_REG_WD3_TMR_OFFSET:
+       case MLXPLAT_CPLD_LPC_REG_WD3_TLEFT_OFFSET:
        case MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET:
        case MLXPLAT_CPLD_LPC_REG_PWM1_OFFSET:
        case MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET:
@@ -2742,6 +2824,27 @@ static int mlxplat_mlxcpld_verify_bus_topology(int *nr)
        return 0;
 }
 
+static int mlxplat_mlxcpld_check_wd_capability(void *regmap)
+{
+       u32 regval;
+       int i, rc;
+
+       rc = regmap_read(regmap, MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET,
+                        &regval);
+       if (rc)
+               return rc;
+
+       if (!(regval & ~MLXPLAT_CPLD_WD_CPBLTY_MASK)) {
+               for (i = 0; i < ARRAY_SIZE(mlxplat_mlxcpld_wd_set_type3); i++) {
+                       if (mlxplat_wd_data[i])
+                               mlxplat_wd_data[i] =
+                                       &mlxplat_mlxcpld_wd_set_type3[i];
+               }
+       }
+
+       return 0;
+}
+
 static int __init mlxplat_init(void)
 {
        struct mlxplat_priv *priv;
@@ -2874,6 +2977,9 @@ static int __init mlxplat_init(void)
        }
 
        /* Add WD drivers. */
+       err = mlxplat_mlxcpld_check_wd_capability(priv->regmap);
+       if (err)
+               goto fail_platform_wd_register;
        for (j = 0; j < MLXPLAT_CPLD_WD_MAX_DEVS; j++) {
                if (mlxplat_wd_data[j]) {
                        mlxplat_wd_data[j]->regmap = priv->regmap;
index 004b2ea..276e939 100644 (file)
@@ -510,12 +510,12 @@ static void pwm_apply_state_debug(struct pwm_device *pwm,
            last->period > s2.period &&
            last->period <= state->period)
                dev_warn(chip->dev,
-                        ".apply didn't pick the best available period (requested: %u, applied: %u, possible: %u)\n",
+                        ".apply didn't pick the best available period (requested: %llu, applied: %llu, possible: %llu)\n",
                         state->period, s2.period, last->period);
 
        if (state->enabled && state->period < s2.period)
                dev_warn(chip->dev,
-                        ".apply is supposed to round down period (requested: %u, applied: %u)\n",
+                        ".apply is supposed to round down period (requested: %llu, applied: %llu)\n",
                         state->period, s2.period);
 
        if (state->enabled &&
@@ -524,14 +524,14 @@ static void pwm_apply_state_debug(struct pwm_device *pwm,
            last->duty_cycle > s2.duty_cycle &&
            last->duty_cycle <= state->duty_cycle)
                dev_warn(chip->dev,
-                        ".apply didn't pick the best available duty cycle (requested: %u/%u, applied: %u/%u, possible: %u/%u)\n",
+                        ".apply didn't pick the best available duty cycle (requested: %llu/%llu, applied: %llu/%llu, possible: %llu/%llu)\n",
                         state->duty_cycle, state->period,
                         s2.duty_cycle, s2.period,
                         last->duty_cycle, last->period);
 
        if (state->enabled && state->duty_cycle < s2.duty_cycle)
                dev_warn(chip->dev,
-                        ".apply is supposed to round down duty_cycle (requested: %u/%u, applied: %u/%u)\n",
+                        ".apply is supposed to round down duty_cycle (requested: %llu/%llu, applied: %llu/%llu)\n",
                         state->duty_cycle, state->period,
                         s2.duty_cycle, s2.period);
 
@@ -558,7 +558,7 @@ static void pwm_apply_state_debug(struct pwm_device *pwm,
            (s1.enabled && s1.period != last->period) ||
            (s1.enabled && s1.duty_cycle != last->duty_cycle)) {
                dev_err(chip->dev,
-                       ".apply is not idempotent (ena=%d pol=%d %u/%u) -> (ena=%d pol=%d %u/%u)\n",
+                       ".apply is not idempotent (ena=%d pol=%d %llu/%llu) -> (ena=%d pol=%d %llu/%llu)\n",
                        s1.enabled, s1.polarity, s1.duty_cycle, s1.period,
                        last->enabled, last->polarity, last->duty_cycle,
                        last->period);
@@ -1284,8 +1284,8 @@ static void pwm_dbg_show(struct pwm_chip *chip, struct seq_file *s)
                if (state.enabled)
                        seq_puts(s, " enabled");
 
-               seq_printf(s, " period: %u ns", state.period);
-               seq_printf(s, " duty: %u ns", state.duty_cycle);
+               seq_printf(s, " period: %llu ns", state.period);
+               seq_printf(s, " duty: %llu ns", state.duty_cycle);
                seq_printf(s, " polarity: %s",
                           state.polarity ? "inverse" : "normal");
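
The %u to %llu changes above track the core change that widened struct pwm_state's period and duty_cycle to u64; the driver hunks that follow switch to 64-bit division helpers for the same reason. A hedged sketch of that pattern (illustrative only; DIV64_U64_ROUND_CLOSEST() lives in <linux/math64.h> and, unlike DIV_ROUND_CLOSEST_ULL(), accepts a divisor wider than 32 bits):

#include <linux/math64.h>

/* Map a duty cycle onto a hardware counter range with 64-bit nanosecond values. */
static u32 duty_to_counter(u64 duty_ns, u64 period_ns, u32 counter_max)
{
        u64 scaled = duty_ns * counter_max;

        /* period_ns may now exceed U32_MAX, so a u64/u64 division is needed;
         * duty_ns <= period_ns keeps the result within counter_max. */
        return DIV64_U64_ROUND_CLOSEST(scaled, period_ns);
}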
 
index 1f829ed..79b1e58 100644 (file)
@@ -85,8 +85,6 @@ static void iproc_pwmc_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
        u64 tmp, multi, rate;
        u32 value, prescale;
 
-       rate = clk_get_rate(ip->clk);
-
        value = readl(ip->base + IPROC_PWM_CTRL_OFFSET);
 
        if (value & BIT(IPROC_PWM_CTRL_EN_SHIFT(pwm->hwpwm)))
@@ -99,6 +97,13 @@ static void iproc_pwmc_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
        else
                state->polarity = PWM_POLARITY_INVERSED;
 
+       rate = clk_get_rate(ip->clk);
+       if (rate == 0) {
+               state->period = 0;
+               state->duty_cycle = 0;
+               return;
+       }
+
        value = readl(ip->base + IPROC_PWM_PRESCALE_OFFSET);
        prescale = value >> IPROC_PWM_PRESCALE_SHIFT(pwm->hwpwm);
        prescale &= IPROC_PWM_PRESCALE_MAX;
@@ -143,8 +148,7 @@ static int iproc_pwmc_apply(struct pwm_chip *chip, struct pwm_device *pwm,
                value = rate * state->duty_cycle;
                duty = div64_u64(value, div);
 
-               if (period < IPROC_PWM_PERIOD_MIN ||
-                   duty < IPROC_PWM_DUTY_CYCLE_MIN)
+               if (period < IPROC_PWM_PERIOD_MIN)
                        return -EINVAL;
 
                if (period <= IPROC_PWM_PERIOD_MAX &&
index 81da91d..16c5898 100644 (file)
@@ -138,7 +138,7 @@ static int kona_pwmc_config(struct pwm_chip *chip, struct pwm_device *pwm,
                dc = div64_u64(val, div);
 
                /* If duty_ns or period_ns are not achievable then return */
-               if (pc < PERIOD_COUNT_MIN || dc < DUTY_CYCLE_HIGH_MIN)
+               if (pc < PERIOD_COUNT_MIN)
                        return -EINVAL;
 
                /* If pc and dc are in bounds, the calculation is done */
index 924d39a..ba9500a 100644 (file)
@@ -43,7 +43,7 @@ static void clps711x_pwm_update_val(struct clps711x_chip *priv, u32 n, u32 v)
 static unsigned int clps711x_get_duty(struct pwm_device *pwm, unsigned int v)
 {
        /* Duty cycle 0..15 max */
-       return DIV_ROUND_CLOSEST(v * 0xf, pwm->args.period);
+       return DIV64_U64_ROUND_CLOSEST(v * 0xf, pwm->args.period);
 }
 
 static int clps711x_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
index 5f3d7f7..fcdf6be 100644 (file)
@@ -124,7 +124,7 @@ static int pwm_imx_tpm_round_state(struct pwm_chip *chip,
                real_state->duty_cycle = state->duty_cycle;
 
        tmp = (u64)p->mod * real_state->duty_cycle;
-       p->val = DIV_ROUND_CLOSEST_ULL(tmp, real_state->period);
+       p->val = DIV64_U64_ROUND_CLOSEST(tmp, real_state->period);
 
        real_state->polarity = state->polarity;
        real_state->enabled = state->enabled;
index 732a6f3..c50d453 100644 (file)
@@ -202,7 +202,7 @@ static void pwm_imx27_wait_fifo_slot(struct pwm_chip *chip,
        sr = readl(imx->mmio_base + MX3_PWMSR);
        fifoav = FIELD_GET(MX3_PWMSR_FIFOAV, sr);
        if (fifoav == MX3_PWMSR_FIFOAV_4WORDS) {
-               period_ms = DIV_ROUND_UP(pwm_get_period(pwm),
+               period_ms = DIV_ROUND_UP_ULL(pwm_get_period(pwm),
                                         NSEC_PER_MSEC);
                msleep(period_ms);
 
index 674f0e2..7d33e36 100644 (file)
 #include <linux/regmap.h>
 #include <linux/slab.h>
 
-#define IQS620_PWR_SETTINGS                    0xD2
+#define IQS620_PWR_SETTINGS                    0xd2
 #define IQS620_PWR_SETTINGS_PWM_OUT            BIT(7)
 
-#define IQS620_PWM_DUTY_CYCLE                  0xD8
+#define IQS620_PWM_DUTY_CYCLE                  0xd8
 
 #define IQS620_PWM_PERIOD_NS                   1000000
 
@@ -46,7 +46,8 @@ static int iqs620_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
 {
        struct iqs620_pwm_private *iqs620_pwm;
        struct iqs62x_core *iqs62x;
-       int duty_scale, ret;
+       u64 duty_scale;
+       int ret;
 
        if (state->polarity != PWM_POLARITY_NORMAL)
                return -ENOTSUPP;
@@ -69,7 +70,7 @@ static int iqs620_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
         * For lower duty cycles (e.g. 0), the PWM output is simply disabled to
         * allow an external pull-down resistor to hold the GPIO3/LTX pin low.
         */
-       duty_scale = state->duty_cycle * 256 / IQS620_PWM_PERIOD_NS;
+       duty_scale = div_u64(state->duty_cycle * 256, IQS620_PWM_PERIOD_NS);
 
        mutex_lock(&iqs620_pwm->lock);
 
@@ -81,7 +82,7 @@ static int iqs620_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
        }
 
        if (duty_scale) {
-               u8 duty_val = min(duty_scale - 1, 0xFF);
+               u8 duty_val = min_t(u64, duty_scale - 1, 0xff);
 
                ret = regmap_write(iqs62x->regmap, IQS620_PWM_DUTY_CYCLE,
                                   duty_val);
@@ -93,7 +94,7 @@ static int iqs620_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
 
        if (state->enabled && duty_scale) {
                ret = regmap_update_bits(iqs62x->regmap, IQS620_PWR_SETTINGS,
-                                        IQS620_PWR_SETTINGS_PWM_OUT, 0xFF);
+                                        IQS620_PWR_SETTINGS_PWM_OUT, 0xff);
                if (ret)
                        goto err_mutex;
        }
@@ -159,7 +160,7 @@ static int iqs620_pwm_notifier(struct notifier_block *notifier,
 
        ret = regmap_update_bits(iqs62x->regmap, IQS620_PWR_SETTINGS,
                                 IQS620_PWR_SETTINGS_PWM_OUT,
-                                iqs620_pwm->out_en ? 0xFF : 0);
+                                iqs620_pwm->out_en ? 0xff : 0);
 
 err_mutex:
        mutex_unlock(&iqs620_pwm->lock);
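
The iqs620a hunks above widen duty_scale to u64 and use div_u64() so the duty_cycle * 256 product cannot truncate. A small worked example with hypothetical values (the 1000000 ns period is the fixed IQS620_PWM_PERIOD_NS from this file):

#include <linux/kernel.h>
#include <linux/math64.h>

static u8 iqs620_duty_example(void)
{
        /* Request 50% of the fixed 1000000 ns period. */
        u64 duty_scale = div_u64(500000ULL * 256, 1000000);    /* = 128 */

        /* The register takes duty_scale - 1, clamped to 8 bits. */
        return min_t(u64, duty_scale - 1, 0xff);               /* = 127 */
}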
index b94e0d0..ab001ce 100644 (file)
@@ -46,6 +46,7 @@ struct pwm_mediatek_of_data {
  * @clk_main: the clock used by PWM core
  * @clk_pwms: the clock used by each PWM channel
  * @clk_freq: the fixed clock frequency of legacy MIPS SoC
+ * @soc: pointer to chip's platform data
  */
 struct pwm_mediatek_chip {
        struct pwm_chip chip;
index 0d31833..358db4f 100644 (file)
@@ -14,7 +14,7 @@
  *   with a timer counter that goes up. When it overflows it gets
  *   reloaded with the load value and the pwm output goes up.
  *   When counter matches with match register, the output goes down.
- *   Reference Manual: http://www.ti.com/lit/ug/spruh73q/spruh73q.pdf
+ *   Reference Manual: https://www.ti.com/lit/ug/spruh73q/spruh73q.pdf
  *
  * Limitations:
  * - When PWM is stopped, timer counter gets stopped immediately. This
@@ -58,7 +58,7 @@
  * @mutex:             Mutex to protect pwm apply state
  * @dm_timer:          Pointer to omap dm timer.
  * @pdata:             Pointer to omap dm timer ops.
- * dm_timer_pdev:      Pointer to omap dm timer platform device
+ * @dm_timer_pdev:     Pointer to omap dm timer platform device
  */
 struct pwm_omap_dmtimer_chip {
        struct pwm_chip chip;
index cc63f9b..62de0bb 100644 (file)
@@ -181,7 +181,7 @@ static int pwm_sifive_apply(struct pwm_chip *chip, struct pwm_device *pwm,
         * consecutively
         */
        num = (u64)duty_cycle * (1U << PWM_SIFIVE_CMPWIDTH);
-       frac = DIV_ROUND_CLOSEST_ULL(num, state->period);
+       frac = DIV64_U64_ROUND_CLOSEST(num, state->period);
        /* The hardware cannot generate a 100% duty cycle */
        frac = min(frac, (1U << PWM_SIFIVE_CMPWIDTH) - 1);
 
index 67fca62..134c146 100644 (file)
@@ -61,7 +61,7 @@ static int stm32_pwm_lp_apply(struct pwm_chip *chip, struct pwm_device *pwm,
        do_div(div, NSEC_PER_SEC);
        if (!div) {
                /* Clock is too slow to achieve requested period. */
-               dev_dbg(priv->chip.dev, "Can't reach %u ns\n",  state->period);
+               dev_dbg(priv->chip.dev, "Can't reach %llu ns\n", state->period);
                return -EINVAL;
        }
 
index 18fbbe3..961c59c 100644 (file)
@@ -285,7 +285,7 @@ static int sun4i_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
        val = (duty & PWM_DTY_MASK) | PWM_PRD(period);
        sun4i_pwm_writel(sun4i_pwm, val, PWM_CH_PRD(pwm->hwpwm));
        sun4i_pwm->next_period[pwm->hwpwm] = jiffies +
-               usecs_to_jiffies(cstate.period / 1000 + 1);
+               nsecs_to_jiffies(cstate.period + 1000);
 
        if (state->polarity != PWM_POLARITY_NORMAL)
                ctrl &= ~BIT_CH(PWM_ACT_STATE, pwm->hwpwm);
index ab38c82..683804c 100644 (file)
@@ -2,7 +2,7 @@
 /*
  * ECAP PWM driver
  *
- * Copyright (C) 2012 Texas Instruments, Inc. - http://www.ti.com/
+ * Copyright (C) 2012 Texas Instruments, Inc. - https://www.ti.com/
  */
 
 #include <linux/module.h>
index 7b4c770..0846917 100644 (file)
@@ -2,7 +2,7 @@
 /*
  * EHRPWM PWM driver
  *
- * Copyright (C) 2012 Texas Instruments, Inc. - http://www.ti.com/
+ * Copyright (C) 2012 Texas Instruments, Inc. - https://www.ti.com/
  */
 
 #include <linux/module.h>
index 2389b86..449dbc0 100644 (file)
@@ -42,7 +42,7 @@ static ssize_t period_show(struct device *child,
 
        pwm_get_state(pwm, &state);
 
-       return sprintf(buf, "%u\n", state.period);
+       return sprintf(buf, "%llu\n", state.period);
 }
 
 static ssize_t period_store(struct device *child,
@@ -52,10 +52,10 @@ static ssize_t period_store(struct device *child,
        struct pwm_export *export = child_to_pwm_export(child);
        struct pwm_device *pwm = export->pwm;
        struct pwm_state state;
-       unsigned int val;
+       u64 val;
        int ret;
 
-       ret = kstrtouint(buf, 0, &val);
+       ret = kstrtou64(buf, 0, &val);
        if (ret)
                return ret;
 
@@ -77,7 +77,7 @@ static ssize_t duty_cycle_show(struct device *child,
 
        pwm_get_state(pwm, &state);
 
-       return sprintf(buf, "%u\n", state.duty_cycle);
+       return sprintf(buf, "%llu\n", state.duty_cycle);
 }
 
 static ssize_t duty_cycle_store(struct device *child,
index 451608e..c07ceec 100644 (file)
@@ -981,7 +981,7 @@ static int rio_mport_transfer_ioctl(struct file *filp, void __user *arg)
 
        if (unlikely(copy_from_user(transfer,
                                    (void __user *)(uintptr_t)transaction.block,
-                                   transaction.count * sizeof(*transfer)))) {
+                                   array_size(sizeof(*transfer), transaction.count)))) {
                ret = -EFAULT;
                goto out_free;
        }
@@ -994,7 +994,7 @@ static int rio_mport_transfer_ioctl(struct file *filp, void __user *arg)
 
        if (unlikely(copy_to_user((void __user *)(uintptr_t)transaction.block,
                                  transfer,
-                                 transaction.count * sizeof(*transfer))))
+                                 array_size(sizeof(*transfer), transaction.count))))
                ret = -EFAULT;
 
 out_free:
@@ -1710,8 +1710,7 @@ static int rio_mport_add_riodev(struct mport_cdev_priv *priv,
        if (rval & RIO_PEF_SWITCH) {
                rio_mport_read_config_32(mport, destid, hopcount,
                                         RIO_SWP_INFO_CAR, &swpinfo);
-               size += (RIO_GET_TOTAL_PORTS(swpinfo) *
-                        sizeof(rswitch->nextdev[0])) + sizeof(*rswitch);
+               size += struct_size(rswitch, nextdev, RIO_GET_TOTAL_PORTS(swpinfo));
        }
 
        rdev = kzalloc(size, GFP_KERNEL);
index eb8ed28..19b0c33 100644 (file)
@@ -330,7 +330,7 @@ static struct rio_dev *rio_setup_device(struct rio_net *net,
        size_t size;
        u32 swpinfo = 0;
 
-       size = sizeof(struct rio_dev);
+       size = sizeof(*rdev);
        if (rio_mport_read_config_32(port, destid, hopcount,
                                     RIO_PEF_CAR, &result))
                return NULL;
@@ -338,10 +338,8 @@ static struct rio_dev *rio_setup_device(struct rio_net *net,
        if (result & (RIO_PEF_SWITCH | RIO_PEF_MULTIPORT)) {
                rio_mport_read_config_32(port, destid, hopcount,
                                         RIO_SWP_INFO_CAR, &swpinfo);
-               if (result & RIO_PEF_SWITCH) {
-                       size += (RIO_GET_TOTAL_PORTS(swpinfo) *
-                               sizeof(rswitch->nextdev[0])) + sizeof(*rswitch);
-               }
+               if (result & RIO_PEF_SWITCH)
+                       size += struct_size(rswitch, nextdev, RIO_GET_TOTAL_PORTS(swpinfo));
        }
 
        rdev = kzalloc(size, GFP_KERNEL);
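
Both rio hunks above replace open-coded size arithmetic with array_size() and struct_size() from <linux/overflow.h>, which saturate to SIZE_MAX on overflow so the subsequent allocation or copy fails cleanly instead of operating on an undersized buffer. A hedged sketch of the trailing-array idiom (the structure and function names are hypothetical):

#include <linux/overflow.h>
#include <linux/slab.h>

struct port_table {
        unsigned int nports;
        void *nextdev[];        /* trailing flexible array, as in rio_switch */
};

static struct port_table *port_table_alloc(unsigned int nports)
{
        /* struct_size() == sizeof(*t) + nports * sizeof(t->nextdev[0]),
         * saturating to SIZE_MAX if the multiplication would overflow. */
        struct port_table *t = kzalloc(struct_size(t, nextdev, nports),
                                       GFP_KERNEL);

        if (t)
                t->nports = nports;
        return t;
}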
index c4d1731..c6659df 100644 (file)
@@ -14,6 +14,15 @@ config REMOTEPROC
 
 if REMOTEPROC
 
+config REMOTEPROC_CDEV
+       bool "Remoteproc character device interface"
+       help
+         Say y here to have a character device interface for the remoteproc
+         framework. Userspace can boot/shutdown remote processors through
+         this interface.
+
+         It's safe to say N if you don't want to use this interface.
+
 config IMX_REMOTEPROC
        tristate "IMX6/7 remoteproc support"
        depends on ARCH_MXC
@@ -116,6 +125,9 @@ config KEYSTONE_REMOTEPROC
          It's safe to say N here if you're not interested in the Keystone
          DSPs or just want to use a bare minimum kernel.
 
+config QCOM_PIL_INFO
+       tristate
+
 config QCOM_RPROC_COMMON
        tristate
 
@@ -132,6 +144,7 @@ config QCOM_Q6V5_ADSP
        depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n
        depends on QCOM_SYSMON || QCOM_SYSMON=n
        select MFD_SYSCON
+       select QCOM_PIL_INFO
        select QCOM_MDT_LOADER
        select QCOM_Q6V5_COMMON
        select QCOM_RPROC_COMMON
@@ -148,8 +161,8 @@ config QCOM_Q6V5_MSS
        depends on QCOM_SYSMON || QCOM_SYSMON=n
        select MFD_SYSCON
        select QCOM_MDT_LOADER
+       select QCOM_PIL_INFO
        select QCOM_Q6V5_COMMON
-       select QCOM_Q6V5_IPA_NOTIFY
        select QCOM_RPROC_COMMON
        select QCOM_SCM
        help
@@ -164,6 +177,7 @@ config QCOM_Q6V5_PAS
        depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n
        depends on QCOM_SYSMON || QCOM_SYSMON=n
        select MFD_SYSCON
+       select QCOM_PIL_INFO
        select QCOM_MDT_LOADER
        select QCOM_Q6V5_COMMON
        select QCOM_RPROC_COMMON
@@ -182,6 +196,7 @@ config QCOM_Q6V5_WCSS
        depends on QCOM_SYSMON || QCOM_SYSMON=n
        select MFD_SYSCON
        select QCOM_MDT_LOADER
+       select QCOM_PIL_INFO
        select QCOM_Q6V5_COMMON
        select QCOM_RPROC_COMMON
        select QCOM_SCM
@@ -189,9 +204,6 @@ config QCOM_Q6V5_WCSS
          Say y here to support the Qualcomm Peripheral Image Loader for the
          Hexagon V5 based WCSS remote processors.
 
-config QCOM_Q6V5_IPA_NOTIFY
-       tristate
-
 config QCOM_SYSMON
        tristate "Qualcomm sysmon driver"
        depends on RPMSG
@@ -215,6 +227,7 @@ config QCOM_WCNSS_PIL
        depends on QCOM_SMEM
        depends on QCOM_SYSMON || QCOM_SYSMON=n
        select QCOM_MDT_LOADER
+       select QCOM_PIL_INFO
        select QCOM_RPROC_COMMON
        select QCOM_SCM
        help
@@ -249,6 +262,19 @@ config STM32_RPROC
 
          This can be either built-in or a loadable module.
 
+config TI_K3_DSP_REMOTEPROC
+       tristate "TI K3 DSP remoteproc support"
+       depends on ARCH_K3
+       select MAILBOX
+       select OMAP2PLUS_MBOX
+       help
+         Say m here to support TI's C66x and C71x DSP remote processor
+         subsystems on various TI K3 family SoCs through the remote
+         processor framework.
+
+         It's safe to say N here if you're not interested in utilizing
+         the DSP slave processors.
+
 endif # REMOTEPROC
 
 endmenu
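
The new REMOTEPROC_CDEV entry above only says that userspace can boot or shut down a remote processor through the character device; the ABI itself lives in the remoteproc_cdev.c file added by the Makefile hunk below. A heavily hedged user-space sketch (the device node name and the "start"/"stop" command strings are assumptions to be verified against that source):

/* Hypothetical user-space client of /dev/remoteproc<N>. */
#include <fcntl.h>
#include <unistd.h>

static int rproc_cdev_boot(const char *devnode)
{
        int fd = open(devnode, O_WRONLY);       /* e.g. "/dev/remoteproc0" */

        if (fd < 0)
                return -1;
        /* Assumed command string; writing "stop" would shut it down again. */
        if (write(fd, "start", 5) != 5) {
                close(fd);
                return -1;
        }
        return close(fd);
}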
index e8b886e..3dfa28e 100644 (file)
@@ -5,10 +5,12 @@
 
 obj-$(CONFIG_REMOTEPROC)               += remoteproc.o
 remoteproc-y                           := remoteproc_core.o
+remoteproc-y                           += remoteproc_coredump.o
 remoteproc-y                           += remoteproc_debugfs.o
 remoteproc-y                           += remoteproc_sysfs.o
 remoteproc-y                           += remoteproc_virtio.o
 remoteproc-y                           += remoteproc_elf_loader.o
+obj-$(CONFIG_REMOTEPROC_CDEV)          += remoteproc_cdev.o
 obj-$(CONFIG_IMX_REMOTEPROC)           += imx_rproc.o
 obj-$(CONFIG_INGENIC_VPU_RPROC)                += ingenic_rproc.o
 obj-$(CONFIG_MTK_SCP)                  += mtk_scp.o mtk_scp_ipi.o
@@ -16,13 +18,13 @@ obj-$(CONFIG_OMAP_REMOTEPROC)               += omap_remoteproc.o
 obj-$(CONFIG_WKUP_M3_RPROC)            += wkup_m3_rproc.o
 obj-$(CONFIG_DA8XX_REMOTEPROC)         += da8xx_remoteproc.o
 obj-$(CONFIG_KEYSTONE_REMOTEPROC)      += keystone_remoteproc.o
+obj-$(CONFIG_QCOM_PIL_INFO)            += qcom_pil_info.o
 obj-$(CONFIG_QCOM_RPROC_COMMON)                += qcom_common.o
 obj-$(CONFIG_QCOM_Q6V5_COMMON)         += qcom_q6v5.o
 obj-$(CONFIG_QCOM_Q6V5_ADSP)           += qcom_q6v5_adsp.o
 obj-$(CONFIG_QCOM_Q6V5_MSS)            += qcom_q6v5_mss.o
 obj-$(CONFIG_QCOM_Q6V5_PAS)            += qcom_q6v5_pas.o
 obj-$(CONFIG_QCOM_Q6V5_WCSS)           += qcom_q6v5_wcss.o
-obj-$(CONFIG_QCOM_Q6V5_IPA_NOTIFY)     += qcom_q6v5_ipa_notify.o
 obj-$(CONFIG_QCOM_SYSMON)              += qcom_sysmon.o
 obj-$(CONFIG_QCOM_WCNSS_PIL)           += qcom_wcnss_pil.o
 qcom_wcnss_pil-y                       += qcom_wcnss.o
@@ -30,3 +32,4 @@ qcom_wcnss_pil-y                      += qcom_wcnss_iris.o
 obj-$(CONFIG_ST_REMOTEPROC)            += st_remoteproc.o
 obj-$(CONFIG_ST_SLIM_REMOTEPROC)       += st_slim_rproc.o
 obj-$(CONFIG_STM32_RPROC)              += stm32_rproc.o
+obj-$(CONFIG_TI_K3_DSP_REMOTEPROC)     += ti_k3_dsp_remoteproc.o
index 189020d..1c2b21a 100644 (file)
@@ -11,7 +11,6 @@
 #include <linux/io.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
 #include <linux/remoteproc.h>
 
 #include "remoteproc_internal.h"
@@ -62,6 +61,28 @@ struct vpu {
        struct device *dev;
 };
 
+static int ingenic_rproc_prepare(struct rproc *rproc)
+{
+       struct vpu *vpu = rproc->priv;
+       int ret;
+
+       /* The clocks must be enabled for the firmware to be loaded in TCSM */
+       ret = clk_bulk_prepare_enable(ARRAY_SIZE(vpu->clks), vpu->clks);
+       if (ret)
+               dev_err(vpu->dev, "Unable to start clocks: %d\n", ret);
+
+       return ret;
+}
+
+static int ingenic_rproc_unprepare(struct rproc *rproc)
+{
+       struct vpu *vpu = rproc->priv;
+
+       clk_bulk_disable_unprepare(ARRAY_SIZE(vpu->clks), vpu->clks);
+
+       return 0;
+}
+
 static int ingenic_rproc_start(struct rproc *rproc)
 {
        struct vpu *vpu = rproc->priv;
@@ -115,6 +136,8 @@ static void *ingenic_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len)
 }
 
 static struct rproc_ops ingenic_rproc_ops = {
+       .prepare = ingenic_rproc_prepare,
+       .unprepare = ingenic_rproc_unprepare,
        .start = ingenic_rproc_start,
        .stop = ingenic_rproc_stop,
        .kick = ingenic_rproc_kick,
@@ -135,16 +158,6 @@ static irqreturn_t vpu_interrupt(int irq, void *data)
        return rproc_vq_interrupt(rproc, vring);
 }
 
-static void ingenic_rproc_disable_clks(void *data)
-{
-       struct vpu *vpu = data;
-
-       pm_runtime_resume(vpu->dev);
-       pm_runtime_disable(vpu->dev);
-
-       clk_bulk_disable_unprepare(ARRAY_SIZE(vpu->clks), vpu->clks);
-}
-
 static int ingenic_rproc_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
@@ -206,35 +219,13 @@ static int ingenic_rproc_probe(struct platform_device *pdev)
 
        disable_irq(vpu->irq);
 
-       /* The clocks must be enabled for the firmware to be loaded in TCSM */
-       ret = clk_bulk_prepare_enable(ARRAY_SIZE(vpu->clks), vpu->clks);
-       if (ret) {
-               dev_err(dev, "Unable to start clocks\n");
-               return ret;
-       }
-
-       pm_runtime_irq_safe(dev);
-       pm_runtime_set_active(dev);
-       pm_runtime_enable(dev);
-       pm_runtime_get_sync(dev);
-       pm_runtime_use_autosuspend(dev);
-
-       ret = devm_add_action_or_reset(dev, ingenic_rproc_disable_clks, vpu);
-       if (ret) {
-               dev_err(dev, "Unable to register action\n");
-               goto out_pm_put;
-       }
-
        ret = devm_rproc_add(dev, rproc);
        if (ret) {
                dev_err(dev, "Failed to register remote processor\n");
-               goto out_pm_put;
+               return ret;
        }
 
-out_pm_put:
-       pm_runtime_put_autosuspend(dev);
-
-       return ret;
+       return 0;
 }
 
 static const struct of_device_id ingenic_rproc_of_matches[] = {
@@ -243,33 +234,10 @@ static const struct of_device_id ingenic_rproc_of_matches[] = {
 };
 MODULE_DEVICE_TABLE(of, ingenic_rproc_of_matches);
 
-static int __maybe_unused ingenic_rproc_suspend(struct device *dev)
-{
-       struct vpu *vpu = dev_get_drvdata(dev);
-
-       clk_bulk_disable(ARRAY_SIZE(vpu->clks), vpu->clks);
-
-       return 0;
-}
-
-static int __maybe_unused ingenic_rproc_resume(struct device *dev)
-{
-       struct vpu *vpu = dev_get_drvdata(dev);
-
-       return clk_bulk_enable(ARRAY_SIZE(vpu->clks), vpu->clks);
-}
-
-static const struct dev_pm_ops __maybe_unused ingenic_rproc_pm = {
-       SET_RUNTIME_PM_OPS(ingenic_rproc_suspend, ingenic_rproc_resume, NULL)
-};
-
 static struct platform_driver ingenic_rproc_driver = {
        .probe = ingenic_rproc_probe,
        .driver = {
                .name = "ingenic-vpu",
-#ifdef CONFIG_PM
-               .pm = &ingenic_rproc_pm,
-#endif
                .of_match_table = ingenic_rproc_of_matches,
        },
 };
index 9028cea..085fd73 100644 (file)
 #include <linux/module.h>
 #include <linux/notifier.h>
 #include <linux/remoteproc.h>
+#include <linux/remoteproc/qcom_rproc.h>
 #include <linux/rpmsg/qcom_glink.h>
 #include <linux/rpmsg/qcom_smd.h>
+#include <linux/slab.h>
 #include <linux/soc/qcom/mdt_loader.h>
 
 #include "remoteproc_internal.h"
 #define to_smd_subdev(d) container_of(d, struct qcom_rproc_subdev, subdev)
 #define to_ssr_subdev(d) container_of(d, struct qcom_rproc_ssr, subdev)
 
-static BLOCKING_NOTIFIER_HEAD(ssr_notifiers);
+struct qcom_ssr_subsystem {
+       const char *name;
+       struct srcu_notifier_head notifier_list;
+       struct list_head list;
+};
+
+static LIST_HEAD(qcom_ssr_subsystem_list);
+static DEFINE_MUTEX(qcom_ssr_subsys_lock);
 
 static int glink_subdev_start(struct rproc_subdev *subdev)
 {
@@ -189,37 +198,122 @@ void qcom_remove_smd_subdev(struct rproc *rproc, struct qcom_rproc_subdev *smd)
 }
 EXPORT_SYMBOL_GPL(qcom_remove_smd_subdev);
 
+static struct qcom_ssr_subsystem *qcom_ssr_get_subsys(const char *name)
+{
+       struct qcom_ssr_subsystem *info;
+
+       mutex_lock(&qcom_ssr_subsys_lock);
+       /* Match in the global qcom_ssr_subsystem_list with name */
+       list_for_each_entry(info, &qcom_ssr_subsystem_list, list)
+               if (!strcmp(info->name, name))
+                       goto out;
+
+       info = kzalloc(sizeof(*info), GFP_KERNEL);
+       if (!info) {
+               info = ERR_PTR(-ENOMEM);
+               goto out;
+       }
+       info->name = kstrdup_const(name, GFP_KERNEL);
+       srcu_init_notifier_head(&info->notifier_list);
+
+       /* Add to global notification list */
+       list_add_tail(&info->list, &qcom_ssr_subsystem_list);
+
+out:
+       mutex_unlock(&qcom_ssr_subsys_lock);
+       return info;
+}
+
 /**
  * qcom_register_ssr_notifier() - register SSR notification handler
- * @nb:                notifier_block to notify for restart notifications
+ * @name:      Subsystem's SSR name
+ * @nb:                notifier_block to be invoked upon subsystem's state change
  *
- * Returns 0 on success, negative errno on failure.
+ * This registers the @nb notifier block as part of the notifier chain for a
+ * remoteproc associated with @name. The notifier block's callback
+ * will be invoked when the remote processor's SSR events occur
+ * (pre/post startup and pre/post shutdown).
  *
- * This register the @notify function as handler for restart notifications. As
- * remote processors are stopped this function will be called, with the SSR
- * name passed as a parameter.
+ * Return: a subsystem cookie on success, ERR_PTR on failure.
  */
-int qcom_register_ssr_notifier(struct notifier_block *nb)
+void *qcom_register_ssr_notifier(const char *name, struct notifier_block *nb)
 {
-       return blocking_notifier_chain_register(&ssr_notifiers, nb);
+       struct qcom_ssr_subsystem *info;
+
+       info = qcom_ssr_get_subsys(name);
+       if (IS_ERR(info))
+               return info;
+
+       srcu_notifier_chain_register(&info->notifier_list, nb);
+
+       return &info->notifier_list;
 }
 EXPORT_SYMBOL_GPL(qcom_register_ssr_notifier);
 
 /**
  * qcom_unregister_ssr_notifier() - unregister SSR notification handler
+ * @notify:    subsystem cookie returned from qcom_register_ssr_notifier
  * @nb:                notifier_block to unregister
+ *
+ * This function will unregister the notifier from the particular notifier
+ * chain.
+ *
+ * Return: 0 on success, %ENOENT otherwise.
  */
-void qcom_unregister_ssr_notifier(struct notifier_block *nb)
+int qcom_unregister_ssr_notifier(void *notify, struct notifier_block *nb)
 {
-       blocking_notifier_chain_unregister(&ssr_notifiers, nb);
+       return srcu_notifier_chain_unregister(notify, nb);
 }
 EXPORT_SYMBOL_GPL(qcom_unregister_ssr_notifier);
 
+static int ssr_notify_prepare(struct rproc_subdev *subdev)
+{
+       struct qcom_rproc_ssr *ssr = to_ssr_subdev(subdev);
+       struct qcom_ssr_notify_data data = {
+               .name = ssr->info->name,
+               .crashed = false,
+       };
+
+       srcu_notifier_call_chain(&ssr->info->notifier_list,
+                                QCOM_SSR_BEFORE_POWERUP, &data);
+       return 0;
+}
+
+static int ssr_notify_start(struct rproc_subdev *subdev)
+{
+       struct qcom_rproc_ssr *ssr = to_ssr_subdev(subdev);
+       struct qcom_ssr_notify_data data = {
+               .name = ssr->info->name,
+               .crashed = false,
+       };
+
+       srcu_notifier_call_chain(&ssr->info->notifier_list,
+                                QCOM_SSR_AFTER_POWERUP, &data);
+       return 0;
+}
+
+static void ssr_notify_stop(struct rproc_subdev *subdev, bool crashed)
+{
+       struct qcom_rproc_ssr *ssr = to_ssr_subdev(subdev);
+       struct qcom_ssr_notify_data data = {
+               .name = ssr->info->name,
+               .crashed = crashed,
+       };
+
+       srcu_notifier_call_chain(&ssr->info->notifier_list,
+                                QCOM_SSR_BEFORE_SHUTDOWN, &data);
+}
+
 static void ssr_notify_unprepare(struct rproc_subdev *subdev)
 {
        struct qcom_rproc_ssr *ssr = to_ssr_subdev(subdev);
+       struct qcom_ssr_notify_data data = {
+               .name = ssr->info->name,
+               .crashed = false,
+       };
 
-       blocking_notifier_call_chain(&ssr_notifiers, 0, (void *)ssr->name);
+       srcu_notifier_call_chain(&ssr->info->notifier_list,
+                                QCOM_SSR_AFTER_SHUTDOWN, &data);
 }
 
 /**
@@ -229,12 +323,24 @@ static void ssr_notify_unprepare(struct rproc_subdev *subdev)
  * @ssr_name:  identifier to use for notifications originating from @rproc
  *
  * As the @ssr is registered with the @rproc SSR events will be sent to all
- * registered listeners in the system as the remoteproc is shut down.
+ * registered listeners for the remoteproc when its SSR events occur
+ * (pre/post startup and pre/post shutdown).
  */
 void qcom_add_ssr_subdev(struct rproc *rproc, struct qcom_rproc_ssr *ssr,
                         const char *ssr_name)
 {
-       ssr->name = ssr_name;
+       struct qcom_ssr_subsystem *info;
+
+       info = qcom_ssr_get_subsys(ssr_name);
+       if (IS_ERR(info)) {
+               dev_err(&rproc->dev, "Failed to add ssr subdevice\n");
+               return;
+       }
+
+       ssr->info = info;
+       ssr->subdev.prepare = ssr_notify_prepare;
+       ssr->subdev.start = ssr_notify_start;
+       ssr->subdev.stop = ssr_notify_stop;
        ssr->subdev.unprepare = ssr_notify_unprepare;
 
        rproc_add_subdev(rproc, &ssr->subdev);
@@ -249,6 +355,7 @@ EXPORT_SYMBOL_GPL(qcom_add_ssr_subdev);
 void qcom_remove_ssr_subdev(struct rproc *rproc, struct qcom_rproc_ssr *ssr)
 {
        rproc_remove_subdev(rproc, &ssr->subdev);
+       ssr->info = NULL;
 }
 EXPORT_SYMBOL_GPL(qcom_remove_ssr_subdev);
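
The reworked API above replaces the single global chain with per-subsystem SRCU notifier chains keyed by name and returns an opaque cookie for unregistration. A hedged usage sketch for a client driver (the subsystem name "mpss" and the example_ identifiers are hypothetical; the events and the qcom_ssr_notify_data fields come from the hunks above and <linux/remoteproc/qcom_rproc.h>):

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/remoteproc/qcom_rproc.h>

static int example_ssr_cb(struct notifier_block *nb, unsigned long event,
                          void *data)
{
        struct qcom_ssr_notify_data *ssr = data;

        switch (event) {
        case QCOM_SSR_BEFORE_SHUTDOWN:
                pr_info("%s going down, crashed=%d\n", ssr->name, ssr->crashed);
                break;
        case QCOM_SSR_AFTER_POWERUP:
                pr_info("%s back up\n", ssr->name);
                break;
        default:
                break;
        }

        return NOTIFY_OK;
}

static struct notifier_block example_nb = {
        .notifier_call = example_ssr_cb,
};
static void *example_cookie;

static int example_register(void)
{
        example_cookie = qcom_register_ssr_notifier("mpss", &example_nb);

        return PTR_ERR_OR_ZERO(example_cookie);
}

static void example_unregister(void)
{
        qcom_unregister_ssr_notifier(example_cookie, &example_nb);
}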
 
index 34e5188..dfc641c 100644 (file)
@@ -26,10 +26,11 @@ struct qcom_rproc_subdev {
        struct qcom_smd_edge *edge;
 };
 
+struct qcom_ssr_subsystem;
+
 struct qcom_rproc_ssr {
        struct rproc_subdev subdev;
-
-       const char *name;
+       struct qcom_ssr_subsystem *info;
 };
 
 void qcom_add_glink_subdev(struct rproc *rproc, struct qcom_rproc_glink *glink,
diff --git a/drivers/remoteproc/qcom_pil_info.c b/drivers/remoteproc/qcom_pil_info.c
new file mode 100644 (file)
index 0000000..5521c44
--- /dev/null
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019-2020 Linaro Ltd.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_address.h>
+#include "qcom_pil_info.h"
+
+/*
+ * The PIL relocation information region is used to communicate memory regions
+ * occupied by co-processor firmware for post mortem crash analysis.
+ *
+ * It consists of an array of entries, each with an 8-byte textual identifier
+ * of the region followed by a 64-bit base address and a 32-bit size, both
+ * little endian.
+ */
+#define PIL_RELOC_NAME_LEN     8
+#define PIL_RELOC_ENTRY_SIZE   (PIL_RELOC_NAME_LEN + sizeof(__le64) + sizeof(__le32))
+
+struct pil_reloc {
+       void __iomem *base;
+       size_t num_entries;
+};
+
+static struct pil_reloc _reloc __read_mostly;
+static DEFINE_MUTEX(pil_reloc_lock);
+
+static int qcom_pil_info_init(void)
+{
+       struct device_node *np;
+       struct resource imem;
+       void __iomem *base;
+       int ret;
+
+       /* Already initialized? */
+       if (_reloc.base)
+               return 0;
+
+       np = of_find_compatible_node(NULL, NULL, "qcom,pil-reloc-info");
+       if (!np)
+               return -ENOENT;
+
+       ret = of_address_to_resource(np, 0, &imem);
+       of_node_put(np);
+       if (ret < 0)
+               return ret;
+
+       base = ioremap(imem.start, resource_size(&imem));
+       if (!base) {
+               pr_err("failed to map PIL relocation info region\n");
+               return -ENOMEM;
+       }
+
+       memset_io(base, 0, resource_size(&imem));
+
+       _reloc.base = base;
+       _reloc.num_entries = resource_size(&imem) / PIL_RELOC_ENTRY_SIZE;
+
+       return 0;
+}
+
+/**
+ * qcom_pil_info_store() - store PIL information of image in IMEM
+ * @image:     name of the image
+ * @base:      base address of the loaded image
+ * @size:      size of the loaded image
+ *
+ * Return: 0 on success, negative errno on failure
+ */
+int qcom_pil_info_store(const char *image, phys_addr_t base, size_t size)
+{
+       char buf[PIL_RELOC_NAME_LEN];
+       void __iomem *entry;
+       int ret;
+       int i;
+
+       mutex_lock(&pil_reloc_lock);
+       ret = qcom_pil_info_init();
+       if (ret < 0) {
+               mutex_unlock(&pil_reloc_lock);
+               return ret;
+       }
+
+       for (i = 0; i < _reloc.num_entries; i++) {
+               entry = _reloc.base + i * PIL_RELOC_ENTRY_SIZE;
+
+               memcpy_fromio(buf, entry, PIL_RELOC_NAME_LEN);
+
+               /*
+                * An empty record means we didn't find it, given that the
+                * records are packed.
+                */
+               if (!buf[0])
+                       goto found_unused;
+
+               if (!strncmp(buf, image, PIL_RELOC_NAME_LEN))
+                       goto found_existing;
+       }
+
+       pr_warn("insufficient PIL info slots\n");
+       mutex_unlock(&pil_reloc_lock);
+       return -ENOMEM;
+
+found_unused:
+       memcpy_toio(entry, image, PIL_RELOC_NAME_LEN);
+found_existing:
+       /* Use two writel() as base is only aligned to 4 bytes on odd entries */
+       writel(base, entry + PIL_RELOC_NAME_LEN);
+       writel((u64)base >> 32, entry + PIL_RELOC_NAME_LEN + 4);
+       writel(size, entry + PIL_RELOC_NAME_LEN + sizeof(__le64));
+       mutex_unlock(&pil_reloc_lock);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(qcom_pil_info_store);
+
+static void __exit pil_reloc_exit(void)
+{
+       mutex_lock(&pil_reloc_lock);
+       iounmap(_reloc.base);
+       _reloc.base = NULL;
+       mutex_unlock(&pil_reloc_lock);
+}
+module_exit(pil_reloc_exit);
+
+MODULE_DESCRIPTION("Qualcomm PIL relocation info");
+MODULE_LICENSE("GPL v2");
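
The driver above addresses the IMEM region with raw offsets; purely as an illustration of the record format described in its header comment, each 20-byte slot (PIL_RELOC_ENTRY_SIZE) can be pictured as the following hypothetical structure, which is not defined anywhere in the driver:

#include <linux/types.h>

struct pil_reloc_entry_layout {
        char   name[8];         /* PIL_RELOC_NAME_LEN, not NUL-terminated when full */
        __le64 base;            /* load address; only 4-byte aligned on odd entries */
        __le32 size;            /* size of the loaded image */
} __packed;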
diff --git a/drivers/remoteproc/qcom_pil_info.h b/drivers/remoteproc/qcom_pil_info.h
new file mode 100644 (file)
index 0000000..0dce614
--- /dev/null
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __QCOM_PIL_INFO_H__
+#define __QCOM_PIL_INFO_H__
+
+#include <linux/types.h>
+
+int qcom_pil_info_store(const char *image, phys_addr_t base, size_t size);
+
+#endif
index 111a442..fd6fd36 100644 (file)
@@ -153,6 +153,8 @@ int qcom_q6v5_request_stop(struct qcom_q6v5 *q6v5)
 {
        int ret;
 
+       q6v5->running = false;
+
        qcom_smem_state_update_bits(q6v5->state,
                                    BIT(q6v5->stop_bit), BIT(q6v5->stop_bit));
 
index d2a2574..efb2c1a 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/soc/qcom/smem_state.h>
 
 #include "qcom_common.h"
+#include "qcom_pil_info.h"
 #include "qcom_q6v5.h"
 #include "remoteproc_internal.h"
 
@@ -82,6 +83,7 @@ struct qcom_adsp {
        unsigned int halt_lpass;
 
        int crash_reason_smem;
+       const char *info_name;
 
        struct completion start_done;
        struct completion stop_done;
@@ -164,10 +166,17 @@ reset:
 static int adsp_load(struct rproc *rproc, const struct firmware *fw)
 {
        struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv;
+       int ret;
+
+       ret = qcom_mdt_load_no_init(adsp->dev, fw, rproc->firmware, 0,
+                                   adsp->mem_region, adsp->mem_phys,
+                                   adsp->mem_size, &adsp->mem_reloc);
+       if (ret)
+               return ret;
+
+       qcom_pil_info_store(adsp->info_name, adsp->mem_phys, adsp->mem_size);
 
-       return qcom_mdt_load_no_init(adsp->dev, fw, rproc->firmware, 0,
-                            adsp->mem_region, adsp->mem_phys, adsp->mem_size,
-                            &adsp->mem_reloc);
+       return 0;
 }
 
 static int adsp_start(struct rproc *rproc)
@@ -436,6 +445,7 @@ static int adsp_probe(struct platform_device *pdev)
        adsp = (struct qcom_adsp *)rproc->priv;
        adsp->dev = &pdev->dev;
        adsp->rproc = rproc;
+       adsp->info_name = desc->sysmon_name;
        platform_set_drvdata(pdev, adsp);
 
        ret = adsp_alloc_memory_region(adsp);
diff --git a/drivers/remoteproc/qcom_q6v5_ipa_notify.c b/drivers/remoteproc/qcom_q6v5_ipa_notify.c
deleted file mode 100644 (file)
index e1c10a1..0000000
+++ /dev/null
@@ -1,85 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-/*
- * Qualcomm IPA notification subdev support
- *
- * Copyright (C) 2019 Linaro Ltd.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/remoteproc.h>
-#include <linux/remoteproc/qcom_q6v5_ipa_notify.h>
-
-static void
-ipa_notify_common(struct rproc_subdev *subdev, enum qcom_rproc_event event)
-{
-       struct qcom_rproc_ipa_notify *ipa_notify;
-       qcom_ipa_notify_t notify;
-
-       ipa_notify = container_of(subdev, struct qcom_rproc_ipa_notify, subdev);
-       notify = ipa_notify->notify;
-       if (notify)
-               notify(ipa_notify->data, event);
-}
-
-static int ipa_notify_prepare(struct rproc_subdev *subdev)
-{
-       ipa_notify_common(subdev, MODEM_STARTING);
-
-       return 0;
-}
-
-static int ipa_notify_start(struct rproc_subdev *subdev)
-{
-       ipa_notify_common(subdev, MODEM_RUNNING);
-
-       return 0;
-}
-
-static void ipa_notify_stop(struct rproc_subdev *subdev, bool crashed)
-
-{
-       ipa_notify_common(subdev, crashed ? MODEM_CRASHED : MODEM_STOPPING);
-}
-
-static void ipa_notify_unprepare(struct rproc_subdev *subdev)
-{
-       ipa_notify_common(subdev, MODEM_OFFLINE);
-}
-
-static void ipa_notify_removing(struct rproc_subdev *subdev)
-{
-       ipa_notify_common(subdev, MODEM_REMOVING);
-}
-
-/* Register the IPA notification subdevice with the Q6V5 MSS remoteproc */
-void qcom_add_ipa_notify_subdev(struct rproc *rproc,
-               struct qcom_rproc_ipa_notify *ipa_notify)
-{
-       ipa_notify->notify = NULL;
-       ipa_notify->data = NULL;
-       ipa_notify->subdev.prepare = ipa_notify_prepare;
-       ipa_notify->subdev.start = ipa_notify_start;
-       ipa_notify->subdev.stop = ipa_notify_stop;
-       ipa_notify->subdev.unprepare = ipa_notify_unprepare;
-
-       rproc_add_subdev(rproc, &ipa_notify->subdev);
-}
-EXPORT_SYMBOL_GPL(qcom_add_ipa_notify_subdev);
-
-/* Remove the IPA notification subdevice */
-void qcom_remove_ipa_notify_subdev(struct rproc *rproc,
-               struct qcom_rproc_ipa_notify *ipa_notify)
-{
-       struct rproc_subdev *subdev = &ipa_notify->subdev;
-
-       ipa_notify_removing(subdev);
-
-       rproc_remove_subdev(rproc, subdev);
-       ipa_notify->notify = NULL;      /* Make it obvious */
-}
-EXPORT_SYMBOL_GPL(qcom_remove_ipa_notify_subdev);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("Qualcomm IPA notification remoteproc subdev");
index 903b2bb..c401bcc 100644 (file)
@@ -9,6 +9,7 @@
 
 #include <linux/clk.h>
 #include <linux/delay.h>
+#include <linux/devcoredump.h>
 #include <linux/dma-mapping.h>
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
@@ -22,7 +23,6 @@
 #include <linux/regmap.h>
 #include <linux/regulator/consumer.h>
 #include <linux/remoteproc.h>
-#include "linux/remoteproc/qcom_q6v5_ipa_notify.h"
 #include <linux/reset.h>
 #include <linux/soc/qcom/mdt_loader.h>
 #include <linux/iopoll.h>
 
 #include "remoteproc_internal.h"
 #include "qcom_common.h"
+#include "qcom_pil_info.h"
 #include "qcom_q6v5.h"
 
 #include <linux/qcom_scm.h>
 
 #define MPSS_CRASH_REASON_SMEM         421
 
+#define MBA_LOG_SIZE                   SZ_4K
+
 /* RMB Status Register Values */
 #define RMB_PBL_SUCCESS                        0x1
 
 #define QDSP6SS_SLEEP                   0x3C
 #define QDSP6SS_BOOT_CORE_START         0x400
 #define QDSP6SS_BOOT_CMD                0x404
-#define QDSP6SS_BOOT_STATUS            0x408
-#define BOOT_STATUS_TIMEOUT_US         200
 #define BOOT_FSM_TIMEOUT                10000
 
 struct reg_info {
@@ -140,6 +141,7 @@ struct rproc_hexagon_res {
        int version;
        bool need_mem_protection;
        bool has_alt_reset;
+       bool has_mba_logs;
        bool has_spare_reg;
 };
 
@@ -179,15 +181,14 @@ struct q6v5 {
        int active_reg_count;
        int proxy_reg_count;
 
-       bool running;
-
        bool dump_mba_loaded;
-       unsigned long dump_segment_mask;
-       unsigned long dump_complete_mask;
+       size_t current_dump_size;
+       size_t total_dump_size;
 
        phys_addr_t mba_phys;
        void *mba_region;
        size_t mba_size;
+       size_t dp_size;
 
        phys_addr_t mpss_phys;
        phys_addr_t mpss_reloc;
@@ -196,10 +197,10 @@ struct q6v5 {
        struct qcom_rproc_glink glink_subdev;
        struct qcom_rproc_subdev smd_subdev;
        struct qcom_rproc_ssr ssr_subdev;
-       struct qcom_rproc_ipa_notify ipa_notify_subdev;
        struct qcom_sysmon *sysmon;
        bool need_mem_protection;
        bool has_alt_reset;
+       bool has_mba_logs;
        bool has_spare_reg;
        int mpss_perm;
        int mba_perm;
@@ -404,11 +405,33 @@ static int q6v5_xfer_mem_ownership(struct q6v5 *qproc, int *current_perm,
                                   current_perm, next, perms);
 }
 
+static void q6v5_debug_policy_load(struct q6v5 *qproc)
+{
+       const struct firmware *dp_fw;
+
+       if (request_firmware_direct(&dp_fw, "msadp", qproc->dev))
+               return;
+
+       if (SZ_1M + dp_fw->size <= qproc->mba_size) {
+               memcpy(qproc->mba_region + SZ_1M, dp_fw->data, dp_fw->size);
+               qproc->dp_size = dp_fw->size;
+       }
+
+       release_firmware(dp_fw);
+}
+
 static int q6v5_load(struct rproc *rproc, const struct firmware *fw)
 {
        struct q6v5 *qproc = rproc->priv;
 
+       /* MBA is restricted to a maximum size of 1M */
+       if (fw->size > qproc->mba_size || fw->size > SZ_1M) {
+               dev_err(qproc->dev, "MBA firmware load failed\n");
+               return -EINVAL;
+       }
+
        memcpy(qproc->mba_region, fw->data, fw->size);
+       q6v5_debug_policy_load(qproc);
 
        return 0;
 }
@@ -511,6 +534,26 @@ static int q6v5_rmb_mba_wait(struct q6v5 *qproc, u32 status, int ms)
        return val;
 }
 
+static void q6v5_dump_mba_logs(struct q6v5 *qproc)
+{
+       struct rproc *rproc = qproc->rproc;
+       void *data;
+
+       if (!qproc->has_mba_logs)
+               return;
+
+       if (q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true, false, qproc->mba_phys,
+                                   qproc->mba_size))
+               return;
+
+       data = vmalloc(MBA_LOG_SIZE);
+       if (!data)
+               return;
+
+       memcpy(data, qproc->mba_region, MBA_LOG_SIZE);
+       dev_coredumpv(&rproc->dev, data, MBA_LOG_SIZE, GFP_KERNEL);
+}
+
 static int q6v5proc_reset(struct q6v5 *qproc)
 {
        u32 val;
@@ -579,13 +622,15 @@ static int q6v5proc_reset(struct q6v5 *qproc)
                /* De-assert the Q6 stop core signal */
                writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START);
 
+               /* Wait for 10 us for any staggering logic to settle */
+               usleep_range(10, 20);
+
                /* Trigger the boot FSM to start the Q6 out-of-reset sequence */
                writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD);
 
-               /* Poll the QDSP6SS_BOOT_STATUS for FSM completion */
-               ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_BOOT_STATUS,
-                                        val, (val & BIT(0)) != 0, 1,
-                                        BOOT_STATUS_TIMEOUT_US);
+               /* Poll the MSS_STATUS for FSM completion */
+               ret = readl_poll_timeout(qproc->rmb_base + RMB_MBA_MSS_STATUS,
+                                        val, (val & BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT);
                if (ret) {
                        dev_err(qproc->dev, "Boot FSM failed to complete.\n");
                        /* Reset the modem so that boot FSM is in reset state */
@@ -829,6 +874,7 @@ static int q6v5_mba_load(struct q6v5 *qproc)
 {
        int ret;
        int xfermemop_ret;
+       bool mba_load_err = false;
 
        qcom_q6v5_prepare(&qproc->q6v5);
 
@@ -895,6 +941,10 @@ static int q6v5_mba_load(struct q6v5 *qproc)
        }
 
        writel(qproc->mba_phys, qproc->rmb_base + RMB_MBA_IMAGE_REG);
+       if (qproc->dp_size) {
+               writel(qproc->mba_phys + SZ_1M, qproc->rmb_base + RMB_PMI_CODE_START_REG);
+               writel(qproc->dp_size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
+       }
 
        ret = q6v5proc_reset(qproc);
        if (ret)
@@ -918,7 +968,7 @@ halt_axi_ports:
        q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
        q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
        q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
-
+       mba_load_err = true;
 reclaim_mba:
        xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
                                                false, qproc->mba_phys,
@@ -926,6 +976,8 @@ reclaim_mba:
        if (xfermemop_ret) {
                dev_err(qproc->dev,
                        "Failed to reclaim mba buffer, system may become unstable\n");
+       } else if (mba_load_err) {
+               q6v5_dump_mba_logs(qproc);
        }
 
 disable_active_clks:
@@ -961,6 +1013,7 @@ static void q6v5_mba_reclaim(struct q6v5 *qproc)
        u32 val;
 
        qproc->dump_mba_loaded = false;
+       qproc->dp_size = 0;
 
        q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
        q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
@@ -1139,15 +1192,14 @@ static int q6v5_mpss_load(struct q6v5 *qproc)
                } else if (phdr->p_filesz) {
                        /* Replace "xxx.xxx" with "xxx.bxx" */
                        sprintf(fw_name + fw_name_len - 3, "b%02d", i);
-                       ret = request_firmware(&seg_fw, fw_name, qproc->dev);
+                       ret = request_firmware_into_buf(&seg_fw, fw_name, qproc->dev,
+                                                       ptr, phdr->p_filesz);
                        if (ret) {
                                dev_err(qproc->dev, "failed to load %s\n", fw_name);
                                iounmap(ptr);
                                goto release_firmware;
                        }
 
-                       memcpy(ptr, seg_fw->data, seg_fw->size);
-
                        release_firmware(seg_fw);
                }
 
@@ -1190,6 +1242,8 @@ static int q6v5_mpss_load(struct q6v5 *qproc)
        else if (ret < 0)
                dev_err(qproc->dev, "MPSS authentication failed: %d\n", ret);
 
+       qcom_pil_info_store("modem", qproc->mpss_phys, qproc->mpss_size);
+
 release_firmware:
        release_firmware(fw);
 out:
@@ -1200,11 +1254,10 @@ out:
 
 static void qcom_q6v5_dump_segment(struct rproc *rproc,
                                   struct rproc_dump_segment *segment,
-                                  void *dest)
+                                  void *dest, size_t cp_offset, size_t size)
 {
        int ret = 0;
        struct q6v5 *qproc = rproc->priv;
-       unsigned long mask = BIT((unsigned long)segment->priv);
        int offset = segment->da - qproc->mpss_reloc;
        void *ptr = NULL;
 
@@ -1221,19 +1274,19 @@ static void qcom_q6v5_dump_segment(struct rproc *rproc,
        }
 
        if (!ret)
-               ptr = ioremap_wc(qproc->mpss_phys + offset, segment->size);
+               ptr = ioremap_wc(qproc->mpss_phys + offset + cp_offset, size);
 
        if (ptr) {
-               memcpy(dest, ptr, segment->size);
+               memcpy(dest, ptr, size);
                iounmap(ptr);
        } else {
-               memset(dest, 0xff, segment->size);
+               memset(dest, 0xff, size);
        }
 
-       qproc->dump_segment_mask |= mask;
+       qproc->current_dump_size += size;
 
        /* Reclaim mba after copying segments */
-       if (qproc->dump_segment_mask == qproc->dump_complete_mask) {
+       if (qproc->current_dump_size == qproc->total_dump_size) {
                if (qproc->dump_mba_loaded) {
                        /* Try to reset ownership back to Q6 */
                        q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
@@ -1255,7 +1308,8 @@ static int q6v5_start(struct rproc *rproc)
        if (ret)
                return ret;
 
-       dev_info(qproc->dev, "MBA booted, loading mpss\n");
+       dev_info(qproc->dev, "MBA booted with%s debug policy, loading mpss\n",
+                qproc->dp_size ? "" : "out");
 
        ret = q6v5_mpss_load(qproc);
        if (ret)
@@ -1275,13 +1329,13 @@ static int q6v5_start(struct rproc *rproc)
                        "Failed to reclaim mba buffer system may become unstable\n");
 
        /* Reset Dump Segment Mask */
-       qproc->dump_segment_mask = 0;
-       qproc->running = true;
+       qproc->current_dump_size = 0;
 
        return 0;
 
 reclaim_mpss:
        q6v5_mba_reclaim(qproc);
+       q6v5_dump_mba_logs(qproc);
 
        return ret;
 }
@@ -1291,8 +1345,6 @@ static int q6v5_stop(struct rproc *rproc)
        struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
        int ret;
 
-       qproc->running = false;
-
        ret = qcom_q6v5_request_stop(&qproc->q6v5);
        if (ret == -ETIMEDOUT)
                dev_err(qproc->dev, "timed out on wait\n");
@@ -1324,7 +1376,7 @@ static int qcom_q6v5_register_dump_segments(struct rproc *rproc,
 
        ehdr = (struct elf32_hdr *)fw->data;
        phdrs = (struct elf32_phdr *)(ehdr + 1);
-       qproc->dump_complete_mask = 0;
+       qproc->total_dump_size = 0;
 
        for (i = 0; i < ehdr->e_phnum; i++) {
                phdr = &phdrs[i];
@@ -1335,11 +1387,11 @@ static int qcom_q6v5_register_dump_segments(struct rproc *rproc,
                ret = rproc_coredump_add_custom_segment(rproc, phdr->p_paddr,
                                                        phdr->p_memsz,
                                                        qcom_q6v5_dump_segment,
-                                                       (void *)i);
+                                                       NULL);
                if (ret)
                        break;
 
-               qproc->dump_complete_mask |= BIT(i);
+               qproc->total_dump_size += phdr->p_memsz;
        }
 
        release_firmware(fw);
@@ -1554,39 +1606,6 @@ static int q6v5_alloc_memory_region(struct q6v5 *qproc)
        return 0;
 }
 
-#if IS_ENABLED(CONFIG_QCOM_Q6V5_IPA_NOTIFY)
-
-/* Register IPA notification function */
-int qcom_register_ipa_notify(struct rproc *rproc, qcom_ipa_notify_t notify,
-                            void *data)
-{
-       struct qcom_rproc_ipa_notify *ipa_notify;
-       struct q6v5 *qproc = rproc->priv;
-
-       if (!notify)
-               return -EINVAL;
-
-       ipa_notify = &qproc->ipa_notify_subdev;
-       if (ipa_notify->notify)
-               return -EBUSY;
-
-       ipa_notify->notify = notify;
-       ipa_notify->data = data;
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(qcom_register_ipa_notify);
-
-/* Deregister IPA notification function */
-void qcom_deregister_ipa_notify(struct rproc *rproc)
-{
-       struct q6v5 *qproc = rproc->priv;
-
-       qproc->ipa_notify_subdev.notify = NULL;
-}
-EXPORT_SYMBOL_GPL(qcom_deregister_ipa_notify);
-#endif /* !IS_ENABLED(CONFIG_QCOM_Q6V5_IPA_NOTIFY) */
-
 static int q6v5_probe(struct platform_device *pdev)
 {
        const struct rproc_hexagon_res *desc;
@@ -1701,6 +1720,7 @@ static int q6v5_probe(struct platform_device *pdev)
 
        qproc->version = desc->version;
        qproc->need_mem_protection = desc->need_mem_protection;
+       qproc->has_mba_logs = desc->has_mba_logs;
 
        ret = qcom_q6v5_init(&qproc->q6v5, pdev, rproc, MPSS_CRASH_REASON_SMEM,
                             qcom_msa_handover);
@@ -1712,7 +1732,6 @@ static int q6v5_probe(struct platform_device *pdev)
        qcom_add_glink_subdev(rproc, &qproc->glink_subdev, "mpss");
        qcom_add_smd_subdev(rproc, &qproc->smd_subdev);
        qcom_add_ssr_subdev(rproc, &qproc->ssr_subdev, "mpss");
-       qcom_add_ipa_notify_subdev(rproc, &qproc->ipa_notify_subdev);
        qproc->sysmon = qcom_add_sysmon_subdev(rproc, "modem", 0x12);
        if (IS_ERR(qproc->sysmon)) {
                ret = PTR_ERR(qproc->sysmon);
@@ -1728,7 +1747,6 @@ static int q6v5_probe(struct platform_device *pdev)
 remove_sysmon_subdev:
        qcom_remove_sysmon_subdev(qproc->sysmon);
 remove_subdevs:
-       qcom_remove_ipa_notify_subdev(qproc->rproc, &qproc->ipa_notify_subdev);
        qcom_remove_ssr_subdev(rproc, &qproc->ssr_subdev);
        qcom_remove_smd_subdev(rproc, &qproc->smd_subdev);
        qcom_remove_glink_subdev(rproc, &qproc->glink_subdev);
@@ -1750,7 +1768,6 @@ static int q6v5_remove(struct platform_device *pdev)
        rproc_del(rproc);
 
        qcom_remove_sysmon_subdev(qproc->sysmon);
-       qcom_remove_ipa_notify_subdev(rproc, &qproc->ipa_notify_subdev);
        qcom_remove_ssr_subdev(rproc, &qproc->ssr_subdev);
        qcom_remove_smd_subdev(rproc, &qproc->smd_subdev);
        qcom_remove_glink_subdev(rproc, &qproc->glink_subdev);
@@ -1792,6 +1809,7 @@ static const struct rproc_hexagon_res sc7180_mss = {
        },
        .need_mem_protection = true,
        .has_alt_reset = false,
+       .has_mba_logs = true,
        .has_spare_reg = true,
        .version = MSS_SC7180,
 };
@@ -1827,6 +1845,7 @@ static const struct rproc_hexagon_res sdm845_mss = {
        },
        .need_mem_protection = true,
        .has_alt_reset = true,
+       .has_mba_logs = false,
        .has_spare_reg = false,
        .version = MSS_SDM845,
 };
@@ -1854,6 +1873,7 @@ static const struct rproc_hexagon_res msm8998_mss = {
        },
        .need_mem_protection = true,
        .has_alt_reset = false,
+       .has_mba_logs = false,
        .has_spare_reg = false,
        .version = MSS_MSM8998,
 };
@@ -1884,6 +1904,7 @@ static const struct rproc_hexagon_res msm8996_mss = {
        },
        .need_mem_protection = true,
        .has_alt_reset = false,
+       .has_mba_logs = false,
        .has_spare_reg = false,
        .version = MSS_MSM8996,
 };
@@ -1917,6 +1938,7 @@ static const struct rproc_hexagon_res msm8916_mss = {
        },
        .need_mem_protection = false,
        .has_alt_reset = false,
+       .has_mba_logs = false,
        .has_spare_reg = false,
        .version = MSS_MSM8916,
 };
@@ -1958,6 +1980,7 @@ static const struct rproc_hexagon_res msm8974_mss = {
        },
        .need_mem_protection = false,
        .has_alt_reset = false,
+       .has_mba_logs = false,
        .has_spare_reg = false,
        .version = MSS_MSM8974,
 };
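
For context on the q6v5_mpss_load() change above: request_firmware_into_buf() has the firmware core place the file contents directly into a caller-supplied buffer, which is why the intermediate memcpy() of seg_fw->data is dropped. A minimal sketch of the same pattern, with a hypothetical device, buffer and firmware name that are not taken from this patch:

#include <linux/device.h>
#include <linux/firmware.h>

/* Hypothetical helper: load "example_seg.bin" straight into a region the
 * caller has already mapped, avoiding an intermediate bounce buffer. */
static int example_load_into_region(struct device *dev, void *region,
                                    size_t region_size)
{
        const struct firmware *fw;
        int ret;

        ret = request_firmware_into_buf(&fw, "example_seg.bin", dev,
                                        region, region_size);
        if (ret)
                return ret;

        /* The data already lives in 'region'; only the handle is released. */
        release_firmware(fw);

        return 0;
}
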
index 61791a0..3837f23 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/soc/qcom/smem_state.h>
 
 #include "qcom_common.h"
+#include "qcom_pil_info.h"
 #include "qcom_q6v5.h"
 #include "remoteproc_internal.h"
 
@@ -64,6 +65,7 @@ struct qcom_adsp {
        int pas_id;
        int crash_reason_smem;
        bool has_aggre2_clk;
+       const char *info_name;
 
        struct completion start_done;
        struct completion stop_done;
@@ -117,11 +119,17 @@ static void adsp_pds_disable(struct qcom_adsp *adsp, struct device **pds,
 static int adsp_load(struct rproc *rproc, const struct firmware *fw)
 {
        struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv;
+       int ret;
 
-       return qcom_mdt_load(adsp->dev, fw, rproc->firmware, adsp->pas_id,
-                            adsp->mem_region, adsp->mem_phys, adsp->mem_size,
-                            &adsp->mem_reloc);
+       ret = qcom_mdt_load(adsp->dev, fw, rproc->firmware, adsp->pas_id,
+                           adsp->mem_region, adsp->mem_phys, adsp->mem_size,
+                           &adsp->mem_reloc);
+       if (ret)
+               return ret;
 
+       qcom_pil_info_store(adsp->info_name, adsp->mem_phys, adsp->mem_size);
+
+       return 0;
 }
 
 static int adsp_start(struct rproc *rproc)
@@ -405,6 +413,7 @@ static int adsp_probe(struct platform_device *pdev)
        adsp->rproc = rproc;
        adsp->pas_id = desc->pas_id;
        adsp->has_aggre2_clk = desc->has_aggre2_clk;
+       adsp->info_name = desc->sysmon_name;
        platform_set_drvdata(pdev, adsp);
 
        device_wakeup_enable(adsp->dev);
index 88c76b9..8846ef0 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/reset.h>
 #include <linux/soc/qcom/mdt_loader.h>
 #include "qcom_common.h"
+#include "qcom_pil_info.h"
 #include "qcom_q6v5.h"
 
 #define WCSS_CRASH_REASON              421
@@ -424,10 +425,17 @@ static void *q6v5_wcss_da_to_va(struct rproc *rproc, u64 da, size_t len)
 static int q6v5_wcss_load(struct rproc *rproc, const struct firmware *fw)
 {
        struct q6v5_wcss *wcss = rproc->priv;
+       int ret;
+
+       ret = qcom_mdt_load_no_init(wcss->dev, fw, rproc->firmware,
+                                   0, wcss->mem_region, wcss->mem_phys,
+                                   wcss->mem_size, &wcss->mem_reloc);
+       if (ret)
+               return ret;
+
+       qcom_pil_info_store("wcnss", wcss->mem_phys, wcss->mem_size);
 
-       return qcom_mdt_load_no_init(wcss->dev, fw, rproc->firmware,
-                                    0, wcss->mem_region, wcss->mem_phys,
-                                    wcss->mem_size, &wcss->mem_reloc);
+       return ret;
 }
 
 static const struct rproc_ops q6v5_wcss_ops = {
index 8d8996d..9eb2f6b 100644 (file)
@@ -71,7 +71,7 @@ static LIST_HEAD(sysmon_list);
 /**
  * sysmon_send_event() - send notification of other remote's SSR event
  * @sysmon:    sysmon context
- * @name:      other remote's name
+ * @event:     sysmon event context
  */
 static void sysmon_send_event(struct qcom_sysmon *sysmon,
                              const struct sysmon_event *event)
@@ -343,7 +343,7 @@ static void ssctl_request_shutdown(struct qcom_sysmon *sysmon)
 /**
  * ssctl_send_event() - send notification of other remote's SSR event
  * @sysmon:    sysmon context
- * @name:      other remote's name
+ * @event:     sysmon event context
  */
 static void ssctl_send_event(struct qcom_sysmon *sysmon,
                             const struct sysmon_event *event)
index 5d65e1a..e2573f7 100644 (file)
@@ -27,6 +27,7 @@
 
 #include "qcom_common.h"
 #include "remoteproc_internal.h"
+#include "qcom_pil_info.h"
 #include "qcom_wcnss.h"
 
 #define WCNSS_CRASH_REASON_SMEM                422
@@ -145,10 +146,17 @@ void qcom_wcnss_assign_iris(struct qcom_wcnss *wcnss,
 static int wcnss_load(struct rproc *rproc, const struct firmware *fw)
 {
        struct qcom_wcnss *wcnss = (struct qcom_wcnss *)rproc->priv;
+       int ret;
+
+       ret = qcom_mdt_load(wcnss->dev, fw, rproc->firmware, WCNSS_PAS_ID,
+                           wcnss->mem_region, wcnss->mem_phys,
+                           wcnss->mem_size, &wcnss->mem_reloc);
+       if (ret)
+               return ret;
+
+       qcom_pil_info_store("wcnss", wcnss->mem_phys, wcnss->mem_size);
 
-       return qcom_mdt_load(wcnss->dev, fw, rproc->firmware, WCNSS_PAS_ID,
-                            wcnss->mem_region, wcnss->mem_phys,
-                            wcnss->mem_size, &wcnss->mem_reloc);
+       return 0;
 }
 
 static void wcnss_indicate_nv_download(struct qcom_wcnss *wcnss)
diff --git a/drivers/remoteproc/remoteproc_cdev.c b/drivers/remoteproc/remoteproc_cdev.c
new file mode 100644 (file)
index 0000000..b19ea30
--- /dev/null
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Character device interface driver for Remoteproc framework.
+ *
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/cdev.h>
+#include <linux/compat.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/remoteproc.h>
+#include <linux/uaccess.h>
+#include <uapi/linux/remoteproc_cdev.h>
+
+#include "remoteproc_internal.h"
+
+#define NUM_RPROC_DEVICES      64
+static dev_t rproc_major;
+
+static ssize_t rproc_cdev_write(struct file *filp, const char __user *buf, size_t len, loff_t *pos)
+{
+       struct rproc *rproc = container_of(filp->f_inode->i_cdev, struct rproc, cdev);
+       int ret = 0;
+       char cmd[10];
+
+       if (!len || len > sizeof(cmd))
+               return -EINVAL;
+
+       ret = copy_from_user(cmd, buf, len);
+       if (ret)
+               return -EFAULT;
+
+       if (!strncmp(cmd, "start", len)) {
+               if (rproc->state == RPROC_RUNNING)
+                       return -EBUSY;
+
+               ret = rproc_boot(rproc);
+       } else if (!strncmp(cmd, "stop", len)) {
+               if (rproc->state != RPROC_RUNNING)
+                       return -EINVAL;
+
+               rproc_shutdown(rproc);
+       } else {
+               dev_err(&rproc->dev, "Unrecognized option\n");
+               ret = -EINVAL;
+       }
+
+       return ret ? ret : len;
+}
+
+static long rproc_device_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
+{
+       struct rproc *rproc = container_of(filp->f_inode->i_cdev, struct rproc, cdev);
+       void __user *argp = (void __user *)arg;
+       s32 param;
+
+       switch (ioctl) {
+       case RPROC_SET_SHUTDOWN_ON_RELEASE:
+               if (copy_from_user(&param, argp, sizeof(s32)))
+                       return -EFAULT;
+
+               rproc->cdev_put_on_release = !!param;
+               break;
+       case RPROC_GET_SHUTDOWN_ON_RELEASE:
+               param = (s32)rproc->cdev_put_on_release;
+               if (copy_to_user(argp, &param, sizeof(s32)))
+                       return -EFAULT;
+
+               break;
+       default:
+               dev_err(&rproc->dev, "Unsupported ioctl\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int rproc_cdev_release(struct inode *inode, struct file *filp)
+{
+       struct rproc *rproc = container_of(inode->i_cdev, struct rproc, cdev);
+
+       if (rproc->cdev_put_on_release && rproc->state == RPROC_RUNNING)
+               rproc_shutdown(rproc);
+
+       return 0;
+}
+
+static const struct file_operations rproc_fops = {
+       .write = rproc_cdev_write,
+       .unlocked_ioctl = rproc_device_ioctl,
+       .compat_ioctl = compat_ptr_ioctl,
+       .release = rproc_cdev_release,
+};
+
+int rproc_char_device_add(struct rproc *rproc)
+{
+       int ret;
+
+       cdev_init(&rproc->cdev, &rproc_fops);
+       rproc->cdev.owner = THIS_MODULE;
+
+       rproc->dev.devt = MKDEV(MAJOR(rproc_major), rproc->index);
+       cdev_set_parent(&rproc->cdev, &rproc->dev.kobj);
+       ret = cdev_add(&rproc->cdev, rproc->dev.devt, 1);
+       if (ret < 0)
+               dev_err(&rproc->dev, "Failed to add char dev for %s\n", rproc->name);
+
+       return ret;
+}
+
+void rproc_char_device_remove(struct rproc *rproc)
+{
+       __unregister_chrdev(MAJOR(rproc->dev.devt), rproc->index, 1, "remoteproc");
+}
+
+void __init rproc_init_cdev(void)
+{
+       int ret;
+
+       ret = alloc_chrdev_region(&rproc_major, 0, NUM_RPROC_DEVICES, "remoteproc");
+       if (ret < 0)
+               pr_err("Failed to alloc rproc_cdev region, err %d\n", ret);
+}
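
To illustrate the interface this new file exposes, here is a small userspace sketch (not part of the patch) that arms shutdown-on-release and then boots a remote processor through the character device. The accepted write commands are exactly "start" and "stop", and both ioctls exchange a 32-bit flag; the node name /dev/remoteproc0 is an assumption that depends on how the device node is created on a given system:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/remoteproc_cdev.h>

int main(void)
{
        int fd = open("/dev/remoteproc0", O_RDWR);   /* assumed node name */
        int shutdown_on_release = 1;

        if (fd < 0) {
                perror("open");
                return 1;
        }

        /* Ask the kernel to shut the remote processor down when this file
         * descriptor is released. */
        if (ioctl(fd, RPROC_SET_SHUTDOWN_ON_RELEASE, &shutdown_on_release) < 0)
                perror("ioctl");

        /* Boot the remote processor; writing "stop" would shut it down. */
        if (write(fd, "start", strlen("start")) < 0)
                perror("write");

        close(fd);
        return 0;
}
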
index 9f04c30..7f90eee 100644 (file)
 #include <linux/firmware.h>
 #include <linux/string.h>
 #include <linux/debugfs.h>
-#include <linux/devcoredump.h>
 #include <linux/rculist.h>
 #include <linux/remoteproc.h>
-#include <linux/pm_runtime.h>
 #include <linux/iommu.h>
 #include <linux/idr.h>
 #include <linux/elf.h>
@@ -41,7 +39,6 @@
 #include <linux/platform_device.h>
 
 #include "remoteproc_internal.h"
-#include "remoteproc_elf_helpers.h"
 
 #define HIGH_BITS_MASK 0xFFFFFFFF00000000ULL
 
@@ -244,6 +241,7 @@ EXPORT_SYMBOL(rproc_da_to_va);
  *
  * Return: a valid pointer on carveout entry on success or NULL on failure.
  */
+__printf(2, 3)
 struct rproc_mem_entry *
 rproc_find_carveout_by_name(struct rproc *rproc, const char *name, ...)
 {
@@ -411,10 +409,22 @@ void rproc_free_vring(struct rproc_vring *rvring)
 
        idr_remove(&rproc->notifyids, rvring->notifyid);
 
-       /* reset resource entry info */
-       rsc = (void *)rproc->table_ptr + rvring->rvdev->rsc_offset;
-       rsc->vring[idx].da = 0;
-       rsc->vring[idx].notifyid = -1;
+       /*
+        * At this point rproc_stop() has been called and the installed resource
+        * table in the remote processor memory may no longer be accessible. As
+        * such and as per rproc_stop(), rproc->table_ptr points to the cached
+        * resource table (rproc->cached_table).  The cached resource table is
+        * only available when a remote processor has been booted by the
+        * remoteproc core, otherwise it is NULL.
+        *
+        * Based on the above, reset the virtio device section in the cached
+        * resource table only if there is one to work with.
+        */
+       if (rproc->table_ptr) {
+               rsc = (void *)rproc->table_ptr + rvring->rvdev->rsc_offset;
+               rsc->vring[idx].da = 0;
+               rsc->vring[idx].notifyid = -1;
+       }
 }
 
 static int rproc_vdev_do_start(struct rproc_subdev *subdev)
@@ -967,6 +977,7 @@ EXPORT_SYMBOL(rproc_add_carveout);
  * This function allocates a rproc_mem_entry struct and fill it with parameters
  * provided by client.
  */
+__printf(8, 9)
 struct rproc_mem_entry *
 rproc_mem_entry_init(struct device *dev,
                     void *va, dma_addr_t dma, size_t len, u32 da,
@@ -1010,6 +1021,7 @@ EXPORT_SYMBOL(rproc_mem_entry_init);
  * This function allocates a rproc_mem_entry struct and fill it with parameters
  * provided by client.
  */
+__printf(5, 6)
 struct rproc_mem_entry *
 rproc_of_resm_mem_entry_init(struct device *dev, u32 of_resm_idx, size_t len,
                             u32 da, const char *name, ...)
@@ -1034,6 +1046,29 @@ rproc_of_resm_mem_entry_init(struct device *dev, u32 of_resm_idx, size_t len,
 }
 EXPORT_SYMBOL(rproc_of_resm_mem_entry_init);
 
+/**
+ * rproc_of_parse_firmware() - parse and return the firmware-name
+ * @dev: pointer on device struct representing a rproc
+ * @index: index to use for the firmware-name retrieval
+ * @fw_name: pointer to a character string, in which the firmware
+ *           name is returned on success and unmodified otherwise.
+ *
+ * This is an OF helper function that parses a device's DT node for
+ * the "firmware-name" property and returns the firmware name pointer
+ * in @fw_name on success.
+ *
+ * Return: 0 on success, or an appropriate error code otherwise.
+ */
+int rproc_of_parse_firmware(struct device *dev, int index, const char **fw_name)
+{
+       int ret;
+
+       ret = of_property_read_string_index(dev->of_node, "firmware-name",
+                                           index, fw_name);
+       return ret ? ret : 0;
+}
+EXPORT_SYMBOL(rproc_of_parse_firmware);
+
 /*
  * A lookup table for resource handlers. The indices are defined in
  * enum fw_resource_type.
@@ -1239,19 +1274,6 @@ static int rproc_alloc_registered_carveouts(struct rproc *rproc)
        return 0;
 }
 
-/**
- * rproc_coredump_cleanup() - clean up dump_segments list
- * @rproc: the remote processor handle
- */
-static void rproc_coredump_cleanup(struct rproc *rproc)
-{
-       struct rproc_dump_segment *entry, *tmp;
-
-       list_for_each_entry_safe(entry, tmp, &rproc->dump_segments, node) {
-               list_del(&entry->node);
-               kfree(entry);
-       }
-}
 
 /**
  * rproc_resource_cleanup() - clean up and free all acquired resources
@@ -1260,7 +1282,7 @@ static void rproc_coredump_cleanup(struct rproc *rproc)
  * This function will free all resources acquired for @rproc, and it
  * is called whenever @rproc either shuts down or fails to boot.
  */
-static void rproc_resource_cleanup(struct rproc *rproc)
+void rproc_resource_cleanup(struct rproc *rproc)
 {
        struct rproc_mem_entry *entry, *tmp;
        struct rproc_debug_trace *trace, *ttmp;
@@ -1304,6 +1326,7 @@ static void rproc_resource_cleanup(struct rproc *rproc)
 
        rproc_coredump_cleanup(rproc);
 }
+EXPORT_SYMBOL(rproc_resource_cleanup);
 
 static int rproc_start(struct rproc *rproc, const struct firmware *fw)
 {
@@ -1370,6 +1393,48 @@ reset_table_ptr:
        return ret;
 }
 
+static int rproc_attach(struct rproc *rproc)
+{
+       struct device *dev = &rproc->dev;
+       int ret;
+
+       ret = rproc_prepare_subdevices(rproc);
+       if (ret) {
+               dev_err(dev, "failed to prepare subdevices for %s: %d\n",
+                       rproc->name, ret);
+               goto out;
+       }
+
+       /* Attach to the remote processor */
+       ret = rproc_attach_device(rproc);
+       if (ret) {
+               dev_err(dev, "can't attach to rproc %s: %d\n",
+                       rproc->name, ret);
+               goto unprepare_subdevices;
+       }
+
+       /* Start any subdevices for the remote processor */
+       ret = rproc_start_subdevices(rproc);
+       if (ret) {
+               dev_err(dev, "failed to probe subdevices for %s: %d\n",
+                       rproc->name, ret);
+               goto stop_rproc;
+       }
+
+       rproc->state = RPROC_RUNNING;
+
+       dev_info(dev, "remote processor %s is now attached\n", rproc->name);
+
+       return 0;
+
+stop_rproc:
+       rproc->ops->stop(rproc);
+unprepare_subdevices:
+       rproc_unprepare_subdevices(rproc);
+out:
+       return ret;
+}
+
 /*
  * take a firmware and boot a remote processor with it.
  */
@@ -1383,12 +1448,6 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
        if (ret)
                return ret;
 
-       ret = pm_runtime_get_sync(dev);
-       if (ret < 0) {
-               dev_err(dev, "pm_runtime_get_sync failed: %d\n", ret);
-               return ret;
-       }
-
        dev_info(dev, "Booting fw image %s, size %zd\n", name, fw->size);
 
        /*
@@ -1398,7 +1457,7 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
        ret = rproc_enable_iommu(rproc);
        if (ret) {
                dev_err(dev, "can't enable iommu: %d\n", ret);
-               goto put_pm_runtime;
+               return ret;
        }
 
        /* Prepare rproc for firmware loading if needed */
@@ -1452,8 +1511,63 @@ unprepare_rproc:
        rproc_unprepare_device(rproc);
 disable_iommu:
        rproc_disable_iommu(rproc);
-put_pm_runtime:
-       pm_runtime_put(dev);
+       return ret;
+}
+
+/*
+ * Attach to remote processor - similar to rproc_fw_boot() but without
+ * the steps that deal with the firmware image.
+ */
+static int rproc_actuate(struct rproc *rproc)
+{
+       struct device *dev = &rproc->dev;
+       int ret;
+
+       /*
+        * if enabling an IOMMU isn't relevant for this rproc, this is
+        * just a nop
+        */
+       ret = rproc_enable_iommu(rproc);
+       if (ret) {
+               dev_err(dev, "can't enable iommu: %d\n", ret);
+               return ret;
+       }
+
+       /* reset max_notifyid */
+       rproc->max_notifyid = -1;
+
+       /* reset handled vdev */
+       rproc->nb_vdev = 0;
+
+       /*
+        * Handle firmware resources required to attach to a remote processor.
+        * Because we are attaching rather than booting the remote processor,
+        * we expect the platform driver to properly set rproc->table_ptr.
+        */
+       ret = rproc_handle_resources(rproc, rproc_loading_handlers);
+       if (ret) {
+               dev_err(dev, "Failed to process resources: %d\n", ret);
+               goto disable_iommu;
+       }
+
+       /* Allocate carveout resources associated to rproc */
+       ret = rproc_alloc_registered_carveouts(rproc);
+       if (ret) {
+               dev_err(dev, "Failed to allocate associated carveouts: %d\n",
+                       ret);
+               goto clean_up_resources;
+       }
+
+       ret = rproc_attach(rproc);
+       if (ret)
+               goto clean_up_resources;
+
+       return 0;
+
+clean_up_resources:
+       rproc_resource_cleanup(rproc);
+disable_iommu:
+       rproc_disable_iommu(rproc);
        return ret;
 }
 
@@ -1479,6 +1593,15 @@ static int rproc_trigger_auto_boot(struct rproc *rproc)
        int ret;
 
        /*
+        * Since the remote processor is in a detached state, it has already
+        * been booted by another entity.  As such there is no point in waiting
+        * for a firmware image to be loaded; we can simply initiate the process
+        * of attaching to it immediately.
+        */
+       if (rproc->state == RPROC_DETACHED)
+               return rproc_boot(rproc);
+
+       /*
         * We're initiating an asynchronous firmware loading, so we can
         * be built-in kernel code, without hanging the boot process.
         */
@@ -1513,187 +1636,19 @@ static int rproc_stop(struct rproc *rproc, bool crashed)
 
        rproc->state = RPROC_OFFLINE;
 
-       dev_info(dev, "stopped remote processor %s\n", rproc->name);
-
-       return 0;
-}
-
-/**
- * rproc_coredump_add_segment() - add segment of device memory to coredump
- * @rproc:     handle of a remote processor
- * @da:                device address
- * @size:      size of segment
- *
- * Add device memory to the list of segments to be included in a coredump for
- * the remoteproc.
- *
- * Return: 0 on success, negative errno on error.
- */
-int rproc_coredump_add_segment(struct rproc *rproc, dma_addr_t da, size_t size)
-{
-       struct rproc_dump_segment *segment;
-
-       segment = kzalloc(sizeof(*segment), GFP_KERNEL);
-       if (!segment)
-               return -ENOMEM;
-
-       segment->da = da;
-       segment->size = size;
-
-       list_add_tail(&segment->node, &rproc->dump_segments);
-
-       return 0;
-}
-EXPORT_SYMBOL(rproc_coredump_add_segment);
-
-/**
- * rproc_coredump_add_custom_segment() - add custom coredump segment
- * @rproc:     handle of a remote processor
- * @da:                device address
- * @size:      size of segment
- * @dumpfn:    custom dump function called for each segment during coredump
- * @priv:      private data
- *
- * Add device memory to the list of segments to be included in the coredump
- * and associate the segment with the given custom dump function and private
- * data.
- *
- * Return: 0 on success, negative errno on error.
- */
-int rproc_coredump_add_custom_segment(struct rproc *rproc,
-                                     dma_addr_t da, size_t size,
-                                     void (*dumpfn)(struct rproc *rproc,
-                                                    struct rproc_dump_segment *segment,
-                                                    void *dest),
-                                     void *priv)
-{
-       struct rproc_dump_segment *segment;
-
-       segment = kzalloc(sizeof(*segment), GFP_KERNEL);
-       if (!segment)
-               return -ENOMEM;
-
-       segment->da = da;
-       segment->size = size;
-       segment->priv = priv;
-       segment->dump = dumpfn;
+       /*
+        * The remote processor has been stopped and is now offline, which means
+        * that the next time it is brought back online the remoteproc core will
+        * be responsible for loading its firmware.  As such it is no longer
+        * autonomous.
+        */
+       rproc->autonomous = false;
 
-       list_add_tail(&segment->node, &rproc->dump_segments);
+       dev_info(dev, "stopped remote processor %s\n", rproc->name);
 
        return 0;
 }
-EXPORT_SYMBOL(rproc_coredump_add_custom_segment);
 
-/**
- * rproc_coredump_set_elf_info() - set coredump elf information
- * @rproc:     handle of a remote processor
- * @class:     elf class for coredump elf file
- * @machine:   elf machine for coredump elf file
- *
- * Set elf information which will be used for coredump elf file.
- *
- * Return: 0 on success, negative errno on error.
- */
-int rproc_coredump_set_elf_info(struct rproc *rproc, u8 class, u16 machine)
-{
-       if (class != ELFCLASS64 && class != ELFCLASS32)
-               return -EINVAL;
-
-       rproc->elf_class = class;
-       rproc->elf_machine = machine;
-
-       return 0;
-}
-EXPORT_SYMBOL(rproc_coredump_set_elf_info);
-
-/**
- * rproc_coredump() - perform coredump
- * @rproc:     rproc handle
- *
- * This function will generate an ELF header for the registered segments
- * and create a devcoredump device associated with rproc.
- */
-static void rproc_coredump(struct rproc *rproc)
-{
-       struct rproc_dump_segment *segment;
-       void *phdr;
-       void *ehdr;
-       size_t data_size;
-       size_t offset;
-       void *data;
-       void *ptr;
-       u8 class = rproc->elf_class;
-       int phnum = 0;
-
-       if (list_empty(&rproc->dump_segments))
-               return;
-
-       if (class == ELFCLASSNONE) {
-               dev_err(&rproc->dev, "Elf class is not set\n");
-               return;
-       }
-
-       data_size = elf_size_of_hdr(class);
-       list_for_each_entry(segment, &rproc->dump_segments, node) {
-               data_size += elf_size_of_phdr(class) + segment->size;
-
-               phnum++;
-       }
-
-       data = vmalloc(data_size);
-       if (!data)
-               return;
-
-       ehdr = data;
-
-       memset(ehdr, 0, elf_size_of_hdr(class));
-       /* e_ident field is common for both elf32 and elf64 */
-       elf_hdr_init_ident(ehdr, class);
-
-       elf_hdr_set_e_type(class, ehdr, ET_CORE);
-       elf_hdr_set_e_machine(class, ehdr, rproc->elf_machine);
-       elf_hdr_set_e_version(class, ehdr, EV_CURRENT);
-       elf_hdr_set_e_entry(class, ehdr, rproc->bootaddr);
-       elf_hdr_set_e_phoff(class, ehdr, elf_size_of_hdr(class));
-       elf_hdr_set_e_ehsize(class, ehdr, elf_size_of_hdr(class));
-       elf_hdr_set_e_phentsize(class, ehdr, elf_size_of_phdr(class));
-       elf_hdr_set_e_phnum(class, ehdr, phnum);
-
-       phdr = data + elf_hdr_get_e_phoff(class, ehdr);
-       offset = elf_hdr_get_e_phoff(class, ehdr);
-       offset += elf_size_of_phdr(class) * elf_hdr_get_e_phnum(class, ehdr);
-
-       list_for_each_entry(segment, &rproc->dump_segments, node) {
-               memset(phdr, 0, elf_size_of_phdr(class));
-               elf_phdr_set_p_type(class, phdr, PT_LOAD);
-               elf_phdr_set_p_offset(class, phdr, offset);
-               elf_phdr_set_p_vaddr(class, phdr, segment->da);
-               elf_phdr_set_p_paddr(class, phdr, segment->da);
-               elf_phdr_set_p_filesz(class, phdr, segment->size);
-               elf_phdr_set_p_memsz(class, phdr, segment->size);
-               elf_phdr_set_p_flags(class, phdr, PF_R | PF_W | PF_X);
-               elf_phdr_set_p_align(class, phdr, 0);
-
-               if (segment->dump) {
-                       segment->dump(rproc, segment, data + offset);
-               } else {
-                       ptr = rproc_da_to_va(rproc, segment->da, segment->size);
-                       if (!ptr) {
-                               dev_err(&rproc->dev,
-                                       "invalid coredump segment (%pad, %zu)\n",
-                                       &segment->da, segment->size);
-                               memset(data + offset, 0xff, segment->size);
-                       } else {
-                               memcpy(data + offset, ptr, segment->size);
-                       }
-               }
-
-               offset += elf_phdr_get_p_filesz(class, phdr);
-               phdr += elf_size_of_phdr(class);
-       }
-
-       dev_coredumpv(&rproc->dev, data, data_size, GFP_KERNEL);
-}
 
 /**
  * rproc_trigger_recovery() - recover a remoteproc
@@ -1815,24 +1770,30 @@ int rproc_boot(struct rproc *rproc)
                goto unlock_mutex;
        }
 
-       /* skip the boot process if rproc is already powered up */
+       /* skip the boot or attach process if rproc is already powered up */
        if (atomic_inc_return(&rproc->power) > 1) {
                ret = 0;
                goto unlock_mutex;
        }
 
-       dev_info(dev, "powering up %s\n", rproc->name);
+       if (rproc->state == RPROC_DETACHED) {
+               dev_info(dev, "attaching to %s\n", rproc->name);
 
-       /* load firmware */
-       ret = request_firmware(&firmware_p, rproc->firmware, dev);
-       if (ret < 0) {
-               dev_err(dev, "request_firmware failed: %d\n", ret);
-               goto downref_rproc;
-       }
+               ret = rproc_actuate(rproc);
+       } else {
+               dev_info(dev, "powering up %s\n", rproc->name);
 
-       ret = rproc_fw_boot(rproc, firmware_p);
+               /* load firmware */
+               ret = request_firmware(&firmware_p, rproc->firmware, dev);
+               if (ret < 0) {
+                       dev_err(dev, "request_firmware failed: %d\n", ret);
+                       goto downref_rproc;
+               }
 
-       release_firmware(firmware_p);
+               ret = rproc_fw_boot(rproc, firmware_p);
+
+               release_firmware(firmware_p);
+       }
 
 downref_rproc:
        if (ret)
@@ -1891,8 +1852,6 @@ void rproc_shutdown(struct rproc *rproc)
 
        rproc_disable_iommu(rproc);
 
-       pm_runtime_put(dev);
-
        /* Free the copy of the resource table */
        kfree(rproc->cached_table);
        rproc->cached_table = NULL;
@@ -1952,6 +1911,43 @@ struct rproc *rproc_get_by_phandle(phandle phandle)
 #endif
 EXPORT_SYMBOL(rproc_get_by_phandle);
 
+static int rproc_validate(struct rproc *rproc)
+{
+       switch (rproc->state) {
+       case RPROC_OFFLINE:
+               /*
+                * An offline processor without a start()
+                * function makes no sense.
+                */
+               if (!rproc->ops->start)
+                       return -EINVAL;
+               break;
+       case RPROC_DETACHED:
+               /*
+                * A remote processor in a detached state without an
+                * attach() function makes no sense.
+                */
+               if (!rproc->ops->attach)
+                       return -EINVAL;
+               /*
+                * When attaching to a remote processor the device memory
+                * is already available and as such there is no need to have a
+                * cached table.
+                */
+               if (rproc->cached_table)
+                       return -EINVAL;
+               break;
+       default:
+               /*
+                * When adding a remote processor, the state of the device
+                * can be offline or detached, nothing else.
+                */
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
 /**
  * rproc_add() - register a remote processor
  * @rproc: the remote processor handle to register
@@ -1981,11 +1977,30 @@ int rproc_add(struct rproc *rproc)
        if (ret < 0)
                return ret;
 
+       ret = rproc_validate(rproc);
+       if (ret < 0)
+               return ret;
+
        dev_info(dev, "%s is available\n", rproc->name);
 
        /* create debugfs entries */
        rproc_create_debug_dir(rproc);
 
+       /* add char device for this remoteproc */
+       ret = rproc_char_device_add(rproc);
+       if (ret < 0)
+               return ret;
+
+       /*
+        * Remind ourselves the remote processor has been attached to rather
+        * than booted by the remoteproc core.  This is important because the
+        * RPROC_DETACHED state will be lost as soon as the remote processor
+        * has been attached to.  Used in firmware_show() and reset in
+        * rproc_stop().
+        */
+       if (rproc->state == RPROC_DETACHED)
+               rproc->autonomous = true;
+
        /* if rproc is marked always-on, request it to boot */
        if (rproc->auto_boot) {
                ret = rproc_trigger_auto_boot(rproc);
@@ -2183,9 +2198,6 @@ struct rproc *rproc_alloc(struct device *dev, const char *name,
 
        rproc->state = RPROC_OFFLINE;
 
-       pm_runtime_no_callbacks(&rproc->dev);
-       pm_runtime_enable(&rproc->dev);
-
        return rproc;
 
 put_device:
@@ -2205,7 +2217,6 @@ EXPORT_SYMBOL(rproc_alloc);
  */
 void rproc_free(struct rproc *rproc)
 {
-       pm_runtime_disable(&rproc->dev);
        put_device(&rproc->dev);
 }
 EXPORT_SYMBOL(rproc_free);
@@ -2256,6 +2267,7 @@ int rproc_del(struct rproc *rproc)
        mutex_unlock(&rproc->lock);
 
        rproc_delete_debug_dir(rproc);
+       rproc_char_device_remove(rproc);
 
        /* the rproc is downref'ed as soon as it's removed from the klist */
        mutex_lock(&rproc_list_mutex);
@@ -2424,6 +2436,7 @@ static int __init remoteproc_init(void)
 {
        rproc_init_sysfs();
        rproc_init_debugfs();
+       rproc_init_cdev();
        rproc_init_panic();
 
        return 0;
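
To show how the new attach path is meant to be used, a minimal, hypothetical platform-driver fragment follows (my_rproc_attach, my_rproc_ops and my_rproc_probe are invented names). The driver supplies an attach() operation and marks the rproc RPROC_DETACHED before rproc_add(), which rproc_validate() above requires; rproc_boot() then goes through rproc_actuate()/rproc_attach() instead of requesting a firmware image. A driver whose remote side carries a resource table would also set rproc->table_ptr before this point, as noted in rproc_actuate():

#include <linux/platform_device.h>
#include <linux/remoteproc.h>

/* Hypothetical attach handler: the remote processor was already booted by
 * another entity (e.g. the boot loader), so there is nothing to start here. */
static int my_rproc_attach(struct rproc *rproc)
{
        return 0;
}

static const struct rproc_ops my_rproc_ops = {
        .attach = my_rproc_attach,
};

static int my_rproc_probe(struct platform_device *pdev)
{
        struct rproc *rproc;

        rproc = rproc_alloc(&pdev->dev, "my-rproc", &my_rproc_ops, NULL, 0);
        if (!rproc)
                return -ENOMEM;

        /* Tell the core the remote processor is already running, so that
         * rproc_boot() attaches to it rather than booting it. */
        rproc->state = RPROC_DETACHED;

        return rproc_add(rproc);
}
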
diff --git a/drivers/remoteproc/remoteproc_coredump.c b/drivers/remoteproc/remoteproc_coredump.c
new file mode 100644 (file)
index 0000000..bb15a29
--- /dev/null
@@ -0,0 +1,325 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Coredump functionality for Remoteproc framework.
+ *
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/completion.h>
+#include <linux/devcoredump.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/remoteproc.h>
+#include "remoteproc_internal.h"
+#include "remoteproc_elf_helpers.h"
+
+struct rproc_coredump_state {
+       struct rproc *rproc;
+       void *header;
+       struct completion dump_done;
+};
+
+/**
+ * rproc_coredump_cleanup() - clean up dump_segments list
+ * @rproc: the remote processor handle
+ */
+void rproc_coredump_cleanup(struct rproc *rproc)
+{
+       struct rproc_dump_segment *entry, *tmp;
+
+       list_for_each_entry_safe(entry, tmp, &rproc->dump_segments, node) {
+               list_del(&entry->node);
+               kfree(entry);
+       }
+}
+
+/**
+ * rproc_coredump_add_segment() - add segment of device memory to coredump
+ * @rproc:     handle of a remote processor
+ * @da:                device address
+ * @size:      size of segment
+ *
+ * Add device memory to the list of segments to be included in a coredump for
+ * the remoteproc.
+ *
+ * Return: 0 on success, negative errno on error.
+ */
+int rproc_coredump_add_segment(struct rproc *rproc, dma_addr_t da, size_t size)
+{
+       struct rproc_dump_segment *segment;
+
+       segment = kzalloc(sizeof(*segment), GFP_KERNEL);
+       if (!segment)
+               return -ENOMEM;
+
+       segment->da = da;
+       segment->size = size;
+
+       list_add_tail(&segment->node, &rproc->dump_segments);
+
+       return 0;
+}
+EXPORT_SYMBOL(rproc_coredump_add_segment);
+
+/**
+ * rproc_coredump_add_custom_segment() - add custom coredump segment
+ * @rproc:     handle of a remote processor
+ * @da:                device address
+ * @size:      size of segment
+ * @dumpfn:    custom dump function called for each segment during coredump
+ * @priv:      private data
+ *
+ * Add device memory to the list of segments to be included in the coredump
+ * and associate the segment with the given custom dump function and private
+ * data.
+ *
+ * Return: 0 on success, negative errno on error.
+ */
+int rproc_coredump_add_custom_segment(struct rproc *rproc,
+                                     dma_addr_t da, size_t size,
+                                     void (*dumpfn)(struct rproc *rproc,
+                                                    struct rproc_dump_segment *segment,
+                                                    void *dest, size_t offset,
+                                                    size_t size),
+                                     void *priv)
+{
+       struct rproc_dump_segment *segment;
+
+       segment = kzalloc(sizeof(*segment), GFP_KERNEL);
+       if (!segment)
+               return -ENOMEM;
+
+       segment->da = da;
+       segment->size = size;
+       segment->priv = priv;
+       segment->dump = dumpfn;
+
+       list_add_tail(&segment->node, &rproc->dump_segments);
+
+       return 0;
+}
+EXPORT_SYMBOL(rproc_coredump_add_custom_segment);
+
+/**
+ * rproc_coredump_set_elf_info() - set coredump elf information
+ * @rproc:     handle of a remote processor
+ * @class:     elf class for coredump elf file
+ * @machine:   elf machine for coredump elf file
+ *
+ * Set elf information which will be used for coredump elf file.
+ *
+ * Return: 0 on success, negative errno on error.
+ */
+int rproc_coredump_set_elf_info(struct rproc *rproc, u8 class, u16 machine)
+{
+       if (class != ELFCLASS64 && class != ELFCLASS32)
+               return -EINVAL;
+
+       rproc->elf_class = class;
+       rproc->elf_machine = machine;
+
+       return 0;
+}
+EXPORT_SYMBOL(rproc_coredump_set_elf_info);
+
+static void rproc_coredump_free(void *data)
+{
+       struct rproc_coredump_state *dump_state = data;
+
+       vfree(dump_state->header);
+       complete(&dump_state->dump_done);
+}
+
+static void *rproc_coredump_find_segment(loff_t user_offset,
+                                        struct list_head *segments,
+                                        size_t *data_left)
+{
+       struct rproc_dump_segment *segment;
+
+       list_for_each_entry(segment, segments, node) {
+               if (user_offset < segment->size) {
+                       *data_left = segment->size - user_offset;
+                       return segment;
+               }
+               user_offset -= segment->size;
+       }
+
+       *data_left = 0;
+       return NULL;
+}
+
+static void rproc_copy_segment(struct rproc *rproc, void *dest,
+                              struct rproc_dump_segment *segment,
+                              size_t offset, size_t size)
+{
+       void *ptr;
+
+       if (segment->dump) {
+               segment->dump(rproc, segment, dest, offset, size);
+       } else {
+               ptr = rproc_da_to_va(rproc, segment->da + offset, size);
+               if (!ptr) {
+                       dev_err(&rproc->dev,
+                               "invalid copy request for segment %pad with offset %zu and size %zu\n",
+                               &segment->da, offset, size);
+                       memset(dest, 0xff, size);
+               } else {
+                       memcpy(dest, ptr, size);
+               }
+       }
+}
+
+static ssize_t rproc_coredump_read(char *buffer, loff_t offset, size_t count,
+                                  void *data, size_t header_sz)
+{
+       size_t seg_data, bytes_left = count;
+       ssize_t copy_sz;
+       struct rproc_dump_segment *seg;
+       struct rproc_coredump_state *dump_state = data;
+       struct rproc *rproc = dump_state->rproc;
+       void *elfcore = dump_state->header;
+
+       /* Copy the vmalloc'ed header first. */
+       if (offset < header_sz) {
+               copy_sz = memory_read_from_buffer(buffer, count, &offset,
+                                                 elfcore, header_sz);
+
+               return copy_sz;
+       }
+
+       /*
+        * Find out the segment memory chunk to be copied based on offset.
+        * Keep copying data until count bytes are read.
+        */
+       while (bytes_left) {
+               seg = rproc_coredump_find_segment(offset - header_sz,
+                                                 &rproc->dump_segments,
+                                                 &seg_data);
+               /* EOF check */
+               if (!seg) {
+                       dev_info(&rproc->dev, "Ramdump done, %lld bytes read\n",
+                                offset);
+                       break;
+               }
+
+               copy_sz = min_t(size_t, bytes_left, seg_data);
+
+               rproc_copy_segment(rproc, buffer, seg, seg->size - seg_data,
+                                  copy_sz);
+
+               offset += copy_sz;
+               buffer += copy_sz;
+               bytes_left -= copy_sz;
+       }
+
+       return count - bytes_left;
+}
+
+/**
+ * rproc_coredump() - perform coredump
+ * @rproc:     rproc handle
+ *
+ * This function will generate an ELF header for the registered segments
+ * and create a devcoredump device associated with rproc. Based on the
+ * coredump configuration this function will directly copy the segments
+ * from device memory to userspace or copy segments from device memory to
+ * a separate buffer, which can then be read by userspace.
+ * The first approach avoids using extra vmalloc memory, but it will stall
+ * the recovery flow until the dump is read by userspace.
+ */
+void rproc_coredump(struct rproc *rproc)
+{
+       struct rproc_dump_segment *segment;
+       void *phdr;
+       void *ehdr;
+       size_t data_size;
+       size_t offset;
+       void *data;
+       u8 class = rproc->elf_class;
+       int phnum = 0;
+       struct rproc_coredump_state dump_state;
+       enum rproc_dump_mechanism dump_conf = rproc->dump_conf;
+
+       if (list_empty(&rproc->dump_segments) ||
+           dump_conf == RPROC_COREDUMP_DISABLED)
+               return;
+
+       if (class == ELFCLASSNONE) {
+               dev_err(&rproc->dev, "Elf class is not set\n");
+               return;
+       }
+
+       data_size = elf_size_of_hdr(class);
+       list_for_each_entry(segment, &rproc->dump_segments, node) {
+               /*
+                * For default configuration buffer includes headers & segments.
+                * For inline dump buffer just includes headers as segments are
+                * directly read from device memory.
+                */
+               data_size += elf_size_of_phdr(class);
+               if (dump_conf == RPROC_COREDUMP_DEFAULT)
+                       data_size += segment->size;
+
+               phnum++;
+       }
+
+       data = vmalloc(data_size);
+       if (!data)
+               return;
+
+       ehdr = data;
+
+       memset(ehdr, 0, elf_size_of_hdr(class));
+       /* e_ident field is common for both elf32 and elf64 */
+       elf_hdr_init_ident(ehdr, class);
+
+       elf_hdr_set_e_type(class, ehdr, ET_CORE);
+       elf_hdr_set_e_machine(class, ehdr, rproc->elf_machine);
+       elf_hdr_set_e_version(class, ehdr, EV_CURRENT);
+       elf_hdr_set_e_entry(class, ehdr, rproc->bootaddr);
+       elf_hdr_set_e_phoff(class, ehdr, elf_size_of_hdr(class));
+       elf_hdr_set_e_ehsize(class, ehdr, elf_size_of_hdr(class));
+       elf_hdr_set_e_phentsize(class, ehdr, elf_size_of_phdr(class));
+       elf_hdr_set_e_phnum(class, ehdr, phnum);
+
+       phdr = data + elf_hdr_get_e_phoff(class, ehdr);
+       offset = elf_hdr_get_e_phoff(class, ehdr);
+       offset += elf_size_of_phdr(class) * elf_hdr_get_e_phnum(class, ehdr);
+
+       list_for_each_entry(segment, &rproc->dump_segments, node) {
+               memset(phdr, 0, elf_size_of_phdr(class));
+               elf_phdr_set_p_type(class, phdr, PT_LOAD);
+               elf_phdr_set_p_offset(class, phdr, offset);
+               elf_phdr_set_p_vaddr(class, phdr, segment->da);
+               elf_phdr_set_p_paddr(class, phdr, segment->da);
+               elf_phdr_set_p_filesz(class, phdr, segment->size);
+               elf_phdr_set_p_memsz(class, phdr, segment->size);
+               elf_phdr_set_p_flags(class, phdr, PF_R | PF_W | PF_X);
+               elf_phdr_set_p_align(class, phdr, 0);
+
+               if (dump_conf == RPROC_COREDUMP_DEFAULT)
+                       rproc_copy_segment(rproc, data + offset, segment, 0,
+                                          segment->size);
+
+               offset += elf_phdr_get_p_filesz(class, phdr);
+               phdr += elf_size_of_phdr(class);
+       }
+       if (dump_conf == RPROC_COREDUMP_DEFAULT) {
+               dev_coredumpv(&rproc->dev, data, data_size, GFP_KERNEL);
+               return;
+       }
+
+       /* Initialize the dump state struct to be used by rproc_coredump_read */
+       dump_state.rproc = rproc;
+       dump_state.header = data;
+       init_completion(&dump_state.dump_done);
+
+       dev_coredumpm(&rproc->dev, NULL, &dump_state, data_size, GFP_KERNEL,
+                     rproc_coredump_read, rproc_coredump_free);
+
+       /*
+        * Wait until the dump is read and free is called. Data is freed
+        * by devcoredump framework automatically after 5 minutes.
+        */
+       wait_for_completion(&dump_state.dump_done);
+}
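
Because the dump callback now receives an offset and a size, the inline mechanism above can read a segment piecewise from rproc_coredump_read(). A hypothetical callback and registration using the new prototype; the function names and the assumption that the segment's device address can be ioremapped directly are illustrative only:

#include <linux/io.h>
#include <linux/remoteproc.h>
#include <linux/string.h>

/* Hypothetical: copy 'size' bytes of a device-memory segment, starting
 * 'offset' bytes into the segment, into the destination buffer. */
static void my_dump_segment(struct rproc *rproc,
                            struct rproc_dump_segment *segment,
                            void *dest, size_t offset, size_t size)
{
        void __iomem *ptr;

        /* Assumes the device address maps 1:1 onto a physical address. */
        ptr = ioremap_wc(segment->da + offset, size);
        if (ptr) {
                memcpy_fromio(dest, ptr, size);
                iounmap(ptr);
        } else {
                memset(dest, 0xff, size);
        }
}

/* Registration while building the dump segment list. */
static int my_register_segment(struct rproc *rproc, dma_addr_t da, size_t len)
{
        return rproc_coredump_add_custom_segment(rproc, da, len,
                                                 my_dump_segment, NULL);
}
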
index 732770e..2e3b3e2 100644 (file)
 static struct dentry *rproc_dbg;
 
 /*
+ * A coredump-configuration-to-string lookup table, for exposing a
+ * human-readable configuration via debugfs. Always keep in sync with
+ * enum rproc_dump_mechanism
+ */
+static const char * const rproc_coredump_str[] = {
+       [RPROC_COREDUMP_DEFAULT]        = "default",
+       [RPROC_COREDUMP_INLINE]         = "inline",
+       [RPROC_COREDUMP_DISABLED]       = "disabled",
+};
+
+/* Expose the current coredump configuration via debugfs */
+static ssize_t rproc_coredump_read(struct file *filp, char __user *userbuf,
+                                  size_t count, loff_t *ppos)
+{
+       struct rproc *rproc = filp->private_data;
+       char buf[20];
+       int len;
+
+       len = scnprintf(buf, sizeof(buf), "%s\n",
+                       rproc_coredump_str[rproc->dump_conf]);
+
+       return simple_read_from_buffer(userbuf, count, ppos, buf, len);
+}
+
+/*
+ * By writing to the 'coredump' debugfs entry, we control the behavior of the
+ * coredump mechanism dynamically. The default value of this entry is "default".
+ *
+ * The 'coredump' debugfs entry supports these commands:
+ *
+ * default:    This is the default coredump mechanism. When the remoteproc
+ *             crashes the entire coredump will be copied to a separate buffer
+ *             and exposed to userspace.
+ *
+ * inline:     The coredump will not be copied to a separate buffer and the
+ *             recovery process will have to wait until data is read by
+ *             userspace. But this avoids the use of extra memory.
+ *
+ * disabled:   This will disable coredump. Recovery will proceed without
+ *             collecting any dump.
+ */
+static ssize_t rproc_coredump_write(struct file *filp,
+                                   const char __user *user_buf, size_t count,
+                                   loff_t *ppos)
+{
+       struct rproc *rproc = filp->private_data;
+       int ret, err = 0;
+       char buf[20];
+
+       if (count > sizeof(buf))
+               return -EINVAL;
+
+       ret = copy_from_user(buf, user_buf, count);
+       if (ret)
+               return -EFAULT;
+
+       /* remove end of line */
+       if (buf[count - 1] == '\n')
+               buf[count - 1] = '\0';
+
+       if (rproc->state == RPROC_CRASHED) {
+               dev_err(&rproc->dev, "can't change coredump configuration\n");
+               err = -EBUSY;
+               goto out;
+       }
+
+       if (!strncmp(buf, "disable", count)) {
+               rproc->dump_conf = RPROC_COREDUMP_DISABLED;
+       } else if (!strncmp(buf, "inline", count)) {
+               rproc->dump_conf = RPROC_COREDUMP_INLINE;
+       } else if (!strncmp(buf, "default", count)) {
+               rproc->dump_conf = RPROC_COREDUMP_DEFAULT;
+       } else {
+               dev_err(&rproc->dev, "Invalid coredump configuration\n");
+               err = -EINVAL;
+       }
+out:
+       return err ? err : count;
+}
+
+static const struct file_operations rproc_coredump_fops = {
+       .read = rproc_coredump_read,
+       .write = rproc_coredump_write,
+       .open = simple_open,
+       .llseek = generic_file_llseek,
+};
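As a usage illustration only: a small user-space snippet that switches the mechanism through this debugfs entry. The debugfs mount point and the "remoteproc0" instance name are assumptions, not taken from this patch.

#include <stdio.h>

int main(void)
{
        /* Hypothetical path; the instance name depends on the platform. */
        const char *path = "/sys/kernel/debug/remoteproc/remoteproc0/coredump";
        FILE *f = fopen(path, "w");

        if (!f)
                return 1;
        /* Accepted values per the handler above: "default", "inline", "disabled". */
        fputs("inline\n", f);
        fclose(f);
        return 0;
}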
+
+/*
  * Some remote processors may support dumping trace logs into a shared
  * memory buffer. We expose this trace buffer using debugfs, so users
  * can easily tell what's going on remotely.
@@ -337,6 +425,8 @@ void rproc_create_debug_dir(struct rproc *rproc)
                            rproc, &rproc_rsc_table_fops);
        debugfs_create_file("carveout_memories", 0400, rproc->dbg_dir,
                            rproc, &rproc_carveouts_fops);
+       debugfs_create_file("coredump", 0600, rproc->dbg_dir,
+                           rproc, &rproc_coredump_fops);
 }
 
 void __init rproc_init_debugfs(void)
index 4ba7cb5..c340028 100644 (file)
@@ -28,6 +28,8 @@ struct rproc_debug_trace {
 void rproc_release(struct kref *kref);
 irqreturn_t rproc_vq_interrupt(struct rproc *rproc, int vq_id);
 void rproc_vdev_release(struct kref *ref);
+int rproc_of_parse_firmware(struct device *dev, int index,
+                           const char **fw_name);
 
 /* from remoteproc_virtio.c */
 int rproc_add_virtio_dev(struct rproc_vdev *rvdev, int id);
@@ -47,6 +49,38 @@ extern struct class rproc_class;
 int rproc_init_sysfs(void);
 void rproc_exit_sysfs(void);
 
+/* from remoteproc_coredump.c */
+void rproc_coredump_cleanup(struct rproc *rproc);
+void rproc_coredump(struct rproc *rproc);
+
+#ifdef CONFIG_REMOTEPROC_CDEV
+void rproc_init_cdev(void);
+void rproc_exit_cdev(void);
+int rproc_char_device_add(struct rproc *rproc);
+void rproc_char_device_remove(struct rproc *rproc);
+#else
+static inline void rproc_init_cdev(void)
+{
+}
+
+static inline void rproc_exit_cdev(void)
+{
+}
+
+/*
+ * The character device interface is an optional feature, if it is not enabled
+ * the function should not return an error.
+ */
+static inline int rproc_char_device_add(struct rproc *rproc)
+{
+       return 0;
+}
+
+static inline void rproc_char_device_remove(struct rproc *rproc)
+{
+}
+#endif
+
 void rproc_free_vring(struct rproc_vring *rvring);
 int rproc_alloc_vring(struct rproc_vdev *rvdev, int i);
 
@@ -79,6 +113,14 @@ static inline int rproc_unprepare_device(struct rproc *rproc)
        return 0;
 }
 
+static inline int rproc_attach_device(struct rproc *rproc)
+{
+       if (rproc->ops->attach)
+               return rproc->ops->attach(rproc);
+
+       return 0;
+}
+
 static inline
 int rproc_fw_sanity_check(struct rproc *rproc, const struct firmware *fw)
 {
index 52b8713..eea514c 100644 (file)
@@ -15,8 +15,20 @@ static ssize_t firmware_show(struct device *dev, struct device_attribute *attr,
                          char *buf)
 {
        struct rproc *rproc = to_rproc(dev);
-
-       return sprintf(buf, "%s\n", rproc->firmware);
+       const char *firmware = rproc->firmware;
+
+       /*
+        * If the remote processor has been started by an external
+        * entity, we have no idea what image it is running.  As such,
+        * simply display a generic string rather than rproc->firmware.
+        *
+        * Here we rely on the autonomous flag because a remote processor
+        * may have been attached to and is currently in a running state.
+        */
+       if (rproc->autonomous)
+               firmware = "unknown";
+
+       return sprintf(buf, "%s\n", firmware);
 }
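For completeness, a tiny user-space read of this attribute; the /sys/class/remoteproc/remoteproc0 path is an assumption based on the remoteproc class, not something introduced by this patch.

#include <stdio.h>

int main(void)
{
        char fw[128];
        FILE *f = fopen("/sys/class/remoteproc/remoteproc0/firmware", "r");

        if (!f)
                return 1;
        if (fgets(fw, sizeof(fw), f))
                printf("%s", fw);       /* prints "unknown" for an externally booted core */
        fclose(f);
        return 0;
}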
 
 /* Change firmware name via sysfs */
@@ -72,6 +84,7 @@ static const char * const rproc_state_string[] = {
        [RPROC_RUNNING]         = "running",
        [RPROC_CRASHED]         = "crashed",
        [RPROC_DELETED]         = "deleted",
+       [RPROC_DETACHED]        = "detached",
        [RPROC_LAST]            = "invalid",
 };
 
index 062797a..f4da42f 100644 (file)
 #define STM32_MBX_VQ1_ID       1
 #define STM32_MBX_SHUTDOWN     "shutdown"
 
+#define RSC_TBL_SIZE           1024
+
+#define M4_STATE_OFF           0
+#define M4_STATE_INI           1
+#define M4_STATE_CRUN          2
+#define M4_STATE_CSTOP         3
+#define M4_STATE_STANDBY       4
+#define M4_STATE_CRASH         5
+
 struct stm32_syscon {
        struct regmap *map;
        u32 reg;
@@ -71,12 +80,15 @@ struct stm32_rproc {
        struct reset_control *rst;
        struct stm32_syscon hold_boot;
        struct stm32_syscon pdds;
+       struct stm32_syscon m4_state;
+       struct stm32_syscon rsctbl;
        int wdg_irq;
        u32 nb_rmems;
        struct stm32_rproc_mem *rmems;
        struct stm32_mbox mb[MBOX_NB_MBX];
        struct workqueue_struct *workqueue;
        bool secured_soc;
+       void __iomem *rsc_va;
 };
 
 static int stm32_rproc_pa_to_da(struct rproc *rproc, phys_addr_t pa, u64 *da)
@@ -128,10 +140,10 @@ static int stm32_rproc_mem_release(struct rproc *rproc,
        return 0;
 }
 
-static int stm32_rproc_of_memory_translations(struct rproc *rproc)
+static int stm32_rproc_of_memory_translations(struct platform_device *pdev,
+                                             struct stm32_rproc *ddata)
 {
-       struct device *parent, *dev = rproc->dev.parent;
-       struct stm32_rproc *ddata = rproc->priv;
+       struct device *parent, *dev = &pdev->dev;
        struct device_node *np;
        struct stm32_rproc_mem *p_mems;
        struct stm32_rproc_mem_ranges *mem_range;
@@ -204,7 +216,7 @@ static int stm32_rproc_elf_load_rsc_table(struct rproc *rproc,
        return 0;
 }
 
-static int stm32_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw)
+static int stm32_rproc_parse_memory_regions(struct rproc *rproc)
 {
        struct device *dev = rproc->dev.parent;
        struct device_node *np = dev->of_node;
@@ -257,12 +269,23 @@ static int stm32_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw)
                index++;
        }
 
+       return 0;
+}
+
+static int stm32_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw)
+{
+       int ret = stm32_rproc_parse_memory_regions(rproc);
+
+       if (ret)
+               return ret;
+
        return stm32_rproc_elf_load_rsc_table(rproc, fw);
 }
 
 static irqreturn_t stm32_rproc_wdg(int irq, void *data)
 {
-       struct rproc *rproc = data;
+       struct platform_device *pdev = data;
+       struct rproc *rproc = platform_get_drvdata(pdev);
 
        rproc_report_crash(rproc, RPROC_WATCHDOG);
 
@@ -437,6 +460,13 @@ static int stm32_rproc_start(struct rproc *rproc)
        return stm32_rproc_set_hold_boot(rproc, true);
 }
 
+static int stm32_rproc_attach(struct rproc *rproc)
+{
+       stm32_rproc_add_coredump_trace(rproc);
+
+       return stm32_rproc_set_hold_boot(rproc, true);
+}
+
 static int stm32_rproc_stop(struct rproc *rproc)
 {
        struct stm32_rproc *ddata = rproc->priv;
@@ -474,6 +504,18 @@ static int stm32_rproc_stop(struct rproc *rproc)
                }
        }
 
+       /* update coprocessor state to OFF if available */
+       if (ddata->m4_state.map) {
+               err = regmap_update_bits(ddata->m4_state.map,
+                                        ddata->m4_state.reg,
+                                        ddata->m4_state.mask,
+                                        M4_STATE_OFF);
+               if (err) {
+                       dev_err(&rproc->dev, "failed to set copro state\n");
+                       return err;
+               }
+       }
+
        return 0;
 }
 
@@ -502,6 +544,7 @@ static void stm32_rproc_kick(struct rproc *rproc, int vqid)
 static struct rproc_ops st_rproc_ops = {
        .start          = stm32_rproc_start,
        .stop           = stm32_rproc_stop,
+       .attach         = stm32_rproc_attach,
        .kick           = stm32_rproc_kick,
        .load           = rproc_elf_load_segments,
        .parse_fw       = stm32_rproc_parse_fw,
@@ -538,12 +581,11 @@ out:
        return err;
 }
 
-static int stm32_rproc_parse_dt(struct platform_device *pdev)
+static int stm32_rproc_parse_dt(struct platform_device *pdev,
+                               struct stm32_rproc *ddata, bool *auto_boot)
 {
        struct device *dev = &pdev->dev;
        struct device_node *np = dev->of_node;
-       struct rproc *rproc = platform_get_drvdata(pdev);
-       struct stm32_rproc *ddata = rproc->priv;
        struct stm32_syscon tz;
        unsigned int tzen;
        int err, irq;
@@ -554,7 +596,7 @@ static int stm32_rproc_parse_dt(struct platform_device *pdev)
 
        if (irq > 0) {
                err = devm_request_irq(dev, irq, stm32_rproc_wdg, 0,
-                                      dev_name(dev), rproc);
+                                      dev_name(dev), pdev);
                if (err) {
                        dev_err(dev, "failed to request wdg irq\n");
                        return err;
@@ -589,7 +631,7 @@ static int stm32_rproc_parse_dt(struct platform_device *pdev)
 
        err = regmap_read(tz.map, tz.reg, &tzen);
        if (err) {
-               dev_err(&rproc->dev, "failed to read tzen\n");
+               dev_err(dev, "failed to read tzen\n");
                return err;
        }
        ddata->secured_soc = tzen & tz.mask;
@@ -605,9 +647,118 @@ static int stm32_rproc_parse_dt(struct platform_device *pdev)
        if (err)
                dev_info(dev, "failed to get pdds\n");
 
-       rproc->auto_boot = of_property_read_bool(np, "st,auto-boot");
+       *auto_boot = of_property_read_bool(np, "st,auto-boot");
 
-       return stm32_rproc_of_memory_translations(rproc);
+       /*
+        * See if we can check the M4 status, i.e. whether it was started
+        * by the boot loader or not.
+        */
+       err = stm32_rproc_get_syscon(np, "st,syscfg-m4-state",
+                                    &ddata->m4_state);
+       if (err) {
+               /* remember that the m4 state syscon is unavailable */
+               ddata->m4_state.map = NULL;
+               /* no coprocessor state syscon (optional) */
+               dev_warn(dev, "m4 state not supported\n");
+
+               /* no need to go further */
+               return 0;
+       }
+
+       /* See if we can get the resource table */
+       err = stm32_rproc_get_syscon(np, "st,syscfg-rsc-tbl",
+                                    &ddata->rsctbl);
+       if (err) {
+               /* no rsc table syscon (optional) */
+               dev_warn(dev, "rsc tbl syscon not supported\n");
+       }
+
+       return 0;
+}
+
+static int stm32_rproc_get_m4_status(struct stm32_rproc *ddata,
+                                    unsigned int *state)
+{
+       /* See stm32_rproc_parse_dt() */
+       if (!ddata->m4_state.map) {
+               /*
+                * We couldn't get the coprocessor's state, assume
+                * it is not running.
+                */
+               *state = M4_STATE_OFF;
+               return 0;
+       }
+
+       return regmap_read(ddata->m4_state.map, ddata->m4_state.reg, state);
+}
+
+static int stm32_rproc_da_to_pa(struct platform_device *pdev,
+                               struct stm32_rproc *ddata,
+                               u64 da, phys_addr_t *pa)
+{
+       struct device *dev = &pdev->dev;
+       struct stm32_rproc_mem *p_mem;
+       unsigned int i;
+
+       for (i = 0; i < ddata->nb_rmems; i++) {
+               p_mem = &ddata->rmems[i];
+
+               if (da < p_mem->dev_addr ||
+                   da >= p_mem->dev_addr + p_mem->size)
+                       continue;
+
+               *pa = da - p_mem->dev_addr + p_mem->bus_addr;
+               dev_dbg(dev, "da %llx to pa %#x\n", da, *pa);
+
+               return 0;
+       }
+
+       dev_err(dev, "can't translate da %llx\n", da);
+
+       return -EINVAL;
+}
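A worked example of the translation above, with made-up range values (purely illustrative, not an actual STM32MP1 memory map):

/*
 * Suppose one entry of ddata->rmems[] describes:
 *      dev_addr = 0x10000000 (M4 view), bus_addr = 0x30000000, size = 0x40000
 *
 * Then for da = 0x10002000 the loop matches that entry and computes:
 *      pa = da - dev_addr + bus_addr
 *         = 0x10002000 - 0x10000000 + 0x30000000 = 0x30002000
 */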
+
+static int stm32_rproc_get_loaded_rsc_table(struct platform_device *pdev,
+                                           struct rproc *rproc,
+                                           struct stm32_rproc *ddata)
+{
+       struct device *dev = &pdev->dev;
+       phys_addr_t rsc_pa;
+       u32 rsc_da;
+       int err;
+
+       err = regmap_read(ddata->rsctbl.map, ddata->rsctbl.reg, &rsc_da);
+       if (err) {
+               dev_err(dev, "failed to read rsc tbl addr\n");
+               return err;
+       }
+
+       if (!rsc_da)
+               /* no rsc table */
+               return 0;
+
+       err = stm32_rproc_da_to_pa(pdev, ddata, rsc_da, &rsc_pa);
+       if (err)
+               return err;
+
+       ddata->rsc_va = devm_ioremap_wc(dev, rsc_pa, RSC_TBL_SIZE);
+       if (IS_ERR_OR_NULL(ddata->rsc_va)) {
+               dev_err(dev, "Unable to map memory region: %pa+%x\n",
+                       &rsc_pa, RSC_TBL_SIZE);
+               ddata->rsc_va = NULL;
+               return -ENOMEM;
+       }
+
+       /*
+        * The resource table is already loaded in device memory, no need
+        * to work with a cached table.
+        */
+       rproc->cached_table = NULL;
+       /* Assuming the resource table fits in 1kB is fair */
+       rproc->table_sz = RSC_TBL_SIZE;
+       rproc->table_ptr = (struct resource_table *)ddata->rsc_va;
+
+       return 0;
 }
 
 static int stm32_rproc_probe(struct platform_device *pdev)
@@ -616,6 +767,7 @@ static int stm32_rproc_probe(struct platform_device *pdev)
        struct stm32_rproc *ddata;
        struct device_node *np = dev->of_node;
        struct rproc *rproc;
+       unsigned int state;
        int ret;
 
        ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
@@ -626,25 +778,47 @@ static int stm32_rproc_probe(struct platform_device *pdev)
        if (!rproc)
                return -ENOMEM;
 
+       ddata = rproc->priv;
+
        rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE);
+
+       ret = stm32_rproc_parse_dt(pdev, ddata, &rproc->auto_boot);
+       if (ret)
+               goto free_rproc;
+
+       ret = stm32_rproc_of_memory_translations(pdev, ddata);
+       if (ret)
+               goto free_rproc;
+
+       ret = stm32_rproc_get_m4_status(ddata, &state);
+       if (ret)
+               goto free_rproc;
+
+       if (state == M4_STATE_CRUN) {
+               rproc->state = RPROC_DETACHED;
+
+               ret = stm32_rproc_parse_memory_regions(rproc);
+               if (ret)
+                       goto free_resources;
+
+               ret = stm32_rproc_get_loaded_rsc_table(pdev, rproc, ddata);
+               if (ret)
+                       goto free_resources;
+       }
+
        rproc->has_iommu = false;
-       ddata = rproc->priv;
        ddata->workqueue = create_workqueue(dev_name(dev));
        if (!ddata->workqueue) {
                dev_err(dev, "cannot create workqueue\n");
                ret = -ENOMEM;
-               goto free_rproc;
+               goto free_resources;
        }
 
        platform_set_drvdata(pdev, rproc);
 
-       ret = stm32_rproc_parse_dt(pdev);
-       if (ret)
-               goto free_wkq;
-
        ret = stm32_rproc_request_mbox(rproc);
        if (ret)
-               goto free_rproc;
+               goto free_wkq;
 
        ret = rproc_add(rproc);
        if (ret)
@@ -656,6 +830,8 @@ free_mb:
        stm32_rproc_free_mbox(rproc);
 free_wkq:
        destroy_workqueue(ddata->workqueue);
+free_resources:
+       rproc_resource_cleanup(rproc);
 free_rproc:
        if (device_may_wakeup(dev)) {
                dev_pm_clear_wake_irq(dev);
diff --git a/drivers/remoteproc/ti_k3_dsp_remoteproc.c b/drivers/remoteproc/ti_k3_dsp_remoteproc.c
new file mode 100644 (file)
index 0000000..9011e47
--- /dev/null
@@ -0,0 +1,787 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * TI K3 DSP Remote Processor(s) driver
+ *
+ * Copyright (C) 2018-2020 Texas Instruments Incorporated - https://www.ti.com/
+ *     Suman Anna <s-anna@ti.com>
+ */
+
+#include <linux/io.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/omap-mailbox.h>
+#include <linux/platform_device.h>
+#include <linux/remoteproc.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+
+#include "omap_remoteproc.h"
+#include "remoteproc_internal.h"
+#include "ti_sci_proc.h"
+
+#define KEYSTONE_RPROC_LOCAL_ADDRESS_MASK      (SZ_16M - 1)
+
+/**
+ * struct k3_dsp_mem - internal memory structure
+ * @cpu_addr: MPU virtual address of the memory region
+ * @bus_addr: Bus address used to access the memory region
+ * @dev_addr: Device address of the memory region from DSP view
+ * @size: Size of the memory region
+ */
+struct k3_dsp_mem {
+       void __iomem *cpu_addr;
+       phys_addr_t bus_addr;
+       u32 dev_addr;
+       size_t size;
+};
+
+/**
+ * struct k3_dsp_mem_data - memory definitions for a DSP
+ * @name: name for this memory entry
+ * @dev_addr: device address for the memory entry
+ */
+struct k3_dsp_mem_data {
+       const char *name;
+       const u32 dev_addr;
+};
+
+/**
+ * struct k3_dsp_dev_data - device data structure for a DSP
+ * @mems: pointer to memory definitions for a DSP
+ * @num_mems: number of memory regions in @mems
+ * @boot_align_addr: boot vector address alignment granularity
+ * @uses_lreset: flag to denote the need for local reset management
+ */
+struct k3_dsp_dev_data {
+       const struct k3_dsp_mem_data *mems;
+       u32 num_mems;
+       u32 boot_align_addr;
+       bool uses_lreset;
+};
+
+/**
+ * struct k3_dsp_rproc - k3 DSP remote processor driver structure
+ * @dev: cached device pointer
+ * @rproc: remoteproc device handle
+ * @mem: internal memory regions data
+ * @num_mems: number of internal memory regions
+ * @rmem: reserved memory regions data
+ * @num_rmems: number of reserved memory regions
+ * @reset: reset control handle
+ * @data: pointer to DSP-specific device data
+ * @tsp: TI-SCI processor control handle
+ * @ti_sci: TI-SCI handle
+ * @ti_sci_id: TI-SCI device identifier
+ * @mbox: mailbox channel handle
+ * @client: mailbox client to request the mailbox channel
+ */
+struct k3_dsp_rproc {
+       struct device *dev;
+       struct rproc *rproc;
+       struct k3_dsp_mem *mem;
+       int num_mems;
+       struct k3_dsp_mem *rmem;
+       int num_rmems;
+       struct reset_control *reset;
+       const struct k3_dsp_dev_data *data;
+       struct ti_sci_proc *tsp;
+       const struct ti_sci_handle *ti_sci;
+       u32 ti_sci_id;
+       struct mbox_chan *mbox;
+       struct mbox_client client;
+};
+
+/**
+ * k3_dsp_rproc_mbox_callback() - inbound mailbox message handler
+ * @client: mailbox client pointer used for requesting the mailbox channel
+ * @data: mailbox payload
+ *
+ * This handler is invoked by the OMAP mailbox driver whenever a mailbox
+ * message is received. Usually, the mailbox payload simply contains
+ * the index of the virtqueue that is kicked by the remote processor,
+ * and we let remoteproc core handle it.
+ *
+ * In addition to virtqueue indices, we also have some out-of-band values
+ * that indicate different events. Those values are deliberately very
+ * large so they don't coincide with virtqueue indices.
+ */
+static void k3_dsp_rproc_mbox_callback(struct mbox_client *client, void *data)
+{
+       struct k3_dsp_rproc *kproc = container_of(client, struct k3_dsp_rproc,
+                                                 client);
+       struct device *dev = kproc->rproc->dev.parent;
+       const char *name = kproc->rproc->name;
+       u32 msg = omap_mbox_message(data);
+
+       dev_dbg(dev, "mbox msg: 0x%x\n", msg);
+
+       switch (msg) {
+       case RP_MBOX_CRASH:
+               /*
+                * remoteproc detected an exception, but error recovery is not
+                * supported. So, just log this for now
+                */
+               dev_err(dev, "K3 DSP rproc %s crashed\n", name);
+               break;
+       case RP_MBOX_ECHO_REPLY:
+               dev_info(dev, "received echo reply from %s\n", name);
+               break;
+       default:
+               /* silently handle all other valid messages */
+               if (msg >= RP_MBOX_READY && msg < RP_MBOX_END_MSG)
+                       return;
+               if (msg > kproc->rproc->max_notifyid) {
+                       dev_dbg(dev, "dropping unknown message 0x%x\n", msg);
+                       return;
+               }
+               /* msg contains the index of the triggered vring */
+               if (rproc_vq_interrupt(kproc->rproc, msg) == IRQ_NONE)
+                       dev_dbg(dev, "no message was found in vqid %d\n", msg);
+       }
+}
+
+/*
+ * Kick the remote processor to notify about pending unprocessed messages.
+ * The vqid usage is not used and is inconsequential, as the kick is performed
+ * through a simulated GPIO (a bit in an IPC interrupt-triggering register),
+ * the remote processor is expected to process both its Tx and Rx virtqueues.
+ */
+static void k3_dsp_rproc_kick(struct rproc *rproc, int vqid)
+{
+       struct k3_dsp_rproc *kproc = rproc->priv;
+       struct device *dev = rproc->dev.parent;
+       mbox_msg_t msg = (mbox_msg_t)vqid;
+       int ret;
+
+       /* send the index of the triggered virtqueue in the mailbox payload */
+       ret = mbox_send_message(kproc->mbox, (void *)msg);
+       if (ret < 0)
+               dev_err(dev, "failed to send mailbox message, status = %d\n",
+                       ret);
+}
+
+/* Put the DSP processor into reset */
+static int k3_dsp_rproc_reset(struct k3_dsp_rproc *kproc)
+{
+       struct device *dev = kproc->dev;
+       int ret;
+
+       ret = reset_control_assert(kproc->reset);
+       if (ret) {
+               dev_err(dev, "local-reset assert failed, ret = %d\n", ret);
+               return ret;
+       }
+
+       if (kproc->data->uses_lreset)
+               return ret;
+
+       ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
+                                                   kproc->ti_sci_id);
+       if (ret) {
+               dev_err(dev, "module-reset assert failed, ret = %d\n", ret);
+               if (reset_control_deassert(kproc->reset))
+                       dev_warn(dev, "local-reset deassert back failed\n");
+       }
+
+       return ret;
+}
+
+/* Release the DSP processor from reset */
+static int k3_dsp_rproc_release(struct k3_dsp_rproc *kproc)
+{
+       struct device *dev = kproc->dev;
+       int ret;
+
+       if (kproc->data->uses_lreset)
+               goto lreset;
+
+       ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci,
+                                                   kproc->ti_sci_id);
+       if (ret) {
+               dev_err(dev, "module-reset deassert failed, ret = %d\n", ret);
+               return ret;
+       }
+
+lreset:
+       ret = reset_control_deassert(kproc->reset);
+       if (ret) {
+               dev_err(dev, "local-reset deassert failed, ret = %d\n", ret);
+               if (kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
+                                                         kproc->ti_sci_id))
+                       dev_warn(dev, "module-reset assert back failed\n");
+       }
+
+       return ret;
+}
+
+/*
+ * The C66x DSP cores have a local reset that affects only the CPU, and a
+ * generic module reset that powers on the device and allows the DSP internal
+ * memories to be accessed while the local reset is asserted. This function is
+ * used to release the global reset on C66x DSPs to allow loading into the DSP
+ * internal RAMs. The .prepare() ops is invoked by remoteproc core before any
+ * firmware loading, and is followed by the .start() ops after loading to
+ * actually let the C66x DSP cores run.
+ */
+static int k3_dsp_rproc_prepare(struct rproc *rproc)
+{
+       struct k3_dsp_rproc *kproc = rproc->priv;
+       struct device *dev = kproc->dev;
+       int ret;
+
+       ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci,
+                                                   kproc->ti_sci_id);
+       if (ret)
+               dev_err(dev, "module-reset deassert failed, cannot enable internal RAM loading, ret = %d\n",
+                       ret);
+
+       return ret;
+}
+
+/*
+ * This function implements the .unprepare() ops and performs the complementary
+ * operations to that of the .prepare() ops. The function is used to assert the
+ * global reset on applicable C66x cores. This completes the second portion of
+ * powering down the C66x DSP cores. The cores themselves are only halted in the
+ * .stop() callback through the local reset, and the .unprepare() ops is invoked
+ * by the remoteproc core after the remoteproc is stopped to balance the global
+ * reset.
+ */
+static int k3_dsp_rproc_unprepare(struct rproc *rproc)
+{
+       struct k3_dsp_rproc *kproc = rproc->priv;
+       struct device *dev = kproc->dev;
+       int ret;
+
+       ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
+                                                   kproc->ti_sci_id);
+       if (ret)
+               dev_err(dev, "module-reset assert failed, ret = %d\n", ret);
+
+       return ret;
+}
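A hedged condensation of the ordering the two comments above describe; example_c66_boot_flow() is not a function in this driver, it merely mirrors the sequence the remoteproc core drives for a core with uses_lreset set.

static int example_c66_boot_flow(struct rproc *rproc)
{
        int ret;

        /* .prepare(): release the module reset so the internal RAMs are loadable */
        ret = k3_dsp_rproc_prepare(rproc);
        if (ret)
                return ret;

        /* the remoteproc core loads the ELF segments into L1/L2 RAM here */

        /* .start(): program the boot vector and release the local reset */
        return k3_dsp_rproc_start(rproc);
}

/*
 * Shutdown runs the mirror image: .stop() asserts the local reset, then
 * .unprepare() asserts the module reset to balance .prepare().
 */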
+
+/*
+ * Power up the DSP remote processor.
+ *
+ * This function will be invoked only after the firmware for this rproc
+ * was loaded, parsed successfully, and all of its resource requirements
+ * were met.
+ */
+static int k3_dsp_rproc_start(struct rproc *rproc)
+{
+       struct k3_dsp_rproc *kproc = rproc->priv;
+       struct mbox_client *client = &kproc->client;
+       struct device *dev = kproc->dev;
+       u32 boot_addr;
+       int ret;
+
+       client->dev = dev;
+       client->tx_done = NULL;
+       client->rx_callback = k3_dsp_rproc_mbox_callback;
+       client->tx_block = false;
+       client->knows_txdone = false;
+
+       kproc->mbox = mbox_request_channel(client, 0);
+       if (IS_ERR(kproc->mbox)) {
+               ret = -EBUSY;
+               dev_err(dev, "mbox_request_channel failed: %ld\n",
+                       PTR_ERR(kproc->mbox));
+               return ret;
+       }
+
+       /*
+        * Ping the remote processor, this is only for sanity's sake for now;
+        * there is no functional effect whatsoever.
+        *
+        * Note that the reply will _not_ arrive immediately: this message
+        * will wait in the mailbox fifo until the remote processor is booted.
+        */
+       ret = mbox_send_message(kproc->mbox, (void *)RP_MBOX_ECHO_REQUEST);
+       if (ret < 0) {
+               dev_err(dev, "mbox_send_message failed: %d\n", ret);
+               goto put_mbox;
+       }
+
+       boot_addr = rproc->bootaddr;
+       if (boot_addr & (kproc->data->boot_align_addr - 1)) {
+               dev_err(dev, "invalid boot address 0x%x, must be aligned on a 0x%x boundary\n",
+                       boot_addr, kproc->data->boot_align_addr);
+               ret = -EINVAL;
+               goto put_mbox;
+       }
+
+       dev_dbg(dev, "booting DSP core using boot addr = 0x%x\n", boot_addr);
+       ret = ti_sci_proc_set_config(kproc->tsp, boot_addr, 0, 0);
+       if (ret)
+               goto put_mbox;
+
+       ret = k3_dsp_rproc_release(kproc);
+       if (ret)
+               goto put_mbox;
+
+       return 0;
+
+put_mbox:
+       mbox_free_channel(kproc->mbox);
+       return ret;
+}
+
+/*
+ * Stop the DSP remote processor.
+ *
+ * This function puts the DSP processor into reset, and finishes processing
+ * of any pending messages.
+ */
+static int k3_dsp_rproc_stop(struct rproc *rproc)
+{
+       struct k3_dsp_rproc *kproc = rproc->priv;
+
+       mbox_free_channel(kproc->mbox);
+
+       k3_dsp_rproc_reset(kproc);
+
+       return 0;
+}
+
+/*
+ * Custom function to translate a DSP device address (internal RAMs only) to a
+ * kernel virtual address.  The DSPs can access their RAMs at either an internal
+ * address visible only from a DSP, or at the SoC-level bus address. Both these
+ * addresses need to be looked through for translation. The translated addresses
+ * can be used either by the remoteproc core for loading (when using kernel
+ * remoteproc loader), or by any rpmsg bus drivers.
+ */
+static void *k3_dsp_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len)
+{
+       struct k3_dsp_rproc *kproc = rproc->priv;
+       void __iomem *va = NULL;
+       phys_addr_t bus_addr;
+       u32 dev_addr, offset;
+       size_t size;
+       int i;
+
+       if (len == 0)
+               return NULL;
+
+       for (i = 0; i < kproc->num_mems; i++) {
+               bus_addr = kproc->mem[i].bus_addr;
+               dev_addr = kproc->mem[i].dev_addr;
+               size = kproc->mem[i].size;
+
+               if (da < KEYSTONE_RPROC_LOCAL_ADDRESS_MASK) {
+                       /* handle DSP-view addresses */
+                       if (da >= dev_addr &&
+                           ((da + len) <= (dev_addr + size))) {
+                               offset = da - dev_addr;
+                               va = kproc->mem[i].cpu_addr + offset;
+                               return (__force void *)va;
+                       }
+               } else {
+                       /* handle SoC-view addresses */
+                       if (da >= bus_addr &&
+                           (da + len) <= (bus_addr + size)) {
+                               offset = da - bus_addr;
+                               va = kproc->mem[i].cpu_addr + offset;
+                               return (__force void *)va;
+                       }
+               }
+       }
+
+       /* handle static DDR reserved memory regions */
+       for (i = 0; i < kproc->num_rmems; i++) {
+               dev_addr = kproc->rmem[i].dev_addr;
+               size = kproc->rmem[i].size;
+
+               if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
+                       offset = da - dev_addr;
+                       va = kproc->rmem[i].cpu_addr + offset;
+                       return (__force void *)va;
+               }
+       }
+
+       return NULL;
+}
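To make the two lookup branches concrete, a worked example; the bus address and size are made up, only the 0x800000 L2 SRAM device address comes from c66_mems[] further down.

/*
 * Assume one kproc->mem[] entry with:
 *      dev_addr = 0x00800000, bus_addr = 0x4d80000000 (illustrative), size = 0x80000
 *
 * da = 0x00801000   -> below the 16 MB local-address mask, matches the
 *                      DSP-view branch, offset = 0x1000
 * da = 0x4d80001000 -> matches the SoC-view branch, offset = 0x1000
 *
 * Both return cpu_addr + 0x1000, i.e. the same kernel virtual address.
 */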
+
+static const struct rproc_ops k3_dsp_rproc_ops = {
+       .start          = k3_dsp_rproc_start,
+       .stop           = k3_dsp_rproc_stop,
+       .kick           = k3_dsp_rproc_kick,
+       .da_to_va       = k3_dsp_rproc_da_to_va,
+};
+
+static int k3_dsp_rproc_of_get_memories(struct platform_device *pdev,
+                                       struct k3_dsp_rproc *kproc)
+{
+       const struct k3_dsp_dev_data *data = kproc->data;
+       struct device *dev = &pdev->dev;
+       struct resource *res;
+       int num_mems = 0;
+       int i;
+
+       num_mems = kproc->data->num_mems;
+       kproc->mem = devm_kcalloc(kproc->dev, num_mems,
+                                 sizeof(*kproc->mem), GFP_KERNEL);
+       if (!kproc->mem)
+               return -ENOMEM;
+
+       for (i = 0; i < num_mems; i++) {
+               res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+                                                  data->mems[i].name);
+               if (!res) {
+                       dev_err(dev, "found no memory resource for %s\n",
+                               data->mems[i].name);
+                       return -EINVAL;
+               }
+               if (!devm_request_mem_region(dev, res->start,
+                                            resource_size(res),
+                                            dev_name(dev))) {
+                       dev_err(dev, "could not request %s region for resource\n",
+                               data->mems[i].name);
+                       return -EBUSY;
+               }
+
+               kproc->mem[i].cpu_addr = devm_ioremap_wc(dev, res->start,
+                                                        resource_size(res));
+               if (IS_ERR(kproc->mem[i].cpu_addr)) {
+                       dev_err(dev, "failed to map %s memory\n",
+                               data->mems[i].name);
+                       return PTR_ERR(kproc->mem[i].cpu_addr);
+               }
+               kproc->mem[i].bus_addr = res->start;
+               kproc->mem[i].dev_addr = data->mems[i].dev_addr;
+               kproc->mem[i].size = resource_size(res);
+
+               dev_dbg(dev, "memory %8s: bus addr %pa size 0x%zx va %pK da 0x%x\n",
+                       data->mems[i].name, &kproc->mem[i].bus_addr,
+                       kproc->mem[i].size, kproc->mem[i].cpu_addr,
+                       kproc->mem[i].dev_addr);
+       }
+       kproc->num_mems = num_mems;
+
+       return 0;
+}
+
+static int k3_dsp_reserved_mem_init(struct k3_dsp_rproc *kproc)
+{
+       struct device *dev = kproc->dev;
+       struct device_node *np = dev->of_node;
+       struct device_node *rmem_np;
+       struct reserved_mem *rmem;
+       int num_rmems;
+       int ret, i;
+
+       num_rmems = of_property_count_elems_of_size(np, "memory-region",
+                                                   sizeof(phandle));
+       if (num_rmems <= 0) {
+               dev_err(dev, "device does not have reserved memory regions, ret = %d\n",
+                       num_rmems);
+               return -EINVAL;
+       }
+       if (num_rmems < 2) {
+               dev_err(dev, "device needs at least two memory regions to be defined, num = %d\n",
+                       num_rmems);
+               return -EINVAL;
+       }
+
+       /* use reserved memory region 0 for vring DMA allocations */
+       ret = of_reserved_mem_device_init_by_idx(dev, np, 0);
+       if (ret) {
+               dev_err(dev, "device cannot initialize DMA pool, ret = %d\n",
+                       ret);
+               return ret;
+       }
+
+       num_rmems--;
+       kproc->rmem = kcalloc(num_rmems, sizeof(*kproc->rmem), GFP_KERNEL);
+       if (!kproc->rmem) {
+               ret = -ENOMEM;
+               goto release_rmem;
+       }
+
+       /* use remaining reserved memory regions for static carveouts */
+       for (i = 0; i < num_rmems; i++) {
+               rmem_np = of_parse_phandle(np, "memory-region", i + 1);
+               if (!rmem_np) {
+                       ret = -EINVAL;
+                       goto unmap_rmem;
+               }
+
+               rmem = of_reserved_mem_lookup(rmem_np);
+               if (!rmem) {
+                       of_node_put(rmem_np);
+                       ret = -EINVAL;
+                       goto unmap_rmem;
+               }
+               of_node_put(rmem_np);
+
+               kproc->rmem[i].bus_addr = rmem->base;
+               /* 64-bit address regions currently not supported */
+               kproc->rmem[i].dev_addr = (u32)rmem->base;
+               kproc->rmem[i].size = rmem->size;
+               kproc->rmem[i].cpu_addr = ioremap_wc(rmem->base, rmem->size);
+               if (!kproc->rmem[i].cpu_addr) {
+                       dev_err(dev, "failed to map reserved memory#%d at %pa of size %pa\n",
+                               i + 1, &rmem->base, &rmem->size);
+                       ret = -ENOMEM;
+                       goto unmap_rmem;
+               }
+
+               dev_dbg(dev, "reserved memory%d: bus addr %pa size 0x%zx va %pK da 0x%x\n",
+                       i + 1, &kproc->rmem[i].bus_addr,
+                       kproc->rmem[i].size, kproc->rmem[i].cpu_addr,
+                       kproc->rmem[i].dev_addr);
+       }
+       kproc->num_rmems = num_rmems;
+
+       return 0;
+
+unmap_rmem:
+       for (i--; i >= 0; i--)
+               iounmap(kproc->rmem[i].cpu_addr);
+       kfree(kproc->rmem);
+release_rmem:
+       of_reserved_mem_device_release(kproc->dev);
+       return ret;
+}
+
+static void k3_dsp_reserved_mem_exit(struct k3_dsp_rproc *kproc)
+{
+       int i;
+
+       for (i = 0; i < kproc->num_rmems; i++)
+               iounmap(kproc->rmem[i].cpu_addr);
+       kfree(kproc->rmem);
+
+       of_reserved_mem_device_release(kproc->dev);
+}
+
+static
+struct ti_sci_proc *k3_dsp_rproc_of_get_tsp(struct device *dev,
+                                           const struct ti_sci_handle *sci)
+{
+       struct ti_sci_proc *tsp;
+       u32 temp[2];
+       int ret;
+
+       ret = of_property_read_u32_array(dev->of_node, "ti,sci-proc-ids",
+                                        temp, 2);
+       if (ret < 0)
+               return ERR_PTR(ret);
+
+       tsp = kzalloc(sizeof(*tsp), GFP_KERNEL);
+       if (!tsp)
+               return ERR_PTR(-ENOMEM);
+
+       tsp->dev = dev;
+       tsp->sci = sci;
+       tsp->ops = &sci->ops.proc_ops;
+       tsp->proc_id = temp[0];
+       tsp->host_id = temp[1];
+
+       return tsp;
+}
+
+static int k3_dsp_rproc_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct device_node *np = dev->of_node;
+       const struct k3_dsp_dev_data *data;
+       struct k3_dsp_rproc *kproc;
+       struct rproc *rproc;
+       const char *fw_name;
+       int ret = 0;
+       int ret1;
+
+       data = of_device_get_match_data(dev);
+       if (!data)
+               return -ENODEV;
+
+       ret = rproc_of_parse_firmware(dev, 0, &fw_name);
+       if (ret) {
+               dev_err(dev, "failed to parse firmware-name property, ret = %d\n",
+                       ret);
+               return ret;
+       }
+
+       rproc = rproc_alloc(dev, dev_name(dev), &k3_dsp_rproc_ops, fw_name,
+                           sizeof(*kproc));
+       if (!rproc)
+               return -ENOMEM;
+
+       rproc->has_iommu = false;
+       rproc->recovery_disabled = true;
+       if (data->uses_lreset) {
+               rproc->ops->prepare = k3_dsp_rproc_prepare;
+               rproc->ops->unprepare = k3_dsp_rproc_unprepare;
+       }
+       kproc = rproc->priv;
+       kproc->rproc = rproc;
+       kproc->dev = dev;
+       kproc->data = data;
+
+       kproc->ti_sci = ti_sci_get_by_phandle(np, "ti,sci");
+       if (IS_ERR(kproc->ti_sci)) {
+               ret = PTR_ERR(kproc->ti_sci);
+               if (ret != -EPROBE_DEFER) {
+                       dev_err(dev, "failed to get ti-sci handle, ret = %d\n",
+                               ret);
+               }
+               kproc->ti_sci = NULL;
+               goto free_rproc;
+       }
+
+       ret = of_property_read_u32(np, "ti,sci-dev-id", &kproc->ti_sci_id);
+       if (ret) {
+               dev_err(dev, "missing 'ti,sci-dev-id' property\n");
+               goto put_sci;
+       }
+
+       kproc->reset = devm_reset_control_get_exclusive(dev, NULL);
+       if (IS_ERR(kproc->reset)) {
+               ret = PTR_ERR(kproc->reset);
+               dev_err(dev, "failed to get reset, status = %d\n", ret);
+               goto put_sci;
+       }
+
+       kproc->tsp = k3_dsp_rproc_of_get_tsp(dev, kproc->ti_sci);
+       if (IS_ERR(kproc->tsp)) {
+               ret = PTR_ERR(kproc->tsp);
+               dev_err(dev, "failed to construct ti-sci proc control, ret = %d\n",
+                       ret);
+               goto put_sci;
+       }
+
+       ret = ti_sci_proc_request(kproc->tsp);
+       if (ret < 0) {
+               dev_err(dev, "ti_sci_proc_request failed, ret = %d\n", ret);
+               goto free_tsp;
+       }
+
+       ret = k3_dsp_rproc_of_get_memories(pdev, kproc);
+       if (ret)
+               goto release_tsp;
+
+       ret = k3_dsp_reserved_mem_init(kproc);
+       if (ret) {
+               dev_err(dev, "reserved memory init failed, ret = %d\n", ret);
+               goto release_tsp;
+       }
+
+       /*
+        * Ensure the DSP local reset is asserted so that the DSP doesn't
+        * execute bogus code when the module reset is released in .prepare().
+        */
+       if (data->uses_lreset) {
+               ret = reset_control_status(kproc->reset);
+               if (ret < 0) {
+                       dev_err(dev, "failed to get reset status, status = %d\n",
+                               ret);
+                       goto release_mem;
+               } else if (ret == 0) {
+                       dev_warn(dev, "local reset is deasserted for device\n");
+                       k3_dsp_rproc_reset(kproc);
+               }
+       }
+
+       ret = rproc_add(rproc);
+       if (ret) {
+               dev_err(dev, "failed to register device with remoteproc core, status = %d\n",
+                       ret);
+               goto release_mem;
+       }
+
+       platform_set_drvdata(pdev, kproc);
+
+       return 0;
+
+release_mem:
+       k3_dsp_reserved_mem_exit(kproc);
+release_tsp:
+       ret1 = ti_sci_proc_release(kproc->tsp);
+       if (ret1)
+               dev_err(dev, "failed to release proc, ret = %d\n", ret1);
+free_tsp:
+       kfree(kproc->tsp);
+put_sci:
+       ret1 = ti_sci_put_handle(kproc->ti_sci);
+       if (ret1)
+               dev_err(dev, "failed to put ti_sci handle, ret = %d\n", ret1);
+free_rproc:
+       rproc_free(rproc);
+       return ret;
+}
+
+static int k3_dsp_rproc_remove(struct platform_device *pdev)
+{
+       struct k3_dsp_rproc *kproc = platform_get_drvdata(pdev);
+       struct device *dev = &pdev->dev;
+       int ret;
+
+       rproc_del(kproc->rproc);
+
+       ret = ti_sci_proc_release(kproc->tsp);
+       if (ret)
+               dev_err(dev, "failed to release proc, ret = %d\n", ret);
+
+       kfree(kproc->tsp);
+
+       ret = ti_sci_put_handle(kproc->ti_sci);
+       if (ret)
+               dev_err(dev, "failed to put ti_sci handle, ret = %d\n", ret);
+
+       k3_dsp_reserved_mem_exit(kproc);
+       rproc_free(kproc->rproc);
+
+       return 0;
+}
+
+static const struct k3_dsp_mem_data c66_mems[] = {
+       { .name = "l2sram", .dev_addr = 0x800000 },
+       { .name = "l1pram", .dev_addr = 0xe00000 },
+       { .name = "l1dram", .dev_addr = 0xf00000 },
+};
+
+/* C71x cores only have a L1P Cache, there are no L1P SRAMs */
+static const struct k3_dsp_mem_data c71_mems[] = {
+       { .name = "l2sram", .dev_addr = 0x800000 },
+       { .name = "l1dram", .dev_addr = 0xe00000 },
+};
+
+static const struct k3_dsp_dev_data c66_data = {
+       .mems = c66_mems,
+       .num_mems = ARRAY_SIZE(c66_mems),
+       .boot_align_addr = SZ_1K,
+       .uses_lreset = true,
+};
+
+static const struct k3_dsp_dev_data c71_data = {
+       .mems = c71_mems,
+       .num_mems = ARRAY_SIZE(c71_mems),
+       .boot_align_addr = SZ_2M,
+       .uses_lreset = false,
+};
+
+static const struct of_device_id k3_dsp_of_match[] = {
+       { .compatible = "ti,j721e-c66-dsp", .data = &c66_data, },
+       { .compatible = "ti,j721e-c71-dsp", .data = &c71_data, },
+       { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, k3_dsp_of_match);
+
+static struct platform_driver k3_dsp_rproc_driver = {
+       .probe  = k3_dsp_rproc_probe,
+       .remove = k3_dsp_rproc_remove,
+       .driver = {
+               .name = "k3-dsp-rproc",
+               .of_match_table = k3_dsp_of_match,
+       },
+};
+
+module_platform_driver(k3_dsp_rproc_driver);
+
+MODULE_AUTHOR("Suman Anna <s-anna@ti.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TI K3 DSP Remoteproc driver");
diff --git a/drivers/remoteproc/ti_sci_proc.h b/drivers/remoteproc/ti_sci_proc.h
new file mode 100644 (file)
index 0000000..778558a
--- /dev/null
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Texas Instruments TI-SCI Processor Controller Helper Functions
+ *
+ * Copyright (C) 2018-2020 Texas Instruments Incorporated - https://www.ti.com/
+ *     Suman Anna <s-anna@ti.com>
+ */
+
+#ifndef REMOTEPROC_TI_SCI_PROC_H
+#define REMOTEPROC_TI_SCI_PROC_H
+
+#include <linux/soc/ti/ti_sci_protocol.h>
+
+/**
+ * struct ti_sci_proc - structure representing a processor control client
+ * @sci: cached TI-SCI protocol handle
+ * @ops: cached TI-SCI proc ops
+ * @dev: cached client device pointer
+ * @proc_id: processor id for the consumer remoteproc device
+ * @host_id: host id to pass the control over for this consumer remoteproc
+ *          device
+ */
+struct ti_sci_proc {
+       const struct ti_sci_handle *sci;
+       const struct ti_sci_proc_ops *ops;
+       struct device *dev;
+       u8 proc_id;
+       u8 host_id;
+};
+
+static inline int ti_sci_proc_request(struct ti_sci_proc *tsp)
+{
+       int ret;
+
+       ret = tsp->ops->request(tsp->sci, tsp->proc_id);
+       if (ret)
+               dev_err(tsp->dev, "ti-sci processor request failed: %d\n",
+                       ret);
+       return ret;
+}
+
+static inline int ti_sci_proc_release(struct ti_sci_proc *tsp)
+{
+       int ret;
+
+       ret = tsp->ops->release(tsp->sci, tsp->proc_id);
+       if (ret)
+               dev_err(tsp->dev, "ti-sci processor release failed: %d\n",
+                       ret);
+       return ret;
+}
+
+static inline int ti_sci_proc_handover(struct ti_sci_proc *tsp)
+{
+       int ret;
+
+       ret = tsp->ops->handover(tsp->sci, tsp->proc_id, tsp->host_id);
+       if (ret)
+               dev_err(tsp->dev, "ti-sci processor handover of %d to %d failed: %d\n",
+                       tsp->proc_id, tsp->host_id, ret);
+       return ret;
+}
+
+static inline int ti_sci_proc_set_config(struct ti_sci_proc *tsp,
+                                        u64 boot_vector,
+                                        u32 cfg_set, u32 cfg_clr)
+{
+       int ret;
+
+       ret = tsp->ops->set_config(tsp->sci, tsp->proc_id, boot_vector,
+                                  cfg_set, cfg_clr);
+       if (ret)
+               dev_err(tsp->dev, "ti-sci processor set_config failed: %d\n",
+                       ret);
+       return ret;
+}
+
+static inline int ti_sci_proc_set_control(struct ti_sci_proc *tsp,
+                                         u32 ctrl_set, u32 ctrl_clr)
+{
+       int ret;
+
+       ret = tsp->ops->set_control(tsp->sci, tsp->proc_id, ctrl_set, ctrl_clr);
+       if (ret)
+               dev_err(tsp->dev, "ti-sci processor set_control failed: %d\n",
+                       ret);
+       return ret;
+}
+
+static inline int ti_sci_proc_get_status(struct ti_sci_proc *tsp,
+                                        u64 *boot_vector, u32 *cfg_flags,
+                                        u32 *ctrl_flags, u32 *status_flags)
+{
+       int ret;
+
+       ret = tsp->ops->get_status(tsp->sci, tsp->proc_id, boot_vector,
+                                  cfg_flags, ctrl_flags, status_flags);
+       if (ret)
+               dev_err(tsp->dev, "ti-sci processor get_status failed: %d\n",
+                       ret);
+       return ret;
+}
+
+#endif /* REMOTEPROC_TI_SCI_PROC_H */
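A sketch of how a consumer typically chains these helpers, mirroring the probe/start path of ti_k3_dsp_remoteproc.c above; example_tsp_boot() itself is illustrative and error unwinding is trimmed for brevity.

static int example_tsp_boot(struct ti_sci_proc *tsp, u64 boot_vector)
{
        int ret;

        ret = ti_sci_proc_request(tsp);         /* take ownership of the core */
        if (ret)
                return ret;

        ret = ti_sci_proc_set_config(tsp, boot_vector, 0, 0);
        if (ret)
                ti_sci_proc_release(tsp);       /* give the core back on failure */

        return ret;
}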
index 07d4f33..9006fc7 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/slab.h>
 #include <linux/sched.h>
 #include <linux/virtio.h>
+#include <linux/virtio_byteorder.h>
 #include <linux/virtio_ids.h>
 #include <linux/virtio_config.h>
 #include <linux/wait.h>
@@ -84,11 +85,11 @@ struct virtproc_info {
  * Every message sent(/received) on the rpmsg bus begins with this header.
  */
 struct rpmsg_hdr {
-       u32 src;
-       u32 dst;
-       u32 reserved;
-       u16 len;
-       u16 flags;
+       __virtio32 src;
+       __virtio32 dst;
+       __virtio32 reserved;
+       __virtio16 len;
+       __virtio16 flags;
        u8 data[];
 } __packed;
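The header is now stored in virtio byte order, so every access goes through the virtio accessors. A minimal sketch of that pattern follows; the example_* helpers are illustrative, while cpu_to_virtio32(), cpu_to_virtio16() and virtio16_to_cpu() are the real accessors, each taking the transport's struct virtio_device.

static inline void example_fill_hdr(struct virtio_device *vdev,
                                    struct rpmsg_hdr *hdr, u32 src, u32 dst,
                                    u16 len)
{
        hdr->src = cpu_to_virtio32(vdev, src);  /* stored in device byte order */
        hdr->dst = cpu_to_virtio32(vdev, dst);
        hdr->len = cpu_to_virtio16(vdev, len);
}

static inline u16 example_hdr_len(struct virtio_device *vdev,
                                  const struct rpmsg_hdr *hdr)
{
        return virtio16_to_cpu(vdev, hdr->len); /* back to CPU byte order */
}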
 
@@ -106,8 +107,8 @@ struct rpmsg_hdr {
  */
 struct rpmsg_ns_msg {
        char name[RPMSG_NAME_SIZE];
-       u32 addr;
-       u32 flags;
+       __virtio32 addr;
+       __virtio32 flags;
 } __packed;
 
 /**
@@ -335,8 +336,8 @@ static int virtio_rpmsg_announce_create(struct rpmsg_device *rpdev)
                struct rpmsg_ns_msg nsm;
 
                strncpy(nsm.name, rpdev->id.name, RPMSG_NAME_SIZE);
-               nsm.addr = rpdev->ept->addr;
-               nsm.flags = RPMSG_NS_CREATE;
+               nsm.addr = cpu_to_virtio32(vrp->vdev, rpdev->ept->addr);
+               nsm.flags = cpu_to_virtio32(vrp->vdev, RPMSG_NS_CREATE);
 
                err = rpmsg_sendto(rpdev->ept, &nsm, sizeof(nsm), RPMSG_NS_ADDR);
                if (err)
@@ -359,8 +360,8 @@ static int virtio_rpmsg_announce_destroy(struct rpmsg_device *rpdev)
                struct rpmsg_ns_msg nsm;
 
                strncpy(nsm.name, rpdev->id.name, RPMSG_NAME_SIZE);
-               nsm.addr = rpdev->ept->addr;
-               nsm.flags = RPMSG_NS_DESTROY;
+               nsm.addr = cpu_to_virtio32(vrp->vdev, rpdev->ept->addr);
+               nsm.flags = cpu_to_virtio32(vrp->vdev, RPMSG_NS_DESTROY);
 
                err = rpmsg_sendto(rpdev->ept, &nsm, sizeof(nsm), RPMSG_NS_ADDR);
                if (err)
@@ -612,18 +613,18 @@ static int rpmsg_send_offchannel_raw(struct rpmsg_device *rpdev,
                }
        }
 
-       msg->len = len;
+       msg->len = cpu_to_virtio16(vrp->vdev, len);
        msg->flags = 0;
-       msg->src = src;
-       msg->dst = dst;
+       msg->src = cpu_to_virtio32(vrp->vdev, src);
+       msg->dst = cpu_to_virtio32(vrp->vdev, dst);
        msg->reserved = 0;
        memcpy(msg->data, data, len);
 
        dev_dbg(dev, "TX From 0x%x, To 0x%x, Len %d, Flags %d, Reserved %d\n",
-               msg->src, msg->dst, msg->len, msg->flags, msg->reserved);
+               src, dst, len, msg->flags, msg->reserved);
 #if defined(CONFIG_DYNAMIC_DEBUG)
        dynamic_hex_dump("rpmsg_virtio TX: ", DUMP_PREFIX_NONE, 16, 1,
-                        msg, sizeof(*msg) + msg->len, true);
+                        msg, sizeof(*msg) + len, true);
 #endif
 
        rpmsg_sg_init(&sg, msg, sizeof(*msg) + len);
@@ -704,13 +705,17 @@ static int rpmsg_recv_single(struct virtproc_info *vrp, struct device *dev,
 {
        struct rpmsg_endpoint *ept;
        struct scatterlist sg;
+       unsigned int msg_len = virtio16_to_cpu(vrp->vdev, msg->len);
        int err;
 
        dev_dbg(dev, "From: 0x%x, To: 0x%x, Len: %d, Flags: %d, Reserved: %d\n",
-               msg->src, msg->dst, msg->len, msg->flags, msg->reserved);
+               virtio32_to_cpu(vrp->vdev, msg->src),
+               virtio32_to_cpu(vrp->vdev, msg->dst), msg_len,
+               virtio16_to_cpu(vrp->vdev, msg->flags),
+               virtio32_to_cpu(vrp->vdev, msg->reserved));
 #if defined(CONFIG_DYNAMIC_DEBUG)
        dynamic_hex_dump("rpmsg_virtio RX: ", DUMP_PREFIX_NONE, 16, 1,
-                        msg, sizeof(*msg) + msg->len, true);
+                        msg, sizeof(*msg) + msg_len, true);
 #endif
 
        /*
@@ -718,15 +723,15 @@ static int rpmsg_recv_single(struct virtproc_info *vrp, struct device *dev,
         * the reported payload length.
         */
        if (len > vrp->buf_size ||
-           msg->len > (len - sizeof(struct rpmsg_hdr))) {
-               dev_warn(dev, "inbound msg too big: (%d, %d)\n", len, msg->len);
+           msg_len > (len - sizeof(struct rpmsg_hdr))) {
+               dev_warn(dev, "inbound msg too big: (%d, %d)\n", len, msg_len);
                return -EINVAL;
        }
 
        /* use the dst addr to fetch the callback of the appropriate user */
        mutex_lock(&vrp->endpoints_lock);
 
-       ept = idr_find(&vrp->endpoints, msg->dst);
+       ept = idr_find(&vrp->endpoints, virtio32_to_cpu(vrp->vdev, msg->dst));
 
        /* let's make sure no one deallocates ept while we use it */
        if (ept)
@@ -739,8 +744,8 @@ static int rpmsg_recv_single(struct virtproc_info *vrp, struct device *dev,
                mutex_lock(&ept->cb_lock);
 
                if (ept->cb)
-                       ept->cb(ept->rpdev, msg->data, msg->len, ept->priv,
-                               msg->src);
+                       ept->cb(ept->rpdev, msg->data, msg_len, ept->priv,
+                               virtio32_to_cpu(vrp->vdev, msg->src));
 
                mutex_unlock(&ept->cb_lock);
 
@@ -846,15 +851,15 @@ static int rpmsg_ns_cb(struct rpmsg_device *rpdev, void *data, int len,
        /* don't trust the remote processor for null terminating the name */
        msg->name[RPMSG_NAME_SIZE - 1] = '\0';
 
-       dev_info(dev, "%sing channel %s addr 0x%x\n",
-                msg->flags & RPMSG_NS_DESTROY ? "destroy" : "creat",
-                msg->name, msg->addr);
-
        strncpy(chinfo.name, msg->name, sizeof(chinfo.name));
        chinfo.src = RPMSG_ADDR_ANY;
-       chinfo.dst = msg->addr;
+       chinfo.dst = virtio32_to_cpu(vrp->vdev, msg->addr);
+
+       dev_info(dev, "%sing channel %s addr 0x%x\n",
+                virtio32_to_cpu(vrp->vdev, msg->flags) & RPMSG_NS_DESTROY ?
+                "destroy" : "creat", msg->name, chinfo.dst);
 
-       if (msg->flags & RPMSG_NS_DESTROY) {
+       if (virtio32_to_cpu(vrp->vdev, msg->flags) & RPMSG_NS_DESTROY) {
                ret = rpmsg_unregister_device(&vrp->vdev->dev, &chinfo);
                if (ret)
                        dev_err(dev, "rpmsg_destroy_channel failed: %d\n", ret);
index f3b8e6d..48c536a 100644 (file)
@@ -281,7 +281,8 @@ config RTC_DRV_DS1374
 
 config RTC_DRV_DS1374_WDT
        bool "Dallas/Maxim DS1374 watchdog timer"
-       depends on RTC_DRV_DS1374
+       depends on RTC_DRV_DS1374 && WATCHDOG
+       select WATCHDOG_CORE
        help
          If you say Y here you will get support for the
          watchdog timer in the Dallas Semiconductor DS1374
index 811fe20..2370ac0 100644 (file)
@@ -7,7 +7,7 @@
  *
  * Detailed datasheet of the chip is available here:
  *
- *  http://www.abracon.com/realtimeclock/AB-RTCMC-32.768kHz-B5ZE-S3-Application-Manual.pdf
+ *  https://www.abracon.com/realtimeclock/AB-RTCMC-32.768kHz-B5ZE-S3-Application-Manual.pdf
  *
  * This work is based on ISL12057 driver (drivers/rtc/rtc-isl12057.c).
  *
index 4a63f0c..933e423 100644 (file)
@@ -6,7 +6,7 @@
  * Copyright (C) 2014 Pavel Machek <pavel@denx.de>
  *
  * You can get hardware description at
- * http://www.ti.com/lit/ds/symlink/bq32000.pdf
+ * https://www.ti.com/lit/ds/symlink/bq32000.pdf
  */
 
 #include <linux/module.h>
index a603f1f..800667d 100644 (file)
@@ -261,7 +261,7 @@ static int cpcap_rtc_probe(struct platform_device *pdev)
                return PTR_ERR(rtc->rtc_dev);
 
        rtc->rtc_dev->ops = &cpcap_rtc_ops;
-       rtc->rtc_dev->range_max = (1 << 14) * SECS_PER_DAY - 1;
+       rtc->rtc_dev->range_max = (timeu64_t) (DAY_MASK + 1) * SECS_PER_DAY - 1;
 
        err = cpcap_get_vendor(dev, rtc->regmap, &rtc->vendor);
        if (err)
index 4970294..54c85cd 100644 (file)
@@ -1668,6 +1668,8 @@ static const struct watchdog_ops ds1388_wdt_ops = {
 static void ds1307_wdt_register(struct ds1307 *ds1307)
 {
        struct watchdog_device  *wdt;
+       int err;
+       int val;
 
        if (ds1307->type != ds_1388)
                return;
@@ -1676,6 +1678,10 @@ static void ds1307_wdt_register(struct ds1307 *ds1307)
        if (!wdt)
                return;
 
+       err = regmap_read(ds1307->regmap, DS1388_REG_FLAG, &val);
+       if (!err && val & DS1388_BIT_WF)
+               wdt->bootstatus = WDIOF_CARDRESET;
+
        wdt->info = &ds1388_wdt_info;
        wdt->ops = &ds1388_wdt_ops;
        wdt->timeout = 99;
index 9c51a12..177d870 100644 (file)
@@ -46,6 +46,7 @@
 #define DS1374_REG_WDALM2      0x06
 #define DS1374_REG_CR          0x07 /* Control */
 #define DS1374_REG_CR_AIE      0x01 /* Alarm Int. Enable */
+#define DS1374_REG_CR_WDSTR    0x08 /* 1=INT, 0=RST */
 #define DS1374_REG_CR_WDALM    0x20 /* 1=Watchdog, 0=Alarm */
 #define DS1374_REG_CR_WACE     0x40 /* WD/Alarm counter enable */
 #define DS1374_REG_SR          0x08 /* Status */
@@ -71,7 +72,9 @@ struct ds1374 {
        struct i2c_client *client;
        struct rtc_device *rtc;
        struct work_struct work;
-
+#ifdef CONFIG_RTC_DRV_DS1374_WDT
+       struct watchdog_device wdt;
+#endif
        /* The mutex protects alarm operations, and prevents a race
         * between the enable_irq() in the workqueue and the free_irq()
         * in the remove function.
@@ -369,238 +372,96 @@ static const struct rtc_class_ops ds1374_rtc_ops = {
  *
  *****************************************************************************
  */
-static struct i2c_client *save_client;
 /* Default margin */
-#define WD_TIMO 131762
+#define TIMER_MARGIN_DEFAULT   32
+#define TIMER_MARGIN_MIN       1
+#define TIMER_MARGIN_MAX       4095 /* 24-bit counter at 4096 Hz */
 
-#define DRV_NAME "DS1374 Watchdog"
-
-static int wdt_margin = WD_TIMO;
-static unsigned long wdt_is_open;
+static int wdt_margin;
 module_param(wdt_margin, int, 0);
 MODULE_PARM_DESC(wdt_margin, "Watchdog timeout in seconds (default 32s)");
 
+static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default ="
+               __MODULE_STRING(WATCHDOG_NOWAYOUT)")");
+
 static const struct watchdog_info ds1374_wdt_info = {
-       .identity       = "DS1374 WTD",
+       .identity       = "DS1374 Watchdog",
        .options        = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING |
                                                WDIOF_MAGICCLOSE,
 };
 
-static int ds1374_wdt_settimeout(unsigned int timeout)
+static int ds1374_wdt_settimeout(struct watchdog_device *wdt, unsigned int timeout)
 {
-       int ret = -ENOIOCTLCMD;
-       int cr;
+       struct ds1374 *ds1374 = watchdog_get_drvdata(wdt);
+       struct i2c_client *client = ds1374->client;
+       int ret, cr;
 
-       ret = cr = i2c_smbus_read_byte_data(save_client, DS1374_REG_CR);
-       if (ret < 0)
-               goto out;
+       wdt->timeout = timeout;
+
+       cr = i2c_smbus_read_byte_data(client, DS1374_REG_CR);
+       if (cr < 0)
+               return cr;
 
        /* Disable any existing watchdog/alarm before setting the new one */
        cr &= ~DS1374_REG_CR_WACE;
 
-       ret = i2c_smbus_write_byte_data(save_client, DS1374_REG_CR, cr);
+       ret = i2c_smbus_write_byte_data(client, DS1374_REG_CR, cr);
        if (ret < 0)
-               goto out;
+               return ret;
 
        /* Set new watchdog time */
-       ret = ds1374_write_rtc(save_client, timeout, DS1374_REG_WDALM0, 3);
-       if (ret) {
-               pr_info("couldn't set new watchdog time\n");
-               goto out;
-       }
+       timeout = timeout * 4096;
+       ret = ds1374_write_rtc(client, timeout, DS1374_REG_WDALM0, 3);
+       if (ret)
+               return ret;
 
        /* Enable watchdog timer */
        cr |= DS1374_REG_CR_WACE | DS1374_REG_CR_WDALM;
+       cr &= ~DS1374_REG_CR_WDSTR;/* for RST PIN */
        cr &= ~DS1374_REG_CR_AIE;
 
-       ret = i2c_smbus_write_byte_data(save_client, DS1374_REG_CR, cr);
+       ret = i2c_smbus_write_byte_data(client, DS1374_REG_CR, cr);
        if (ret < 0)
-               goto out;
+               return ret;
 
        return 0;
-out:
-       return ret;
 }
 
-
 /*
  * Reload the watchdog timer.  (ie, pat the watchdog)
  */
-static void ds1374_wdt_ping(void)
+static int ds1374_wdt_start(struct watchdog_device *wdt)
 {
+       struct ds1374 *ds1374 = watchdog_get_drvdata(wdt);
        u32 val;
-       int ret = 0;
 
-       ret = ds1374_read_rtc(save_client, &val, DS1374_REG_WDALM0, 3);
-       if (ret)
-               pr_info("WD TICK FAIL!!!!!!!!!! %i\n", ret);
+       return ds1374_read_rtc(ds1374->client, &val, DS1374_REG_WDALM0, 3);
 }
 
-static void ds1374_wdt_disable(void)
+static int ds1374_wdt_stop(struct watchdog_device *wdt)
 {
+       struct ds1374 *ds1374 = watchdog_get_drvdata(wdt);
+       struct i2c_client *client = ds1374->client;
        int cr;
 
-       cr = i2c_smbus_read_byte_data(save_client, DS1374_REG_CR);
+       cr = i2c_smbus_read_byte_data(client, DS1374_REG_CR);
+       if (cr < 0)
+               return cr;
+
        /* Disable watchdog timer */
        cr &= ~DS1374_REG_CR_WACE;
 
-       i2c_smbus_write_byte_data(save_client, DS1374_REG_CR, cr);
-}
-
-/*
- * Watchdog device is opened, and watchdog starts running.
- */
-static int ds1374_wdt_open(struct inode *inode, struct file *file)
-{
-       struct ds1374 *ds1374 = i2c_get_clientdata(save_client);
-
-       if (MINOR(inode->i_rdev) == WATCHDOG_MINOR) {
-               mutex_lock(&ds1374->mutex);
-               if (test_and_set_bit(0, &wdt_is_open)) {
-                       mutex_unlock(&ds1374->mutex);
-                       return -EBUSY;
-               }
-               /*
-                *      Activate
-                */
-               wdt_is_open = 1;
-               mutex_unlock(&ds1374->mutex);
-               return stream_open(inode, file);
-       }
-       return -ENODEV;
-}
-
-/*
- * Close the watchdog device.
- */
-static int ds1374_wdt_release(struct inode *inode, struct file *file)
-{
-       if (MINOR(inode->i_rdev) == WATCHDOG_MINOR)
-               clear_bit(0, &wdt_is_open);
-
-       return 0;
-}
-
-/*
- * Pat the watchdog whenever device is written to.
- */
-static ssize_t ds1374_wdt_write(struct file *file, const char __user *data,
-                               size_t len, loff_t *ppos)
-{
-       if (len) {
-               ds1374_wdt_ping();
-               return 1;
-       }
-       return 0;
-}
-
-static ssize_t ds1374_wdt_read(struct file *file, char __user *data,
-                               size_t len, loff_t *ppos)
-{
-       return 0;
-}
-
-/*
- * Handle commands from user-space.
- */
-static long ds1374_wdt_ioctl(struct file *file, unsigned int cmd,
-                                                       unsigned long arg)
-{
-       int new_margin, options;
-
-       switch (cmd) {
-       case WDIOC_GETSUPPORT:
-               return copy_to_user((struct watchdog_info __user *)arg,
-               &ds1374_wdt_info, sizeof(ds1374_wdt_info)) ? -EFAULT : 0;
-
-       case WDIOC_GETSTATUS:
-       case WDIOC_GETBOOTSTATUS:
-               return put_user(0, (int __user *)arg);
-       case WDIOC_KEEPALIVE:
-               ds1374_wdt_ping();
-               return 0;
-       case WDIOC_SETTIMEOUT:
-               if (get_user(new_margin, (int __user *)arg))
-                       return -EFAULT;
-
-               /* the hardware's tick rate is 4096 Hz, so
-                * the counter value needs to be scaled accordingly
-                */
-               new_margin <<= 12;
-               if (new_margin < 1 || new_margin > 16777216)
-                       return -EINVAL;
-
-               wdt_margin = new_margin;
-               ds1374_wdt_settimeout(new_margin);
-               ds1374_wdt_ping();
-               /* fallthrough */
-       case WDIOC_GETTIMEOUT:
-               /* when returning ... inverse is true */
-               return put_user((wdt_margin >> 12), (int __user *)arg);
-       case WDIOC_SETOPTIONS:
-               if (copy_from_user(&options, (int __user *)arg, sizeof(int)))
-                       return -EFAULT;
-
-               if (options & WDIOS_DISABLECARD) {
-                       pr_info("disable watchdog\n");
-                       ds1374_wdt_disable();
-                       return 0;
-               }
-
-               if (options & WDIOS_ENABLECARD) {
-                       pr_info("enable watchdog\n");
-                       ds1374_wdt_settimeout(wdt_margin);
-                       ds1374_wdt_ping();
-                       return 0;
-               }
-               return -EINVAL;
-       }
-       return -ENOTTY;
+       return i2c_smbus_write_byte_data(client, DS1374_REG_CR, cr);
 }
 
-static long ds1374_wdt_unlocked_ioctl(struct file *file, unsigned int cmd,
-                       unsigned long arg)
-{
-       int ret;
-       struct ds1374 *ds1374 = i2c_get_clientdata(save_client);
-
-       mutex_lock(&ds1374->mutex);
-       ret = ds1374_wdt_ioctl(file, cmd, arg);
-       mutex_unlock(&ds1374->mutex);
-
-       return ret;
-}
-
-static int ds1374_wdt_notify_sys(struct notifier_block *this,
-                       unsigned long code, void *unused)
-{
-       if (code == SYS_DOWN || code == SYS_HALT)
-               /* Disable Watchdog */
-               ds1374_wdt_disable();
-       return NOTIFY_DONE;
-}
-
-static const struct file_operations ds1374_wdt_fops = {
-       .owner                  = THIS_MODULE,
-       .read                   = ds1374_wdt_read,
-       .unlocked_ioctl         = ds1374_wdt_unlocked_ioctl,
-       .compat_ioctl           = compat_ptr_ioctl,
-       .write                  = ds1374_wdt_write,
-       .open                   = ds1374_wdt_open,
-       .release                = ds1374_wdt_release,
-       .llseek                 = no_llseek,
-};
-
-static struct miscdevice ds1374_miscdev = {
-       .minor          = WATCHDOG_MINOR,
-       .name           = "watchdog",
-       .fops           = &ds1374_wdt_fops,
-};
-
-static struct notifier_block ds1374_wdt_notifier = {
-       .notifier_call = ds1374_wdt_notify_sys,
+static const struct watchdog_ops ds1374_wdt_ops = {
+       .owner          = THIS_MODULE,
+       .start          = ds1374_wdt_start,
+       .stop           = ds1374_wdt_stop,
+       .set_timeout    = ds1374_wdt_settimeout,
 };
-
 #endif /*CONFIG_RTC_DRV_DS1374_WDT*/
 /*
  *****************************************************************************
@@ -652,16 +513,22 @@ static int ds1374_probe(struct i2c_client *client,
                return ret;
 
 #ifdef CONFIG_RTC_DRV_DS1374_WDT
-       save_client = client;
-       ret = misc_register(&ds1374_miscdev);
+       ds1374->wdt.info = &ds1374_wdt_info;
+       ds1374->wdt.ops = &ds1374_wdt_ops;
+       ds1374->wdt.timeout = TIMER_MARGIN_DEFAULT;
+       ds1374->wdt.min_timeout = TIMER_MARGIN_MIN;
+       ds1374->wdt.max_timeout = TIMER_MARGIN_MAX;
+
+       watchdog_init_timeout(&ds1374->wdt, wdt_margin, &client->dev);
+       watchdog_set_nowayout(&ds1374->wdt, nowayout);
+       watchdog_stop_on_reboot(&ds1374->wdt);
+       watchdog_stop_on_unregister(&ds1374->wdt);
+       watchdog_set_drvdata(&ds1374->wdt, ds1374);
+       ds1374_wdt_settimeout(&ds1374->wdt, ds1374->wdt.timeout);
+
+       ret = devm_watchdog_register_device(&client->dev, &ds1374->wdt);
        if (ret)
                return ret;
-       ret = register_reboot_notifier(&ds1374_wdt_notifier);
-       if (ret) {
-               misc_deregister(&ds1374_miscdev);
-               return ret;
-       }
-       ds1374_wdt_settimeout(131072);
 #endif
 
        return 0;
@@ -670,11 +537,6 @@ static int ds1374_probe(struct i2c_client *client,
 static int ds1374_remove(struct i2c_client *client)
 {
        struct ds1374 *ds1374 = i2c_get_clientdata(client);
-#ifdef CONFIG_RTC_DRV_DS1374_WDT
-       misc_deregister(&ds1374_miscdev);
-       ds1374_miscdev.parent = NULL;
-       unregister_reboot_notifier(&ds1374_wdt_notifier);
-#endif
 
        if (client->irq > 0) {
                mutex_lock(&ds1374->mutex);
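
The ds1374 diff above replaces a hand-rolled /dev/watchdog misc device (open/write/ioctl handlers, reboot notifier, global save_client) with the watchdog core. A compact sketch of the resulting driver shape, using only the watchdog_* calls that appear in the hunks; the example_* names are placeholders and the hardware accesses are stubbed out:

        #include <linux/device.h>
        #include <linux/module.h>
        #include <linux/watchdog.h>

        static struct watchdog_device example_wdd;

        static int example_wdt_start(struct watchdog_device *wdd)
        {
                /* reload the hardware counter (the DS1374 re-reads WDALM0..2) */
                return 0;
        }

        static int example_wdt_stop(struct watchdog_device *wdd)
        {
                /* clear the WACE enable bit in the control register */
                return 0;
        }

        static int example_wdt_set_timeout(struct watchdog_device *wdd,
                                           unsigned int timeout)
        {
                wdd->timeout = timeout;
                /* the DS1374 ticks at 4096 Hz, so the hunk programs
                 * timeout * 4096 into the 24-bit alarm register before
                 * re-enabling WACE/WDALM */
                return 0;
        }

        static const struct watchdog_info example_wdt_info = {
                .identity = "example watchdog",
                .options  = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
        };

        static const struct watchdog_ops example_wdt_ops = {
                .owner       = THIS_MODULE,
                .start       = example_wdt_start,
                .stop        = example_wdt_stop,
                .set_timeout = example_wdt_set_timeout,
        };

        static int example_wdt_register(struct device *dev, void *drvdata)
        {
                example_wdd.info        = &example_wdt_info;
                example_wdd.ops         = &example_wdt_ops;
                example_wdd.timeout     = 32;   /* TIMER_MARGIN_DEFAULT */
                example_wdd.min_timeout = 1;
                example_wdd.max_timeout = 4095; /* 24-bit counter at 4096 Hz */

                watchdog_init_timeout(&example_wdd, 0, dev); /* module param / DT */
                watchdog_set_nowayout(&example_wdd, WATCHDOG_NOWAYOUT);
                watchdog_stop_on_reboot(&example_wdd);  /* replaces the reboot notifier */
                watchdog_stop_on_unregister(&example_wdd);
                watchdog_set_drvdata(&example_wdd, drvdata);

                /* devm_* also removes the remove()-path cleanup the old code needed */
                return devm_watchdog_register_device(dev, &example_wdd);
        }

User space keeps talking to /dev/watchdogN exactly as before; the core now provides the ioctl surface (GETSUPPORT, KEEPALIVE, SETTIMEOUT, magic close) that the driver used to implement by hand.
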
index 2779715..6349d2c 100644 (file)
@@ -73,6 +73,7 @@ static int goldfish_rtc_set_alarm(struct device *dev,
                rtc_alarm64 = rtc_tm_to_time64(&alrm->time) * NSEC_PER_SEC;
                writel((rtc_alarm64 >> 32), base + TIMER_ALARM_HIGH);
                writel(rtc_alarm64, base + TIMER_ALARM_LOW);
+               writel(1, base + TIMER_IRQ_ENABLED);
        } else {
                /*
                 * if this function was called with enabled=0
index f21dc6b..8d141d8 100644 (file)
@@ -95,7 +95,7 @@
 
 /**
  * struct imxdi_dev - private imxdi rtc data
- * @pdev: pionter to platform dev
+ * @pdev: pointer to platform dev
  * @rtc: pointer to rtc struct
  * @ioaddr: IO registers pointer
  * @clk: input reference clock
@@ -350,7 +350,7 @@ static int di_handle_invalid_and_failure_state(struct imxdi_dev *imxdi, u32 dsr)
                         * the tamper register is locked. We cannot disable the
                         * tamper detection. The TDCHL can only be reset by a
                         * DRYICE POR, but we cannot force a DRYICE POR in
-                        * softwere because we are still in "FAILURE STATE".
+                        * software because we are still in "FAILURE STATE".
                         * We need a DRYICE POR via battery power cycling....
                         */
                        /*
index 03ebcf1..d51cc12 100644 (file)
@@ -805,17 +805,36 @@ static int max77686_rtc_remove(struct platform_device *pdev)
 #ifdef CONFIG_PM_SLEEP
 static int max77686_rtc_suspend(struct device *dev)
 {
+       struct max77686_rtc_info *info = dev_get_drvdata(dev);
+       int ret = 0;
+
        if (device_may_wakeup(dev)) {
                struct max77686_rtc_info *info = dev_get_drvdata(dev);
 
-               return enable_irq_wake(info->virq);
+               ret = enable_irq_wake(info->virq);
        }
 
-       return 0;
+       /*
+        * If the main IRQ (not virtual) is the parent IRQ, then it must be
+        * disabled during suspend because if it happens while suspended it
+        * will be handled before resuming I2C.
+        *
+        * Since Main IRQ is shared, all its users should disable it to be sure
+        * it won't fire while one of them is still suspended.
+        */
+       if (!info->drv_data->rtc_irq_from_platform)
+               disable_irq(info->rtc_irq);
+
+       return ret;
 }
 
 static int max77686_rtc_resume(struct device *dev)
 {
+       struct max77686_rtc_info *info = dev_get_drvdata(dev);
+
+       if (!info->drv_data->rtc_irq_from_platform)
+               enable_irq(info->rtc_irq);
+
        if (device_may_wakeup(dev)) {
                struct max77686_rtc_info *info = dev_get_drvdata(dev);
 
index 1660d5e..21cbf7f 100644 (file)
@@ -7,7 +7,7 @@
  * based on other Linux RTC drivers
  *
  * Device datasheet:
- * http://ww1.microchip.com/downloads/en/DeviceDoc/22280A.pdf
+ * https://ww1.microchip.com/downloads/en/DeviceDoc/22280A.pdf
  */
 
 #include <linux/module.h>
index 9c56707..ed63169 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/of.h>
+#include <linux/of_irq.h>
 #include <linux/regmap.h>
 #include <linux/watchdog.h>
 
 #define PCF2127_BIT_CTRL1_TSF1                 BIT(4)
 /* Control register 2 */
 #define PCF2127_REG_CTRL2              0x01
+#define PCF2127_BIT_CTRL2_AIE                  BIT(1)
 #define PCF2127_BIT_CTRL2_TSIE                 BIT(2)
+#define PCF2127_BIT_CTRL2_AF                   BIT(4)
 #define PCF2127_BIT_CTRL2_TSF2                 BIT(5)
+#define PCF2127_BIT_CTRL2_WDTF                 BIT(6)
 /* Control register 3 */
 #define PCF2127_REG_CTRL3              0x02
 #define PCF2127_BIT_CTRL3_BLIE                 BIT(0)
 #define PCF2127_REG_DW                 0x07
 #define PCF2127_REG_MO                 0x08
 #define PCF2127_REG_YR                 0x09
+/* Alarm registers */
+#define PCF2127_REG_ALARM_SC           0x0A
+#define PCF2127_REG_ALARM_MN           0x0B
+#define PCF2127_REG_ALARM_HR           0x0C
+#define PCF2127_REG_ALARM_DM           0x0D
+#define PCF2127_REG_ALARM_DW           0x0E
+#define PCF2127_BIT_ALARM_AE                   BIT(7)
 /* Watchdog registers */
 #define PCF2127_REG_WD_CTL             0x10
 #define PCF2127_BIT_WD_CTL_TF0                 BIT(0)
@@ -324,6 +335,112 @@ static const struct watchdog_ops pcf2127_watchdog_ops = {
        .set_timeout = pcf2127_wdt_set_timeout,
 };
 
+/* Alarm */
+static int pcf2127_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+       struct pcf2127 *pcf2127 = dev_get_drvdata(dev);
+       unsigned int buf[5], ctrl2;
+       int ret;
+
+       ret = regmap_read(pcf2127->regmap, PCF2127_REG_CTRL2, &ctrl2);
+       if (ret)
+               return ret;
+
+       ret = pcf2127_wdt_active_ping(&pcf2127->wdd);
+       if (ret)
+               return ret;
+
+       ret = regmap_bulk_read(pcf2127->regmap, PCF2127_REG_ALARM_SC, buf,
+                              sizeof(buf));
+       if (ret)
+               return ret;
+
+       alrm->enabled = ctrl2 & PCF2127_BIT_CTRL2_AIE;
+       alrm->pending = ctrl2 & PCF2127_BIT_CTRL2_AF;
+
+       alrm->time.tm_sec = bcd2bin(buf[0] & 0x7F);
+       alrm->time.tm_min = bcd2bin(buf[1] & 0x7F);
+       alrm->time.tm_hour = bcd2bin(buf[2] & 0x3F);
+       alrm->time.tm_mday = bcd2bin(buf[3] & 0x3F);
+
+       return 0;
+}
+
+static int pcf2127_rtc_alarm_irq_enable(struct device *dev, u32 enable)
+{
+       struct pcf2127 *pcf2127 = dev_get_drvdata(dev);
+       int ret;
+
+       ret = regmap_update_bits(pcf2127->regmap, PCF2127_REG_CTRL2,
+                                PCF2127_BIT_CTRL2_AIE,
+                                enable ? PCF2127_BIT_CTRL2_AIE : 0);
+       if (ret)
+               return ret;
+
+       return pcf2127_wdt_active_ping(&pcf2127->wdd);
+}
+
+static int pcf2127_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+       struct pcf2127 *pcf2127 = dev_get_drvdata(dev);
+       uint8_t buf[5];
+       int ret;
+
+       ret = regmap_update_bits(pcf2127->regmap, PCF2127_REG_CTRL2,
+                                PCF2127_BIT_CTRL2_AF, 0);
+       if (ret)
+               return ret;
+
+       ret = pcf2127_wdt_active_ping(&pcf2127->wdd);
+       if (ret)
+               return ret;
+
+       buf[0] = bin2bcd(alrm->time.tm_sec);
+       buf[1] = bin2bcd(alrm->time.tm_min);
+       buf[2] = bin2bcd(alrm->time.tm_hour);
+       buf[3] = bin2bcd(alrm->time.tm_mday);
+       buf[4] = PCF2127_BIT_ALARM_AE; /* Do not match on week day */
+
+       ret = regmap_bulk_write(pcf2127->regmap, PCF2127_REG_ALARM_SC, buf,
+                               sizeof(buf));
+       if (ret)
+               return ret;
+
+       return pcf2127_rtc_alarm_irq_enable(dev, alrm->enabled);
+}
+
+static irqreturn_t pcf2127_rtc_irq(int irq, void *dev)
+{
+       struct pcf2127 *pcf2127 = dev_get_drvdata(dev);
+       unsigned int ctrl2 = 0;
+       int ret = 0;
+
+       ret = regmap_read(pcf2127->regmap, PCF2127_REG_CTRL2, &ctrl2);
+       if (ret)
+               return IRQ_NONE;
+
+       if (!(ctrl2 & PCF2127_BIT_CTRL2_AF))
+               return IRQ_NONE;
+
+       regmap_write(pcf2127->regmap, PCF2127_REG_CTRL2,
+                    ctrl2 & ~(PCF2127_BIT_CTRL2_AF | PCF2127_BIT_CTRL2_WDTF));
+
+       rtc_update_irq(pcf2127->rtc, 1, RTC_IRQF | RTC_AF);
+
+       pcf2127_wdt_active_ping(&pcf2127->wdd);
+
+       return IRQ_HANDLED;
+}
+
+static const struct rtc_class_ops pcf2127_rtc_alrm_ops = {
+       .ioctl            = pcf2127_rtc_ioctl,
+       .read_time        = pcf2127_rtc_read_time,
+       .set_time         = pcf2127_rtc_set_time,
+       .read_alarm       = pcf2127_rtc_read_alarm,
+       .set_alarm        = pcf2127_rtc_set_alarm,
+       .alarm_irq_enable = pcf2127_rtc_alarm_irq_enable,
+};
+
 /* sysfs interface */
 
 static ssize_t timestamp0_store(struct device *dev,
@@ -416,7 +533,7 @@ static const struct attribute_group pcf2127_attr_group = {
 };
 
 static int pcf2127_probe(struct device *dev, struct regmap *regmap,
-                       const char *name, bool has_nvmem)
+                        int alarm_irq, const char *name, bool has_nvmem)
 {
        struct pcf2127 *pcf2127;
        u32 wdd_timeout;
@@ -440,6 +557,23 @@ static int pcf2127_probe(struct device *dev, struct regmap *regmap,
        pcf2127->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
        pcf2127->rtc->range_max = RTC_TIMESTAMP_END_2099;
        pcf2127->rtc->set_start_time = true; /* Sets actual start to 1970 */
+       pcf2127->rtc->uie_unsupported = 1;
+
+       if (alarm_irq >= 0) {
+               ret = devm_request_threaded_irq(dev, alarm_irq, NULL,
+                                               pcf2127_rtc_irq,
+                                               IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+                                               dev_name(dev), dev);
+               if (ret) {
+                       dev_err(dev, "failed to request alarm irq\n");
+                       return ret;
+               }
+       }
+
+       if (alarm_irq >= 0 || device_property_read_bool(dev, "wakeup-source")) {
+               device_init_wakeup(dev, true);
+               pcf2127->rtc->ops = &pcf2127_rtc_alrm_ops;
+       }
 
        pcf2127->wdd.parent = dev;
        pcf2127->wdd.info = &pcf2127_wdt_info;
@@ -553,6 +687,7 @@ static int pcf2127_probe(struct device *dev, struct regmap *regmap,
 static const struct of_device_id pcf2127_of_match[] = {
        { .compatible = "nxp,pcf2127" },
        { .compatible = "nxp,pcf2129" },
+       { .compatible = "nxp,pca2129" },
        {}
 };
 MODULE_DEVICE_TABLE(of, pcf2127_of_match);
@@ -657,13 +792,14 @@ static int pcf2127_i2c_probe(struct i2c_client *client,
                return PTR_ERR(regmap);
        }
 
-       return pcf2127_probe(&client->dev, regmap,
+       return pcf2127_probe(&client->dev, regmap, client->irq,
                             pcf2127_i2c_driver.driver.name, id->driver_data);
 }
 
 static const struct i2c_device_id pcf2127_i2c_id[] = {
        { "pcf2127", 1 },
        { "pcf2129", 0 },
+       { "pca2129", 0 },
        { }
 };
 MODULE_DEVICE_TABLE(i2c, pcf2127_i2c_id);
@@ -722,13 +858,15 @@ static int pcf2127_spi_probe(struct spi_device *spi)
                return PTR_ERR(regmap);
        }
 
-       return pcf2127_probe(&spi->dev, regmap, pcf2127_spi_driver.driver.name,
+       return pcf2127_probe(&spi->dev, regmap, spi->irq,
+                            pcf2127_spi_driver.driver.name,
                             spi_get_device_id(spi)->driver_data);
 }
 
 static const struct spi_device_id pcf2127_spi_id[] = {
        { "pcf2127", 1 },
        { "pcf2129", 0 },
+       { "pca2129", 0 },
        { }
 };
 MODULE_DEVICE_TABLE(spi, pcf2127_spi_id);
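
Beyond the pca2129 IDs, the pcf2127 hunks above wire up alarm support: a level-triggered, threaded IRQ handler clears the alarm flag and reports RTC_AF to the core, and an extended rtc_class_ops gains read_alarm/set_alarm/alarm_irq_enable. A reduced sketch of that plumbing; the EX_* register names and example_* identifiers are placeholders, and the watchdog-ping interlock from the hunks is omitted:

        #include <linux/bits.h>
        #include <linux/device.h>
        #include <linux/interrupt.h>
        #include <linux/regmap.h>
        #include <linux/rtc.h>

        #define EX_REG_CTRL2            0x01
        #define EX_BIT_CTRL2_AIE        BIT(1)
        #define EX_BIT_CTRL2_AF         BIT(4)

        struct example_rtc {
                struct regmap *regmap;
                struct rtc_device *rtc;
        };

        /* dev_id is the struct device so drvdata lookups work, as in the hunk */
        static irqreturn_t example_rtc_irq(int irq, void *dev)
        {
                struct example_rtc *p = dev_get_drvdata(dev);
                unsigned int ctrl2;

                if (regmap_read(p->regmap, EX_REG_CTRL2, &ctrl2))
                        return IRQ_NONE;
                if (!(ctrl2 & EX_BIT_CTRL2_AF))
                        return IRQ_NONE;        /* not our alarm */

                /* acknowledge the alarm flag, then tell the RTC core */
                regmap_write(p->regmap, EX_REG_CTRL2, ctrl2 & ~EX_BIT_CTRL2_AF);
                rtc_update_irq(p->rtc, 1, RTC_IRQF | RTC_AF);

                return IRQ_HANDLED;
        }

        static int example_rtc_alarm_irq_enable(struct device *dev, unsigned int on)
        {
                struct example_rtc *p = dev_get_drvdata(dev);

                return regmap_update_bits(p->regmap, EX_REG_CTRL2,
                                          EX_BIT_CTRL2_AIE,
                                          on ? EX_BIT_CTRL2_AIE : 0);
        }

        /* probe-time hookup: the line is level-low, so a threaded handler is used */
        static int example_rtc_request_irq(struct device *dev, int alarm_irq)
        {
                return devm_request_threaded_irq(dev, alarm_irq, NULL,
                                                 example_rtc_irq,
                                                 IRQF_TRIGGER_LOW | IRQF_ONESHOT,
                                                 dev_name(dev), dev);
        }

With a NULL hard-IRQ handler, IRQF_ONESHOT is required and the work runs entirely in thread context, which is what allows the handler to perform sleeping regmap/I2C transfers while acknowledging the alarm.
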
index 7a87f46..ca55ba9 100644 (file)
@@ -21,8 +21,8 @@
 /*
  * Information for this driver was pulled from the following datasheets.
  *
- *  http://www.nxp.com/documents/data_sheet/PCF85063A.pdf
- *  http://www.nxp.com/documents/data_sheet/PCF85063TP.pdf
+ *  https://www.nxp.com/documents/data_sheet/PCF85063A.pdf
+ *  https://www.nxp.com/documents/data_sheet/PCF85063TP.pdf
  *
  *  PCF85063A -- Rev. 6 — 18 November 2015
  *  PCF85063TP -- Rev. 4 — 6 May 2015
index 40d7450..c6b8927 100644 (file)
@@ -275,6 +275,7 @@ static int pl031_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
        struct pl031_local *ldata = dev_get_drvdata(dev);
 
        writel(rtc_tm_to_time64(&alarm->time), ldata->base + RTC_MR);
+       pl031_alarm_irq_enable(dev, alarm->enabled);
 
        return 0;
 }
index d5880f5..5896e52 100644 (file)
@@ -818,7 +818,7 @@ static int pkey_keyblob2pkey2(const struct pkey_apqn *apqns, size_t nr_apqns,
 static int pkey_apqns4key(const u8 *key, size_t keylen, u32 flags,
                          struct pkey_apqn *apqns, size_t *nr_apqns)
 {
-       int rc = EINVAL;
+       int rc;
        u32 _nr_apqns, *_apqns = NULL;
        struct keytoken_header *hdr = (struct keytoken_header *)key;
 
@@ -886,7 +886,7 @@ static int pkey_apqns4keytype(enum pkey_key_type ktype,
                              u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags,
                              struct pkey_apqn *apqns, size_t *nr_apqns)
 {
-       int rc = -EINVAL;
+       int rc;
        u32 _nr_apqns, *_apqns = NULL;
 
        if (ktype == PKEY_TYPE_CCA_DATA || ktype == PKEY_TYPE_CCA_CIPHER) {
index c795f22..140186f 100644 (file)
@@ -434,7 +434,7 @@ static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req)
                return;
        }
 
-       del_timer(&req->timer);
+       del_timer_sync(&req->timer);
        zfcp_fsf_protstatus_eval(req);
        zfcp_fsf_fsfstatus_eval(req);
        req->handler(req);
@@ -867,7 +867,7 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
        req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q_free);
        req->issued = get_tod_clock();
        if (zfcp_qdio_send(qdio, &req->qdio_req)) {
-               del_timer(&req->timer);
+               del_timer_sync(&req->timer);
                /* lookup request again, list might have changed */
                zfcp_reqlist_find_rm(adapter->req_list, req_id);
                zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1");
index 85c7959..1409c76 100644 (file)
@@ -256,9 +256,9 @@ static void fcoe_sysfs_fcf_del(struct fcoe_fcf *new)
                WARN_ON(!fcf_dev);
                new->fcf_dev = NULL;
                fcoe_fcf_device_delete(fcf_dev);
-               kfree(new);
                mutex_unlock(&cdev->lock);
        }
+       kfree(new);
 }
 
 /**
index 19721db..d8cbc9c 100644 (file)
@@ -581,8 +581,12 @@ static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
 
        if (PTR_ERR(fp) == -FC_EX_CLOSED)
                goto out;
-       if (IS_ERR(fp))
-               goto redisc;
+       if (IS_ERR(fp)) {
+               mutex_lock(&disc->disc_mutex);
+               fc_disc_restart(disc);
+               mutex_unlock(&disc->disc_mutex);
+               goto out;
+       }
 
        cp = fc_frame_payload_get(fp, sizeof(*cp));
        if (!cp)
@@ -609,7 +613,7 @@ static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
                                new_rdata->disc_id = disc->disc_id;
                                fc_rport_login(new_rdata);
                        }
-                       goto out;
+                       goto free_fp;
                }
                rdata->disc_id = disc->disc_id;
                mutex_unlock(&rdata->rp_mutex);
@@ -626,6 +630,8 @@ redisc:
                fc_disc_restart(disc);
                mutex_unlock(&disc->disc_mutex);
        }
+free_fp:
+       fc_frame_free(fp);
 out:
        kref_put(&rdata->kref, fc_rport_destroy);
        if (!IS_ERR(fp))
index a62c60c..ece6c25 100644 (file)
@@ -6679,9 +6679,15 @@ lpfc_get_host_speed(struct Scsi_Host *shost)
                }
        } else if (lpfc_is_link_up(phba) && (phba->hba_flag & HBA_FCOE_MODE)) {
                switch (phba->fc_linkspeed) {
+               case LPFC_ASYNC_LINK_SPEED_1GBPS:
+                       fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
+                       break;
                case LPFC_ASYNC_LINK_SPEED_10GBPS:
                        fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
                        break;
+               case LPFC_ASYNC_LINK_SPEED_20GBPS:
+                       fc_host_speed(shost) = FC_PORTSPEED_20GBIT;
+                       break;
                case LPFC_ASYNC_LINK_SPEED_25GBPS:
                        fc_host_speed(shost) = FC_PORTSPEED_25GBIT;
                        break;
@@ -7406,12 +7412,26 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
 void
 lpfc_nvme_mod_param_dep(struct lpfc_hba *phba)
 {
-       if (phba->cfg_hdw_queue > phba->sli4_hba.num_present_cpu)
+       int  logit = 0;
+
+       if (phba->cfg_hdw_queue > phba->sli4_hba.num_present_cpu) {
                phba->cfg_hdw_queue = phba->sli4_hba.num_present_cpu;
-       if (phba->cfg_irq_chann > phba->sli4_hba.num_present_cpu)
+               logit = 1;
+       }
+       if (phba->cfg_irq_chann > phba->sli4_hba.num_present_cpu) {
                phba->cfg_irq_chann = phba->sli4_hba.num_present_cpu;
-       if (phba->cfg_irq_chann > phba->cfg_hdw_queue)
+               logit = 1;
+       }
+       if (phba->cfg_irq_chann > phba->cfg_hdw_queue) {
                phba->cfg_irq_chann = phba->cfg_hdw_queue;
+               logit = 1;
+       }
+       if (logit)
+               lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                               "2006 Reducing Queues - CPU limitation: "
+                               "IRQ %d HDWQ %d\n",
+                               phba->cfg_irq_chann,
+                               phba->cfg_hdw_queue);
 
        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME &&
            phba->nvmet_support) {
index 1d88fed..6f9d648 100644 (file)
@@ -2494,13 +2494,12 @@ lpfc_sli4_bsg_link_diag_test(struct bsg_job *job)
        diag_status_reply = (struct diag_status *)
                            bsg_reply->reply_data.vendor_reply.vendor_rsp;
 
-       if (job->reply_len <
-           sizeof(struct fc_bsg_request) + sizeof(struct diag_status)) {
+       if (job->reply_len < sizeof(*bsg_reply) + sizeof(*diag_status_reply)) {
                lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
                                "3012 Received Run link diag test reply "
                                "below minimum size (%d): reply_len:%d\n",
-                               (int)(sizeof(struct fc_bsg_request) +
-                               sizeof(struct diag_status)),
+                               (int)(sizeof(*bsg_reply) +
+                               sizeof(*diag_status_reply)),
                                job->reply_len);
                rc = -EINVAL;
                goto job_error;
@@ -3418,8 +3417,7 @@ lpfc_bsg_get_dfc_rev(struct bsg_job *job)
        event_reply = (struct get_mgmt_rev_reply *)
                bsg_reply->reply_data.vendor_reply.vendor_rsp;
 
-       if (job->reply_len <
-           sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev_reply)) {
+       if (job->reply_len < sizeof(*bsg_reply) + sizeof(*event_reply)) {
                lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
                                "2741 Received GET_DFC_REV reply below "
                                "minimum size\n");
@@ -5202,8 +5200,8 @@ lpfc_menlo_cmd(struct bsg_job *job)
                goto no_dd_data;
        }
 
-       if (job->reply_len <
-           sizeof(struct fc_bsg_request) + sizeof(struct menlo_response)) {
+       if (job->reply_len < sizeof(*bsg_reply) +
+                               sizeof(struct menlo_response)) {
                lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
                                "2785 Received MENLO_CMD reply below "
                                "minimum size\n");
@@ -5359,9 +5357,7 @@ lpfc_forced_link_speed(struct bsg_job *job)
        forced_reply = (struct forced_link_speed_support_reply *)
                bsg_reply->reply_data.vendor_reply.vendor_rsp;
 
-       if (job->reply_len <
-           sizeof(struct fc_bsg_request) +
-           sizeof(struct forced_link_speed_support_reply)) {
+       if (job->reply_len < sizeof(*bsg_reply) + sizeof(*forced_reply)) {
                lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
                                "0049 Received FORCED_LINK_SPEED reply below "
                                "minimum size\n");
@@ -5715,8 +5711,7 @@ lpfc_get_trunk_info(struct bsg_job *job)
        event_reply = (struct lpfc_trunk_info *)
                bsg_reply->reply_data.vendor_reply.vendor_rsp;
 
-       if (job->reply_len <
-           sizeof(struct fc_bsg_request) + sizeof(struct lpfc_trunk_info)) {
+       if (job->reply_len < sizeof(*bsg_reply) + sizeof(*event_reply)) {
                lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
                                "2728 Received GET TRUNK _INFO reply below "
                                "minimum size\n");
index dd9f2bf..ef2015f 100644 (file)
@@ -713,7 +713,8 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                /* This is a GID_FT completing so the gidft_inp counter was
                 * incremented before the GID_FT was issued to the wire.
                 */
-               vport->gidft_inp--;
+               if (vport->gidft_inp)
+                       vport->gidft_inp--;
 
                /*
                 * Skip processing the NS response
@@ -741,11 +742,14 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                                goto out;
 
                        /* CT command is being retried */
-                       vport->gidft_inp--;
                        rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT,
                                         vport->fc_ns_retry, type);
                        if (rc == 0)
                                goto out;
+                       else { /* Unable to send NS cmd */
+                               if (vport->gidft_inp)
+                                       vport->gidft_inp--;
+                       }
                }
                if (vport->fc_flag & FC_RSCN_MODE)
                        lpfc_els_flush_rscn(vport);
@@ -825,7 +829,8 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                                (uint32_t) CTrsp->ReasonCode,
                                (uint32_t) CTrsp->Explanation);
                }
-               vport->gidft_inp--;
+               if (vport->gidft_inp)
+                       vport->gidft_inp--;
        }
 
        lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
@@ -918,7 +923,8 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                /* This is a GID_PT completing so the gidft_inp counter was
                 * incremented before the GID_PT was issued to the wire.
                 */
-               vport->gidft_inp--;
+               if (vport->gidft_inp)
+                       vport->gidft_inp--;
 
                /*
                 * Skip processing the NS response
@@ -942,11 +948,14 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                                vport->fc_ns_retry++;
 
                        /* CT command is being retried */
-                       vport->gidft_inp--;
                        rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_PT,
                                         vport->fc_ns_retry, GID_PT_N_PORT);
                        if (rc == 0)
                                goto out;
+                       else { /* Unable to send NS cmd */
+                               if (vport->gidft_inp)
+                                       vport->gidft_inp--;
+                       }
                }
                if (vport->fc_flag & FC_RSCN_MODE)
                        lpfc_els_flush_rscn(vport);
@@ -1027,7 +1036,8 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                                (uint32_t)CTrsp->ReasonCode,
                                (uint32_t)CTrsp->Explanation);
                }
-               vport->gidft_inp--;
+               if (vport->gidft_inp)
+                       vport->gidft_inp--;
        }
 
        lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
index 85d4e40..48dc63f 100644 (file)
@@ -3937,10 +3937,14 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                case LSRJT_UNABLE_TPC:
                        /* The driver has a VALID PLOGI but the rport has
                         * rejected the PRLI - can't do it now.  Delay
-                        * for 1 second and try again - don't care about
-                        * the explanation.
+                        * for 1 second and try again.
+                        *
+                        * However, if explanation is REQ_UNSUPPORTED there's
+                        * no point to retry PRLI.
                         */
-                       if (cmd == ELS_CMD_PRLI || cmd == ELS_CMD_NVMEPRLI) {
+                       if ((cmd == ELS_CMD_PRLI || cmd == ELS_CMD_NVMEPRLI) &&
+                           stat.un.b.lsRjtRsnCodeExp !=
+                           LSEXP_REQ_UNSUPPORTED) {
                                delay = 1000;
                                maxretry = lpfc_max_els_tries + 1;
                                retry = 1;
index c4a7e82..c697259 100644 (file)
@@ -4577,6 +4577,13 @@ static void lpfc_host_supported_speeds_set(struct Scsi_Host *shost)
        struct lpfc_hba   *phba = vport->phba;
 
        fc_host_supported_speeds(shost) = 0;
+       /*
+        * Avoid reporting supported link speed for FCoE as it can't be
+        * controlled via FCoE.
+        */
+       if (phba->hba_flag & HBA_FCOE_MODE)
+               return;
+
        if (phba->lmt & LMT_128Gb)
                fc_host_supported_speeds(shost) |= FC_PORTSPEED_128GBIT;
        if (phba->lmt & LMT_64Gb)
@@ -4910,6 +4917,9 @@ lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code,
                case LPFC_ASYNC_LINK_SPEED_40GBPS:
                        port_speed = 40000;
                        break;
+               case LPFC_ASYNC_LINK_SPEED_100GBPS:
+                       port_speed = 100000;
+                       break;
                default:
                        port_speed = 0;
                }
@@ -8589,7 +8599,7 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
                                "VPI(B:%d M:%d) "
                                "VFI(B:%d M:%d) "
                                "RPI(B:%d M:%d) "
-                               "FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d\n",
+                               "FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d lmt:x%x\n",
                                phba->sli4_hba.extents_in_use,
                                phba->sli4_hba.max_cfg_param.xri_base,
                                phba->sli4_hba.max_cfg_param.max_xri,
@@ -8603,7 +8613,8 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
                                phba->sli4_hba.max_cfg_param.max_eq,
                                phba->sli4_hba.max_cfg_param.max_cq,
                                phba->sli4_hba.max_cfg_param.max_wq,
-                               phba->sli4_hba.max_cfg_param.max_rq);
+                               phba->sli4_hba.max_cfg_param.max_rq,
+                               phba->lmt);
 
                /*
                 * Calculate queue resources based on how
@@ -8626,7 +8637,8 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
                if ((phba->cfg_irq_chann > qmin) ||
                    (phba->cfg_hdw_queue > qmin)) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
-                                       "2005 Reducing Queues: "
+                                       "2005 Reducing Queues - "
+                                       "FW resource limitation: "
                                        "WQ %d CQ %d EQ %d: min %d: "
                                        "IRQ %d HDWQ %d\n",
                                        phba->sli4_hba.max_cfg_param.max_wq,
@@ -14100,17 +14112,18 @@ lpfc_init(void)
                printk(KERN_ERR "Could not register lpfcmgmt device, "
                        "misc_register returned with status %d", error);
 
+       error = -ENOMEM;
        lpfc_transport_functions.vport_create = lpfc_vport_create;
        lpfc_transport_functions.vport_delete = lpfc_vport_delete;
        lpfc_transport_template =
                                fc_attach_transport(&lpfc_transport_functions);
        if (lpfc_transport_template == NULL)
-               return -ENOMEM;
+               goto unregister;
        lpfc_vport_transport_template =
                fc_attach_transport(&lpfc_vport_transport_functions);
        if (lpfc_vport_transport_template == NULL) {
                fc_release_transport(lpfc_transport_template);
-               return -ENOMEM;
+               goto unregister;
        }
        lpfc_nvme_cmd_template();
        lpfc_nvmet_cmd_template();
@@ -14136,6 +14149,8 @@ unwind:
 cpuhp_failure:
        fc_release_transport(lpfc_transport_template);
        fc_release_transport(lpfc_vport_transport_template);
+unregister:
+       misc_deregister(&lpfc_mgmt_dev);
 
        return error;
 }
index e4c710f..cad53d1 100644 (file)
@@ -1745,7 +1745,13 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
                }
        }
 
-       if (ndlp->nlp_type & NLP_FCP_TARGET) {
+       if (ndlp->nlp_type & NLP_FCP_TARGET)
+               ndlp->nlp_fc4_type |= NLP_FC4_FCP;
+
+       if (ndlp->nlp_type & NLP_NVME_TARGET)
+               ndlp->nlp_fc4_type |= NLP_FC4_NVME;
+
+       if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET)) {
                ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
                lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
        } else {
index a4430ae..d4ade7c 100644 (file)
@@ -2110,7 +2110,7 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
                }
                tgtp->tport_unreg_cmp = &tport_unreg_cmp;
                nvmet_fc_unregister_targetport(phba->targetport);
-               if (!wait_for_completion_timeout(tgtp->tport_unreg_cmp,
+               if (!wait_for_completion_timeout(&tport_unreg_cmp,
                                        msecs_to_jiffies(LPFC_NVMET_WAIT_TMO)))
                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                        "6179 Unreg targetport x%px timeout "
index 8582b51..4cd7ded 100644 (file)
@@ -13650,7 +13650,11 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
                    fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
                        spin_unlock_irqrestore(&phba->hbalock, iflags);
                        /* Handle MDS Loopback frames */
-                       lpfc_sli4_handle_mds_loopback(phba->pport, dma_buf);
+                       if  (!(phba->pport->load_flag & FC_UNLOADING))
+                               lpfc_sli4_handle_mds_loopback(phba->pport,
+                                                             dma_buf);
+                       else
+                               lpfc_in_buf_free(phba, &dma_buf->dbuf);
                        break;
                }
 
@@ -18363,7 +18367,10 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
            fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
                vport = phba->pport;
                /* Handle MDS Loopback frames */
-               lpfc_sli4_handle_mds_loopback(vport, dmabuf);
+               if  (!(phba->pport->load_flag & FC_UNLOADING))
+                       lpfc_sli4_handle_mds_loopback(vport, dmabuf);
+               else
+                       lpfc_in_buf_free(phba, &dmabuf->dbuf);
                return;
        }
 
index 1987c66..20adec4 100644 (file)
@@ -20,7 +20,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "12.8.0.2"
+#define LPFC_DRIVER_VERSION "12.8.0.3"
 #define LPFC_DRIVER_NAME               "lpfc"
 
 /* Used for SLI 2/3 */
index 91eb690..e1d7de6 100644 (file)
@@ -380,5 +380,8 @@ extern int qla24xx_soft_reset(struct qla_hw_data *);
 static inline int
 ql_mask_match(uint level)
 {
+       if (ql2xextended_error_logging == 1)
+               ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
+
        return (level & ql2xextended_error_logging) == level;
 }
index 8c92af5..1bc090d 100644 (file)
@@ -3880,6 +3880,7 @@ struct qla_hw_data {
                uint32_t        scm_supported_f:1;
                                /* Enabled in Driver */
                uint32_t        scm_enabled:1;
+               uint32_t        max_req_queue_warned:1;
        } flags;
 
        uint16_t max_exchg;
index df670fb..de9fd7f 100644 (file)
@@ -1505,11 +1505,11 @@ qla2x00_prep_ct_fdmi_req(struct ct_sns_pkt *p, uint16_t cmd,
 static uint
 qla25xx_fdmi_port_speed_capability(struct qla_hw_data *ha)
 {
+       uint speeds = 0;
+
        if (IS_CNA_CAPABLE(ha))
                return FDMI_PORT_SPEED_10GB;
        if (IS_QLA28XX(ha) || IS_QLA27XX(ha)) {
-               uint speeds = 0;
-
                if (ha->max_supported_speed == 2) {
                        if (ha->min_supported_speed <= 6)
                                speeds |= FDMI_PORT_SPEED_64GB;
@@ -1536,9 +1536,16 @@ qla25xx_fdmi_port_speed_capability(struct qla_hw_data *ha)
                }
                return speeds;
        }
-       if (IS_QLA2031(ha))
-               return FDMI_PORT_SPEED_16GB|FDMI_PORT_SPEED_8GB|
-                       FDMI_PORT_SPEED_4GB;
+       if (IS_QLA2031(ha)) {
+               if ((ha->pdev->subsystem_vendor == 0x103C) &&
+                   (ha->pdev->subsystem_device == 0x8002)) {
+                       speeds = FDMI_PORT_SPEED_16GB;
+               } else {
+                       speeds = FDMI_PORT_SPEED_16GB|FDMI_PORT_SPEED_8GB|
+                               FDMI_PORT_SPEED_4GB;
+               }
+               return speeds;
+       }
        if (IS_QLA25XX(ha))
                return FDMI_PORT_SPEED_8GB|FDMI_PORT_SPEED_4GB|
                        FDMI_PORT_SPEED_2GB|FDMI_PORT_SPEED_1GB;
@@ -3436,7 +3443,6 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
                        list_for_each_entry(fcport, &vha->vp_fcports, list) {
                                if ((fcport->flags & FCF_FABRIC_DEVICE) != 0) {
                                        fcport->scan_state = QLA_FCPORT_SCAN;
-                                       fcport->logout_on_delete = 0;
                                }
                        }
                        goto login_logout;
@@ -3532,10 +3538,22 @@ login_logout:
                }
 
                if (fcport->scan_state != QLA_FCPORT_FOUND) {
+                       bool do_delete = false;
+
+                       if (fcport->scan_needed &&
+                           fcport->disc_state == DSC_LOGIN_PEND) {
+                               /* Cable got disconnected after we sent
+                                * a login. Do delete to prevent timeout.
+                                */
+                               fcport->logout_on_delete = 1;
+                               do_delete = true;
+                       }
+
                        fcport->scan_needed = 0;
-                       if ((qla_dual_mode_enabled(vha) ||
-                               qla_ini_mode_enabled(vha)) &&
-                           atomic_read(&fcport->state) == FCS_ONLINE) {
+                       if (((qla_dual_mode_enabled(vha) ||
+                             qla_ini_mode_enabled(vha)) &&
+                           atomic_read(&fcport->state) == FCS_ONLINE) ||
+                               do_delete) {
                                if (fcport->loop_id != FC_NO_LOOP_ID) {
                                        if (fcport->flags & FCF_FCP2_DEVICE)
                                                fcport->logout_on_delete = 0;
@@ -3736,6 +3754,18 @@ static void qla2x00_async_gpnft_gnnft_sp_done(srb_t *sp, int res)
                unsigned long flags;
                const char *name = sp->name;
 
+               if (res == QLA_OS_TIMER_EXPIRED) {
+                       /* switch is ignoring all commands.
+                        * This might be a zone disable behavior.
+                        * This means we hit 64s timeout.
+                        * 22s GPNFT + 44s Abort = 64s
+                        */
+                       ql_dbg(ql_dbg_disc, vha, 0xffff,
+                              "%s: Switch Zone check please .\n",
+                              name);
+                       qla2x00_mark_all_devices_lost(vha);
+               }
+
                /*
                 * We are in an Interrupt context, queue up this
                 * sp for GNNFT_DONE work. This will allow all
index 27bcd34..ab5275d 100644 (file)
@@ -2024,8 +2024,8 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
                                res = DID_ERROR << 16;
                        }
                }
-               ql_dbg(ql_dbg_user, vha, 0x503f,
-                   "ELS IOCB Done -%s error hdl=%x comp_status=0x%x error subcode 1=0x%x error subcode 2=0x%x total_byte=0x%x\n",
+               ql_dbg(ql_dbg_disc, vha, 0x503f,
+                   "ELS IOCB Done -%s hdl=%x comp_status=0x%x error subcode 1=0x%x error subcode 2=0x%x total_byte=0x%x\n",
                    type, sp->handle, comp_status, fw_status[1], fw_status[2],
                    le32_to_cpu(ese->total_byte_count));
                goto els_ct_done;
index 7388343..226f142 100644 (file)
@@ -334,14 +334,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
                        if (time_after(jiffies, wait_time))
                                break;
 
-                       /*
-                        * Check if it's UNLOADING, cause we cannot poll in
-                        * this case, or else a NULL pointer dereference
-                        * is triggered.
-                        */
-                       if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags)))
-                               return QLA_FUNCTION_TIMEOUT;
-
                        /* Check for pending interrupts. */
                        qla2x00_poll(ha->rsp_q_map[0]);
 
@@ -5240,7 +5232,7 @@ qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
        mcp->mb[8] = MSW(risc_addr);
        mcp->out_mb = MBX_8|MBX_1|MBX_0;
        mcp->in_mb = MBX_3|MBX_2|MBX_0;
-       mcp->tov = 30;
+       mcp->tov = MBX_TOV_SECONDS;
        mcp->flags = 0;
        rval = qla2x00_mailbox_command(vha, mcp);
        if (rval != QLA_SUCCESS) {
@@ -5428,7 +5420,7 @@ qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
        mcp->mb[8] = MSW(risc_addr);
        mcp->out_mb = MBX_8|MBX_3|MBX_2|MBX_1|MBX_0;
        mcp->in_mb = MBX_1|MBX_0;
-       mcp->tov = 30;
+       mcp->tov = MBX_TOV_SECONDS;
        mcp->flags = 0;
        rval = qla2x00_mailbox_command(vha, mcp);
        if (rval != QLA_SUCCESS) {
@@ -5700,7 +5692,7 @@ qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
        mcp->mb[9] = vha->vp_idx;
        mcp->out_mb = MBX_9|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
        mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
-       mcp->tov = 30;
+       mcp->tov = MBX_TOV_SECONDS;
        mcp->flags = 0;
        rval = qla2x00_mailbox_command(vha, mcp);
        if (mb != NULL) {
@@ -5787,7 +5779,7 @@ qla82xx_mbx_intr_enable(scsi_qla_host_t *vha)
 
        mcp->out_mb = MBX_1|MBX_0;
        mcp->in_mb = MBX_0;
-       mcp->tov = 30;
+       mcp->tov = MBX_TOV_SECONDS;
        mcp->flags = 0;
 
        rval = qla2x00_mailbox_command(vha, mcp);
@@ -5822,7 +5814,7 @@ qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
 
        mcp->out_mb = MBX_1|MBX_0;
        mcp->in_mb = MBX_0;
-       mcp->tov = 30;
+       mcp->tov = MBX_TOV_SECONDS;
        mcp->flags = 0;
 
        rval = qla2x00_mailbox_command(vha, mcp);
@@ -6014,7 +6006,7 @@ qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
        if (IS_QLA8031(ha))
                mcp->out_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
        mcp->in_mb = MBX_0;
-       mcp->tov = 30;
+       mcp->tov = MBX_TOV_SECONDS;
        mcp->flags = 0;
 
        rval = qla2x00_mailbox_command(vha, mcp);
@@ -6050,7 +6042,7 @@ qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
        mcp->in_mb = MBX_2|MBX_1|MBX_0;
        if (IS_QLA8031(ha))
                mcp->in_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
-       mcp->tov = 30;
+       mcp->tov = MBX_TOV_SECONDS;
        mcp->flags = 0;
 
        rval = qla2x00_mailbox_command(vha, mcp);
index fa695a4..90bbc61 100644 (file)
@@ -536,6 +536,11 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
        struct nvme_private *priv = fd->private;
        struct qla_nvme_rport *qla_rport = rport->private;
 
+       if (!priv) {
+               /* nvme association has been torn down */
+               return rval;
+       }
+
        fcport = qla_rport->fcport;
 
        if (!qpair || !fcport || (qpair && !qpair->fw_started) ||
@@ -687,7 +692,15 @@ int qla_nvme_register_hba(struct scsi_qla_host *vha)
        tmpl = &qla_nvme_fc_transport;
 
        WARN_ON(vha->nvme_local_port);
-       WARN_ON(ha->max_req_queues < 3);
+
+       if (ha->max_req_queues < 3) {
+               if (!ha->flags.max_req_queue_warned)
+                       ql_log(ql_log_info, vha, 0x2120,
+                              "%s: Disabling FC-NVME due to lack of free queue pairs (%d).\n",
+                              __func__, ha->max_req_queues);
+               ha->flags.max_req_queue_warned = 1;
+               return ret;
+       }
 
        qla_nvme_fc_transport.max_hw_queues =
            min((uint8_t)(qla_nvme_fc_transport.max_hw_queues),
index 9b59f03..8da00ba 100644 (file)
@@ -2017,6 +2017,11 @@ skip_pio:
        /* Determine queue resources */
        ha->max_req_queues = ha->max_rsp_queues = 1;
        ha->msix_count = QLA_BASE_VECTORS;
+
+       /* Check if FW supports MQ or not */
+       if (!(ha->fw_attributes & BIT_6))
+               goto mqiobase_exit;
+
        if (!ql2xmqsupport || !ql2xnvmeenable ||
            (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
                goto mqiobase_exit;
@@ -2829,10 +2834,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
        /* This may fail but that's ok */
        pci_enable_pcie_error_reporting(pdev);
 
-       /* Turn off T10-DIF when FC-NVMe is enabled */
-       if (ql2xnvmeenable)
-               ql2xenabledif = 0;
-
        ha = kzalloc(sizeof(struct qla_hw_data), GFP_KERNEL);
        if (!ha) {
                ql_log_pci(ql_log_fatal, pdev, 0x0009,
index fbb80a0..9028916 100644 (file)
@@ -1270,7 +1270,7 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess)
 
        qla24xx_chk_fcp_state(sess);
 
-       ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
+       ql_dbg(ql_dbg_disc, sess->vha, 0xe001,
            "Scheduling sess %p for deletion %8phC\n",
            sess, sess->port_name);
 
index 064ed68..139f007 100644 (file)
@@ -5490,9 +5490,11 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
                                u64 d = ktime_get_boottime_ns() - ns_from_boot;
 
                                if (kt <= d) {  /* elapsed duration >= kt */
+                                       spin_lock_irqsave(&sqp->qc_lock, iflags);
                                        sqcp->a_cmnd = NULL;
                                        atomic_dec(&devip->num_in_q);
                                        clear_bit(k, sqp->in_use_bm);
+                                       spin_unlock_irqrestore(&sqp->qc_lock, iflags);
                                        if (new_sd_dp)
                                                kfree(sd_dp);
                                        /* call scsi_done() from this thread */
index e443dee..c9abed8 100644 (file)
@@ -1526,7 +1526,7 @@ int sas_rphy_add(struct sas_rphy *rphy)
        list_add_tail(&rphy->list, &sas_host->rphy_list);
        if (identify->device_type == SAS_END_DEVICE &&
            (identify->target_port_protocols &
-            (SAS_PROTOCOL_SSP|SAS_PROTOCOL_STP|SAS_PROTOCOL_SATA)))
+            (SAS_PROTOCOL_SSP | SAS_PROTOCOL_STP | SAS_PROTOCOL_SATA)))
                rphy->scsi_target_id = sas_host->next_target_id++;
        else if (identify->device_type == SAS_END_DEVICE)
                rphy->scsi_target_id = -1;
index acde0ca..95018e6 100644 (file)
@@ -2578,8 +2578,6 @@ sd_print_capacity(struct scsi_disk *sdkp,
                sd_printk(KERN_NOTICE, sdkp,
                          "%u-byte physical blocks\n",
                          sdkp->physical_block_size);
-
-       sd_zbc_print_zones(sdkp);
 }
 
 /* called with buffer of length 512 */
@@ -3220,6 +3218,14 @@ static int sd_revalidate_disk(struct gendisk *disk)
        sd_config_write_same(sdkp);
        kfree(buffer);
 
+       /*
+        * For a zoned drive, revalidating the zones can be done only once
+        * the gendisk capacity is set. So if this fails, set back the gendisk
+        * capacity to 0.
+        */
+       if (sd_zbc_revalidate_zones(sdkp))
+               set_capacity_revalidate_and_notify(disk, 0, false);
+
  out:
        return 0;
 }
index 27c0f4e..4933e7d 100644 (file)
@@ -75,7 +75,9 @@ struct scsi_disk {
        struct opal_dev *opal_dev;
 #ifdef CONFIG_BLK_DEV_ZONED
        u32             nr_zones;
+       u32             rev_nr_zones;
        u32             zone_blocks;
+       u32             rev_zone_blocks;
        u32             zones_optimal_open;
        u32             zones_optimal_nonseq;
        u32             zones_max_open;
@@ -215,8 +217,8 @@ static inline int sd_is_zoned(struct scsi_disk *sdkp)
 
 int sd_zbc_init_disk(struct scsi_disk *sdkp);
 void sd_zbc_release_disk(struct scsi_disk *sdkp);
-extern int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buffer);
-extern void sd_zbc_print_zones(struct scsi_disk *sdkp);
+int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buffer);
+int sd_zbc_revalidate_zones(struct scsi_disk *sdkp);
 blk_status_t sd_zbc_setup_zone_mgmt_cmnd(struct scsi_cmnd *cmd,
                                         unsigned char op, bool all);
 unsigned int sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes,
@@ -242,7 +244,10 @@ static inline int sd_zbc_read_zones(struct scsi_disk *sdkp,
        return 0;
 }
 
-static inline void sd_zbc_print_zones(struct scsi_disk *sdkp) {}
+static inline int sd_zbc_revalidate_zones(struct scsi_disk *sdkp)
+{
+       return 0;
+}
 
 static inline blk_status_t sd_zbc_setup_zone_mgmt_cmnd(struct scsi_cmnd *cmd,
                                                       unsigned char op,
index 4717e79..0e94ff0 100644 (file)
@@ -634,6 +634,23 @@ static int sd_zbc_check_capacity(struct scsi_disk *sdkp, unsigned char *buf,
        return 0;
 }
 
+static void sd_zbc_print_zones(struct scsi_disk *sdkp)
+{
+       if (!sd_is_zoned(sdkp) || !sdkp->capacity)
+               return;
+
+       if (sdkp->capacity & (sdkp->zone_blocks - 1))
+               sd_printk(KERN_NOTICE, sdkp,
+                         "%u zones of %u logical blocks + 1 runt zone\n",
+                         sdkp->nr_zones - 1,
+                         sdkp->zone_blocks);
+       else
+               sd_printk(KERN_NOTICE, sdkp,
+                         "%u zones of %u logical blocks\n",
+                         sdkp->nr_zones,
+                         sdkp->zone_blocks);
+}
+
 static void sd_zbc_revalidate_zones_cb(struct gendisk *disk)
 {
        struct scsi_disk *sdkp = scsi_disk(disk);
@@ -641,36 +658,31 @@ static void sd_zbc_revalidate_zones_cb(struct gendisk *disk)
        swap(sdkp->zones_wp_offset, sdkp->rev_wp_offset);
 }
 
-static int sd_zbc_revalidate_zones(struct scsi_disk *sdkp,
-                                  u32 zone_blocks,
-                                  unsigned int nr_zones)
+int sd_zbc_revalidate_zones(struct scsi_disk *sdkp)
 {
        struct gendisk *disk = sdkp->disk;
+       struct request_queue *q = disk->queue;
+       u32 zone_blocks = sdkp->rev_zone_blocks;
+       unsigned int nr_zones = sdkp->rev_nr_zones;
+       u32 max_append;
        int ret = 0;
 
+       if (!sd_is_zoned(sdkp))
+               return 0;
+
        /*
         * Make sure revalidate zones are serialized to ensure exclusive
         * updates of the scsi disk data.
         */
        mutex_lock(&sdkp->rev_mutex);
 
-       /*
-        * Revalidate the disk zones to update the device request queue zone
-        * bitmaps and the zone write pointer offset array. Do this only once
-        * the device capacity is set on the second revalidate execution for
-        * disk scan or if something changed when executing a normal revalidate.
-        */
-       if (sdkp->first_scan) {
-               sdkp->zone_blocks = zone_blocks;
-               sdkp->nr_zones = nr_zones;
-               goto unlock;
-       }
-
        if (sdkp->zone_blocks == zone_blocks &&
            sdkp->nr_zones == nr_zones &&
            disk->queue->nr_zones == nr_zones)
                goto unlock;
 
+       sdkp->zone_blocks = zone_blocks;
+       sdkp->nr_zones = nr_zones;
        sdkp->rev_wp_offset = kvcalloc(nr_zones, sizeof(u32), GFP_NOIO);
        if (!sdkp->rev_wp_offset) {
                ret = -ENOMEM;
@@ -682,6 +694,21 @@ static int sd_zbc_revalidate_zones(struct scsi_disk *sdkp,
        kvfree(sdkp->rev_wp_offset);
        sdkp->rev_wp_offset = NULL;
 
+       if (ret) {
+               sdkp->zone_blocks = 0;
+               sdkp->nr_zones = 0;
+               sdkp->capacity = 0;
+               goto unlock;
+       }
+
+       max_append = min_t(u32, logical_to_sectors(sdkp->device, zone_blocks),
+                          q->limits.max_segments << (PAGE_SHIFT - 9));
+       max_append = min_t(u32, max_append, queue_max_hw_sectors(q));
+
+       blk_queue_max_zone_append_sectors(q, max_append);
+
+       sd_zbc_print_zones(sdkp);
+
 unlock:
        mutex_unlock(&sdkp->rev_mutex);
 
@@ -694,7 +721,6 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
        struct request_queue *q = disk->queue;
        unsigned int nr_zones;
        u32 zone_blocks = 0;
-       u32 max_append;
        int ret;
 
        if (!sd_is_zoned(sdkp))
@@ -728,22 +754,8 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
        sdkp->device->use_16_for_rw = 1;
        sdkp->device->use_10_for_rw = 0;
 
-       ret = sd_zbc_revalidate_zones(sdkp, zone_blocks, nr_zones);
-       if (ret)
-               goto err;
-
-       /*
-        * On the first scan 'chunk_sectors' isn't setup yet, so calling
-        * blk_queue_max_zone_append_sectors() will result in a WARN(). Defer
-        * this setting to the second scan.
-        */
-       if (sdkp->first_scan)
-               return 0;
-
-       max_append = min_t(u32, logical_to_sectors(sdkp->device, zone_blocks),
-                          q->limits.max_segments << (PAGE_SHIFT - 9));
-
-       blk_queue_max_zone_append_sectors(q, max_append);
+       sdkp->rev_nr_zones = nr_zones;
+       sdkp->rev_zone_blocks = zone_blocks;
 
        return 0;
 
@@ -753,23 +765,6 @@ err:
        return ret;
 }
 
-void sd_zbc_print_zones(struct scsi_disk *sdkp)
-{
-       if (!sd_is_zoned(sdkp) || !sdkp->capacity)
-               return;
-
-       if (sdkp->capacity & (sdkp->zone_blocks - 1))
-               sd_printk(KERN_NOTICE, sdkp,
-                         "%u zones of %u logical blocks + 1 runt zone\n",
-                         sdkp->nr_zones - 1,
-                         sdkp->zone_blocks);
-       else
-               sd_printk(KERN_NOTICE, sdkp,
-                         "%u zones of %u logical blocks\n",
-                         sdkp->nr_zones,
-                         sdkp->zone_blocks);
-}
-
 int sd_zbc_init_disk(struct scsi_disk *sdkp)
 {
        if (!sd_is_zoned(sdkp))
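
The reworked revalidation above defers the zone-append limit to sd_zbc_revalidate_zones(), where it becomes the smallest of the zone size in 512-byte sectors, the segment-based limit and the controller's max_hw_sectors. A minimal userspace sketch of that arithmetic follows; it is an illustration with hypothetical queue limits, not kernel code.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12   /* assumption: 4 KiB pages */

static uint32_t min_u32(uint32_t a, uint32_t b)
{
        return a < b ? a : b;
}

/* Mirrors the max_append computation; all limits below are hypothetical. */
static uint32_t max_zone_append_sectors(uint32_t zone_blocks,
                                        unsigned int logical_block_shift,
                                        uint32_t max_segments,
                                        uint32_t max_hw_sectors)
{
        /* zone size: logical blocks -> 512-byte sectors */
        uint32_t zone_sectors = zone_blocks << (logical_block_shift - 9);
        /* segment-based limit: pages -> 512-byte sectors */
        uint32_t seg_sectors = max_segments << (PAGE_SHIFT - 9);
        uint32_t max_append = min_u32(zone_sectors, seg_sectors);

        return min_u32(max_append, max_hw_sectors);
}

int main(void)
{
        /* 65536 blocks of 4 KiB per zone, 128 segments, 2048-sector HW limit */
        printf("max_zone_append_sectors = %u\n",
               max_zone_append_sectors(65536, 12, 128, 2048));
        return 0;
}
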
index 46bb905..eafe0db 100644 (file)
@@ -38,6 +38,7 @@ static int ti_j721e_ufs_probe(struct platform_device *pdev)
        /* Select MPHY refclk frequency */
        clk = devm_clk_get(dev, NULL);
        if (IS_ERR(clk)) {
+               ret = PTR_ERR(clk);
                dev_err(dev, "Cannot claim MPHY clock.\n");
                goto clk_err;
        }
index 29cd017..1755dd6 100644 (file)
@@ -212,7 +212,7 @@ static int ufs_mtk_wait_link_state(struct ufs_hba *hba, u32 state,
        ktime_t timeout, time_checked;
        u32 val;
 
-       timeout = ktime_add_us(ktime_get(), ms_to_ktime(max_wait_ms));
+       timeout = ktime_add_ms(ktime_get(), max_wait_ms);
        do {
                time_checked = ktime_get();
                ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL);
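
The one-line ufs-mediatek fix above corrects a unit mismatch: ms_to_ktime() yields a nanosecond-valued ktime_t, and ktime_add_us() then scales its second argument by 1000 again, so the computed timeout was far longer than intended. A standalone sketch of the arithmetic with plain 64-bit nanoseconds and a hypothetical 100 ms wait:

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel helpers; ktime_t is modelled as ns. */
static int64_t ms_to_ns(int64_t ms)           { return ms * 1000000; }
static int64_t add_us(int64_t kt, int64_t us) { return kt + us * 1000; }
static int64_t add_ms(int64_t kt, int64_t ms) { return kt + ms * 1000000; }

int main(void)
{
        int64_t now = 0;          /* pretend ktime_get() returned 0 */
        int64_t max_wait_ms = 100;

        /* buggy form: a ns value is passed where a us count is expected,
         * turning a 100 ms timeout into 100 s */
        int64_t wrong = add_us(now, ms_to_ns(max_wait_ms));
        /* fixed form: add the millisecond count directly */
        int64_t right = add_ms(now, max_wait_ms);

        printf("wrong = %lld ns, right = %lld ns\n",
               (long long)wrong, (long long)right);
        return 0;
}
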
index f407b13..5a95a7b 100644 (file)
@@ -44,11 +44,23 @@ static int ufs_intel_link_startup_notify(struct ufs_hba *hba,
        return err;
 }
 
+static int ufs_intel_ehl_init(struct ufs_hba *hba)
+{
+       hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
+       return 0;
+}
+
 static struct ufs_hba_variant_ops ufs_intel_cnl_hba_vops = {
        .name                   = "intel-pci",
        .link_startup_notify    = ufs_intel_link_startup_notify,
 };
 
+static struct ufs_hba_variant_ops ufs_intel_ehl_hba_vops = {
+       .name                   = "intel-pci",
+       .init                   = ufs_intel_ehl_init,
+       .link_startup_notify    = ufs_intel_link_startup_notify,
+};
+
 #ifdef CONFIG_PM_SLEEP
 /**
  * ufshcd_pci_suspend - suspend power management function
@@ -177,8 +189,8 @@ static const struct dev_pm_ops ufshcd_pci_pm_ops = {
 static const struct pci_device_id ufshcd_pci_tbl[] = {
        { PCI_VENDOR_ID_SAMSUNG, 0xC00C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
        { PCI_VDEVICE(INTEL, 0x9DFA), (kernel_ulong_t)&ufs_intel_cnl_hba_vops },
-       { PCI_VDEVICE(INTEL, 0x4B41), (kernel_ulong_t)&ufs_intel_cnl_hba_vops },
-       { PCI_VDEVICE(INTEL, 0x4B43), (kernel_ulong_t)&ufs_intel_cnl_hba_vops },
+       { PCI_VDEVICE(INTEL, 0x4B41), (kernel_ulong_t)&ufs_intel_ehl_hba_vops },
+       { PCI_VDEVICE(INTEL, 0x4B43), (kernel_ulong_t)&ufs_intel_ehl_hba_vops },
        { }     /* terminate list */
 };
 
index 3076222..da199fa 100644 (file)
@@ -1561,6 +1561,7 @@ unblock_reqs:
 int ufshcd_hold(struct ufs_hba *hba, bool async)
 {
        int rc = 0;
+       bool flush_result;
        unsigned long flags;
 
        if (!ufshcd_is_clkgating_allowed(hba))
@@ -1592,7 +1593,9 @@ start:
                                break;
                        }
                        spin_unlock_irqrestore(hba->host->host_lock, flags);
-                       flush_work(&hba->clk_gating.ungate_work);
+                       flush_result = flush_work(&hba->clk_gating.ungate_work);
+                       if (hba->clk_gating.is_suspended && !flush_result)
+                               goto out;
                        spin_lock_irqsave(hba->host->host_lock, flags);
                        goto start;
                }
@@ -5941,7 +5944,7 @@ static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
  */
 static irqreturn_t ufshcd_intr(int irq, void *__hba)
 {
-       u32 intr_status, enabled_intr_status;
+       u32 intr_status, enabled_intr_status = 0;
        irqreturn_t retval = IRQ_NONE;
        struct ufs_hba *hba = __hba;
        int retries = hba->nutrs;
@@ -5955,7 +5958,7 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
         * read, make sure we handle them by checking the interrupt status
         * again in a loop until we process all of the reqs before returning.
         */
-       do {
+       while (intr_status && retries--) {
                enabled_intr_status =
                        intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
                if (intr_status)
@@ -5964,9 +5967,9 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
                        retval |= ufshcd_sl_intr(hba, enabled_intr_status);
 
                intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
-       } while (intr_status && --retries);
+       }
 
-       if (retval == IRQ_NONE) {
+       if (enabled_intr_status && retval == IRQ_NONE) {
                dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x\n",
                                        __func__, intr_status);
                ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
@@ -6434,14 +6437,8 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
                goto out;
        }
 
-       if (!(reg & (1 << tag))) {
-               dev_err(hba->dev,
-               "%s: cmd was completed, but without a notifying intr, tag = %d",
-               __func__, tag);
-       }
-
        /* Print Transfer Request of aborted task */
-       dev_err(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag);
+       dev_info(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag);
 
        /*
         * Print detailed info about aborted request.
@@ -6462,6 +6459,13 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
        }
        hba->req_abort_count++;
 
+       if (!(reg & (1 << tag))) {
+               dev_err(hba->dev,
+               "%s: cmd was completed, but without a notifying intr, tag = %d",
+               __func__, tag);
+               goto cleanup;
+       }
+
        /* Skip task abort in case previous aborts failed and report failure */
        if (lrbp->req_abort_skip) {
                err = -EIO;
@@ -6492,7 +6496,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
                        /* command completed already */
                        dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n",
                                __func__, tag);
-                       goto out;
+                       goto cleanup;
                } else {
                        dev_err(hba->dev,
                                "%s: no response from device. tag = %d, err %d\n",
@@ -6526,6 +6530,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
                goto out;
        }
 
+cleanup:
        scsi_dma_unmap(cmd);
 
        spin_lock_irqsave(host->host_lock, flags);
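
The interrupt-handler rework above bounds the status polling loop and reports an unhandled interrupt only when an enabled source was actually pending. A compact, self-contained sketch of that control flow; the status/enable readers and the tiny service routine below are hypothetical stand-ins for the UFS register accesses.

#include <stdint.h>
#include <stdio.h>

enum irqreturn { IRQ_NONE, IRQ_HANDLED };

/* Hypothetical hardware model: successive interrupt-status snapshots. */
static uint32_t fake_status[] = { 0x5, 0x4, 0x0 };
static unsigned int fake_idx;

static uint32_t read_status(void)  { return fake_status[fake_idx]; }
static uint32_t read_enabled(void) { return 0x4; /* only bit 2 enabled */ }

static enum irqreturn service(uint32_t bits)
{
        return bits ? IRQ_HANDLED : IRQ_NONE;
}

static enum irqreturn intr(int retries)
{
        uint32_t intr_status = read_status();
        uint32_t enabled_intr_status = 0;
        enum irqreturn retval = IRQ_NONE;

        while (intr_status && retries--) {
                enabled_intr_status = intr_status & read_enabled();
                /* handle only enabled bits; disabled ones are left alone */
                if (enabled_intr_status)
                        retval = service(enabled_intr_status);
                fake_idx++;                 /* "hardware" produces new state */
                intr_status = read_status();
        }

        /* complain only if an enabled source was seen but nothing handled it */
        if (enabled_intr_status && retval == IRQ_NONE)
                printf("unhandled interrupt 0x%08x\n", (unsigned)intr_status);

        return retval;
}

int main(void)
{
        printf("%s\n", intr(8) == IRQ_HANDLED ? "handled" : "none");
        return 0;
}
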
index b2ef18f..363589c 100644 (file)
@@ -520,6 +520,12 @@ enum ufshcd_quirks {
         * OCS FATAL ERROR with device error through sense data
         */
        UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR             = 1 << 10,
+
+       /*
+        * This quirk should be enabled if the host controller reports the
+        * auto-hibernate capability but auto-hibernate does not work reliably.
+        */
+       UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8                = 1 << 11,
 };
 
 enum ufshcd_caps {
@@ -803,7 +809,8 @@ return true;
 
 static inline bool ufshcd_is_auto_hibern8_supported(struct ufs_hba *hba)
 {
-       return (hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT);
+       return (hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT) &&
+               !(hba->quirks & UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8);
 }
 
 static inline bool ufshcd_is_auto_hibern8_enabled(struct ufs_hba *hba)
index 8cc003a..ca1c39b 100644 (file)
@@ -754,14 +754,14 @@ static struct scsi_host_template virtscsi_host_template = {
 
 #define virtscsi_config_get(vdev, fld) \
        ({ \
-               typeof(((struct virtio_scsi_config *)0)->fld) __val; \
+               __virtio_native_type(struct virtio_scsi_config, fld) __val; \
                virtio_cread(vdev, struct virtio_scsi_config, fld, &__val); \
                __val; \
        })
 
 #define virtscsi_config_set(vdev, fld, val) \
        do { \
-               typeof(((struct virtio_scsi_config *)0)->fld) __val = (val); \
+               __virtio_native_type(struct virtio_scsi_config, fld) __val = (val); \
                virtio_cwrite(vdev, struct virtio_scsi_config, fld, &__val); \
        } while(0)
 
index eeb028b..fd72d90 100644 (file)
@@ -36,21 +36,6 @@ static void sh_clk_write(int value, struct clk *clk)
                iowrite32(value, clk->mapped_reg);
 }
 
-static unsigned int r8(const void __iomem *addr)
-{
-       return ioread8(addr);
-}
-
-static unsigned int r16(const void __iomem *addr)
-{
-       return ioread16(addr);
-}
-
-static unsigned int r32(const void __iomem *addr)
-{
-       return ioread32(addr);
-}
-
 static int sh_clk_mstp_enable(struct clk *clk)
 {
        sh_clk_write(sh_clk_read(clk) & ~(1 << clk->enable_bit), clk);
@@ -61,11 +46,11 @@ static int sh_clk_mstp_enable(struct clk *clk)
                        (phys_addr_t)clk->enable_reg + clk->mapped_reg;
 
                if (clk->flags & CLK_ENABLE_REG_8BIT)
-                       read = r8;
+                       read = ioread8;
                else if (clk->flags & CLK_ENABLE_REG_16BIT)
-                       read = r16;
+                       read = ioread16;
                else
-                       read = r32;
+                       read = ioread32;
 
                for (i = 1000;
                     (read(mapped_status) & (1 << clk->enable_bit)) && i;
index c3008e4..c6ea760 100644 (file)
@@ -1017,4 +1017,7 @@ config SPI_SLAVE_SYSTEM_CONTROL
 
 endif # SPI_SLAVE
 
+config SPI_DYNAMIC
+       def_bool ACPI || OF_DYNAMIC || SPI_SLAVE
+
 endif # SPI
index 4c643df..d4b33b3 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/iopoll.h>
 #include <linux/module.h>
 #include <linux/of_platform.h>
+#include <linux/pinctrl/consumer.h>
 #include <linux/pm_runtime.h>
 #include <linux/reset.h>
 #include <linux/spi/spi.h>
@@ -441,7 +442,8 @@ static int stm32_spi_prepare_mbr(struct stm32_spi *spi, u32 speed_hz,
 {
        u32 div, mbrdiv;
 
-       div = DIV_ROUND_UP(spi->clk_rate, speed_hz);
+       /* Ensure spi->clk_rate is even */
+       div = DIV_ROUND_UP(spi->clk_rate & ~0x1, speed_hz);
 
        /*
         * SPI framework set xfer->speed_hz to master->max_speed_hz if
@@ -467,20 +469,27 @@ static int stm32_spi_prepare_mbr(struct stm32_spi *spi, u32 speed_hz,
 /**
  * stm32h7_spi_prepare_fthlv - Determine FIFO threshold level
  * @spi: pointer to the spi controller data structure
+ * @xfer_len: length of the message to be transferred
  */
-static u32 stm32h7_spi_prepare_fthlv(struct stm32_spi *spi)
+static u32 stm32h7_spi_prepare_fthlv(struct stm32_spi *spi, u32 xfer_len)
 {
-       u32 fthlv, half_fifo;
+       u32 fthlv, half_fifo, packet;
 
        /* data packet should not exceed 1/2 of fifo space */
        half_fifo = (spi->fifo_size / 2);
 
+       /* the data packet should not exceed the transfer length */
+       if (half_fifo > xfer_len)
+               packet = xfer_len;
+       else
+               packet = half_fifo;
+
        if (spi->cur_bpw <= 8)
-               fthlv = half_fifo;
+               fthlv = packet;
        else if (spi->cur_bpw <= 16)
-               fthlv = half_fifo / 2;
+               fthlv = packet / 2;
        else
-               fthlv = half_fifo / 4;
+               fthlv = packet / 4;
 
        /* align packet size with data registers access */
        if (spi->cur_bpw > 8)
@@ -488,6 +497,9 @@ static u32 stm32h7_spi_prepare_fthlv(struct stm32_spi *spi)
        else
                fthlv -= (fthlv % 4); /* multiple of 4 */
 
+       if (!fthlv)
+               fthlv = 1;
+
        return fthlv;
 }
 
@@ -966,13 +978,13 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id)
                if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0)))
                        stm32h7_spi_read_rxfifo(spi, false);
 
-       writel_relaxed(mask, spi->base + STM32H7_SPI_IFCR);
+       writel_relaxed(sr & mask, spi->base + STM32H7_SPI_IFCR);
 
        spin_unlock_irqrestore(&spi->lock, flags);
 
        if (end) {
-               spi_finalize_current_transfer(master);
                stm32h7_spi_disable(spi);
+               spi_finalize_current_transfer(master);
        }
 
        return IRQ_HANDLED;
@@ -1393,7 +1405,7 @@ static void stm32h7_spi_set_bpw(struct stm32_spi *spi)
        cfg1_setb |= (bpw << STM32H7_SPI_CFG1_DSIZE_SHIFT) &
                     STM32H7_SPI_CFG1_DSIZE;
 
-       spi->cur_fthlv = stm32h7_spi_prepare_fthlv(spi);
+       spi->cur_fthlv = stm32h7_spi_prepare_fthlv(spi, spi->cur_xferlen);
        fthlv = spi->cur_fthlv - 1;
 
        cfg1_clrb |= STM32H7_SPI_CFG1_FTHLV;
@@ -1585,39 +1597,33 @@ static int stm32_spi_transfer_one_setup(struct stm32_spi *spi,
        unsigned long flags;
        unsigned int comm_type;
        int nb_words, ret = 0;
+       int mbr;
 
        spin_lock_irqsave(&spi->lock, flags);
 
-       if (spi->cur_bpw != transfer->bits_per_word) {
-               spi->cur_bpw = transfer->bits_per_word;
-               spi->cfg->set_bpw(spi);
-       }
-
-       if (spi->cur_speed != transfer->speed_hz) {
-               int mbr;
+       spi->cur_xferlen = transfer->len;
 
-               /* Update spi->cur_speed with real clock speed */
-               mbr = stm32_spi_prepare_mbr(spi, transfer->speed_hz,
-                                           spi->cfg->baud_rate_div_min,
-                                           spi->cfg->baud_rate_div_max);
-               if (mbr < 0) {
-                       ret = mbr;
-                       goto out;
-               }
+       spi->cur_bpw = transfer->bits_per_word;
+       spi->cfg->set_bpw(spi);
 
-               transfer->speed_hz = spi->cur_speed;
-               stm32_spi_set_mbr(spi, mbr);
+       /* Update spi->cur_speed with real clock speed */
+       mbr = stm32_spi_prepare_mbr(spi, transfer->speed_hz,
+                                   spi->cfg->baud_rate_div_min,
+                                   spi->cfg->baud_rate_div_max);
+       if (mbr < 0) {
+               ret = mbr;
+               goto out;
        }
 
+       transfer->speed_hz = spi->cur_speed;
+       stm32_spi_set_mbr(spi, mbr);
+
        comm_type = stm32_spi_communication_type(spi_dev, transfer);
-       if (spi->cur_comm != comm_type) {
-               ret = spi->cfg->set_mode(spi, comm_type);
+       ret = spi->cfg->set_mode(spi, comm_type);
+       if (ret < 0)
+               goto out;
 
-               if (ret < 0)
-                       goto out;
-
-               spi->cur_comm = comm_type;
-       }
+       spi->cur_comm = comm_type;
 
        if (spi->cfg->set_data_idleness)
                spi->cfg->set_data_idleness(spi, transfer->len);
@@ -1635,8 +1641,6 @@ static int stm32_spi_transfer_one_setup(struct stm32_spi *spi,
                        goto out;
        }
 
-       spi->cur_xferlen = transfer->len;
-
        dev_dbg(spi->dev, "transfer communication mode set to %d\n",
                spi->cur_comm);
        dev_dbg(spi->dev,
@@ -1996,6 +2000,8 @@ static int stm32_spi_remove(struct platform_device *pdev)
 
        pm_runtime_disable(&pdev->dev);
 
+       pinctrl_pm_select_sleep_state(&pdev->dev);
+
        return 0;
 }
 
@@ -2007,13 +2013,18 @@ static int stm32_spi_runtime_suspend(struct device *dev)
 
        clk_disable_unprepare(spi->clk);
 
-       return 0;
+       return pinctrl_pm_select_sleep_state(dev);
 }
 
 static int stm32_spi_runtime_resume(struct device *dev)
 {
        struct spi_master *master = dev_get_drvdata(dev);
        struct stm32_spi *spi = spi_master_get_devdata(master);
+       int ret;
+
+       ret = pinctrl_pm_select_default_state(dev);
+       if (ret)
+               return ret;
 
        return clk_prepare_enable(spi->clk);
 }
@@ -2043,10 +2054,23 @@ static int stm32_spi_resume(struct device *dev)
                return ret;
 
        ret = spi_master_resume(master);
-       if (ret)
+       if (ret) {
                clk_disable_unprepare(spi->clk);
+               return ret;
+       }
 
-       return ret;
+       ret = pm_runtime_get_sync(dev);
+       if (ret) {
+               dev_err(dev, "Unable to power device:%d\n", ret);
+               return ret;
+       }
+
+       spi->cfg->config(spi);
+
+       pm_runtime_mark_last_busy(dev);
+       pm_runtime_put_autosuspend(dev);
+
+       return 0;
 }
 #endif
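
The stm32h7_spi_prepare_fthlv() change above caps the FIFO threshold by the transfer length and guards against a zero threshold. A standalone sketch of the computation, with hypothetical FIFO and transfer sizes:

#include <stdint.h>
#include <stdio.h>

static uint32_t prepare_fthlv(uint32_t fifo_size, uint32_t cur_bpw,
                              uint32_t xfer_len)
{
        uint32_t half_fifo = fifo_size / 2;
        /* the data packet may exceed neither half the FIFO nor the transfer */
        uint32_t packet = xfer_len < half_fifo ? xfer_len : half_fifo;
        uint32_t fthlv;

        if (cur_bpw <= 8)
                fthlv = packet;
        else if (cur_bpw <= 16)
                fthlv = packet / 2;
        else
                fthlv = packet / 4;

        /* align with 16-bit or 32-bit data register accesses */
        if (cur_bpw > 8)
                fthlv -= fthlv % 2;
        else
                fthlv -= fthlv % 4;

        return fthlv ? fthlv : 1;   /* a threshold of 0 would stall the FIFO */
}

int main(void)
{
        /* 16-byte FIFO, 8-bit words, 3-byte transfer: threshold 1, not 8 */
        printf("fthlv = %u\n", prepare_fthlv(16, 8, 3));
        return 0;
}
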
 
index 6626587..dc12af0 100644 (file)
@@ -475,6 +475,12 @@ static LIST_HEAD(spi_controller_list);
  */
 static DEFINE_MUTEX(board_lock);
 
+/*
+ * Prevents addition of devices with the same chip select and
+ * addition of devices below an unregistering controller.
+ */
+static DEFINE_MUTEX(spi_add_lock);
+
 /**
  * spi_alloc_device - Allocate a new SPI device
  * @ctlr: Controller to which device is connected
@@ -554,7 +560,6 @@ static int spi_dev_check(struct device *dev, void *data)
  */
 int spi_add_device(struct spi_device *spi)
 {
-       static DEFINE_MUTEX(spi_add_lock);
        struct spi_controller *ctlr = spi->controller;
        struct device *dev = ctlr->dev.parent;
        int status;
@@ -582,6 +587,13 @@ int spi_add_device(struct spi_device *spi)
                goto done;
        }
 
+       /* Controller may unregister concurrently */
+       if (IS_ENABLED(CONFIG_SPI_DYNAMIC) &&
+           !device_is_registered(&ctlr->dev)) {
+               status = -ENODEV;
+               goto done;
+       }
+
        /* Descriptors take precedence */
        if (ctlr->cs_gpiods)
                spi->cs_gpiod = ctlr->cs_gpiods[spi->chip_select];
@@ -2795,6 +2807,10 @@ void spi_unregister_controller(struct spi_controller *ctlr)
        struct spi_controller *found;
        int id = ctlr->bus_num;
 
+       /* Prevent addition of new devices, unregister existing ones */
+       if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
+               mutex_lock(&spi_add_lock);
+
        device_for_each_child(&ctlr->dev, NULL, __unregister);
 
        /* First make sure that this controller was ever added */
@@ -2815,6 +2831,9 @@ void spi_unregister_controller(struct spi_controller *ctlr)
        if (found == ctlr)
                idr_remove(&spi_master_idr, id);
        mutex_unlock(&board_lock);
+
+       if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
+               mutex_unlock(&spi_add_lock);
 }
 EXPORT_SYMBOL_GPL(spi_unregister_controller);
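
The spi.c changes promote spi_add_lock to file scope so that controller unregistration and device addition serialize on it, and spi_add_device() re-checks that the controller is still registered while holding it. A reduced sketch of that locking scheme using POSIX threads; the registered flag and helper names are stand-ins, not the real SPI core API.

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t spi_add_lock = PTHREAD_MUTEX_INITIALIZER;

struct controller {
        bool registered;
};

static int add_device(struct controller *ctlr)
{
        int status = 0;

        pthread_mutex_lock(&spi_add_lock);
        /* the controller may be unregistering concurrently */
        if (!ctlr->registered) {
                status = -ENODEV;
                goto done;
        }
        /* ... chip-select checks and device registration would go here ... */
done:
        pthread_mutex_unlock(&spi_add_lock);
        return status;
}

static void unregister_controller(struct controller *ctlr)
{
        /* block new additions while existing children are torn down */
        pthread_mutex_lock(&spi_add_lock);
        ctlr->registered = false;
        /* ... unregister existing child devices ... */
        pthread_mutex_unlock(&spi_add_lock);
}

int main(void)
{
        struct controller c = { .registered = true };

        unregister_controller(&c);
        printf("add after unregister: %d\n", add_device(&c));
        return 0;
}
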
 
index 0369405..27c85f2 100644 (file)
@@ -31,7 +31,7 @@ void iscsit_put_transport(struct iscsit_transport *t)
        module_put(t->owner);
 }
 
-int iscsit_register_transport(struct iscsit_transport *t)
+void iscsit_register_transport(struct iscsit_transport *t)
 {
        INIT_LIST_HEAD(&t->t_node);
 
@@ -40,8 +40,6 @@ int iscsit_register_transport(struct iscsit_transport *t)
        mutex_unlock(&transport_mutex);
 
        pr_debug("Registered iSCSI transport: %s\n", t->name);
-
-       return 0;
 }
 EXPORT_SYMBOL(iscsit_register_transport);
 
index a2e710d..b668224 100644 (file)
@@ -499,4 +499,15 @@ config SPRD_THERMAL
        help
          Support for the Spreadtrum thermal sensor driver in the Linux thermal
          framework.
+
+config KHADAS_MCU_FAN_THERMAL
+       tristate "Khadas MCU controller FAN cooling support"
+       depends on OF || COMPILE_TEST
+       depends on MFD_KHADAS_MCU
+       select MFD_CORE
+       select REGMAP
+       help
+         If you say yes here you get support for the fan controlled
+         by the microcontroller found on the Khadas VIM boards.
+
 endif
index b8d96d2..b64dd50 100644 (file)
@@ -61,3 +61,4 @@ obj-$(CONFIG_ZX2967_THERMAL)  += zx2967_thermal.o
 obj-$(CONFIG_UNIPHIER_THERMAL) += uniphier_thermal.o
 obj-$(CONFIG_AMLOGIC_THERMAL)     += amlogic_thermal.o
 obj-$(CONFIG_SPRD_THERMAL)     += sprd_thermal.o
+obj-$(CONFIG_KHADAS_MCU_FAN_THERMAL)   += khadas_mcu_fan.o
diff --git a/drivers/thermal/khadas_mcu_fan.c b/drivers/thermal/khadas_mcu_fan.c
new file mode 100644 (file)
index 0000000..9eadd2d
--- /dev/null
@@ -0,0 +1,162 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Khadas MCU Controlled FAN driver
+ *
+ * Copyright (C) 2020 BayLibre SAS
+ * Author(s): Neil Armstrong <narmstrong@baylibre.com>
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/khadas-mcu.h>
+#include <linux/regmap.h>
+#include <linux/sysfs.h>
+#include <linux/thermal.h>
+
+#define MAX_LEVEL 3
+
+struct khadas_mcu_fan_ctx {
+       struct khadas_mcu *mcu;
+       unsigned int level;
+       struct thermal_cooling_device *cdev;
+};
+
+static int khadas_mcu_fan_set_level(struct khadas_mcu_fan_ctx *ctx,
+                                   unsigned int level)
+{
+       int ret;
+
+       ret = regmap_write(ctx->mcu->regmap, KHADAS_MCU_CMD_FAN_STATUS_CTRL_REG,
+                          level);
+       if (ret)
+               return ret;
+
+       ctx->level = level;
+
+       return 0;
+}
+
+static int khadas_mcu_fan_get_max_state(struct thermal_cooling_device *cdev,
+                                       unsigned long *state)
+{
+       *state = MAX_LEVEL;
+
+       return 0;
+}
+
+static int khadas_mcu_fan_get_cur_state(struct thermal_cooling_device *cdev,
+                                       unsigned long *state)
+{
+       struct khadas_mcu_fan_ctx *ctx = cdev->devdata;
+
+       *state = ctx->level;
+
+       return 0;
+}
+
+static int
+khadas_mcu_fan_set_cur_state(struct thermal_cooling_device *cdev,
+                            unsigned long state)
+{
+       struct khadas_mcu_fan_ctx *ctx = cdev->devdata;
+
+       if (state > MAX_LEVEL)
+               return -EINVAL;
+
+       if (state == ctx->level)
+               return 0;
+
+       return khadas_mcu_fan_set_level(ctx, state);
+}
+
+static const struct thermal_cooling_device_ops khadas_mcu_fan_cooling_ops = {
+       .get_max_state = khadas_mcu_fan_get_max_state,
+       .get_cur_state = khadas_mcu_fan_get_cur_state,
+       .set_cur_state = khadas_mcu_fan_set_cur_state,
+};
+
+static int khadas_mcu_fan_probe(struct platform_device *pdev)
+{
+       struct khadas_mcu *mcu = dev_get_drvdata(pdev->dev.parent);
+       struct thermal_cooling_device *cdev;
+       struct device *dev = &pdev->dev;
+       struct khadas_mcu_fan_ctx *ctx;
+       int ret;
+
+       ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+       if (!ctx)
+               return -ENOMEM;
+       ctx->mcu = mcu;
+       platform_set_drvdata(pdev, ctx);
+
+       cdev = devm_thermal_of_cooling_device_register(dev->parent,
+                       dev->parent->of_node, "khadas-mcu-fan", ctx,
+                       &khadas_mcu_fan_cooling_ops);
+       if (IS_ERR(cdev)) {
+               ret = PTR_ERR(cdev);
+               dev_err(dev, "Failed to register khadas-mcu-fan as cooling device: %d\n",
+                       ret);
+               return ret;
+       }
+       ctx->cdev = cdev;
+       thermal_cdev_update(cdev);
+
+       return 0;
+}
+
+static void khadas_mcu_fan_shutdown(struct platform_device *pdev)
+{
+       struct khadas_mcu_fan_ctx *ctx = platform_get_drvdata(pdev);
+
+       khadas_mcu_fan_set_level(ctx, 0);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int khadas_mcu_fan_suspend(struct device *dev)
+{
+       struct khadas_mcu_fan_ctx *ctx = dev_get_drvdata(dev);
+       unsigned int level_save = ctx->level;
+       int ret;
+
+       ret = khadas_mcu_fan_set_level(ctx, 0);
+       if (ret)
+               return ret;
+
+       ctx->level = level_save;
+
+       return 0;
+}
+
+static int khadas_mcu_fan_resume(struct device *dev)
+{
+       struct khadas_mcu_fan_ctx *ctx = dev_get_drvdata(dev);
+
+       return khadas_mcu_fan_set_level(ctx, ctx->level);
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(khadas_mcu_fan_pm, khadas_mcu_fan_suspend,
+                        khadas_mcu_fan_resume);
+
+static const struct platform_device_id khadas_mcu_fan_id_table[] = {
+       { .name = "khadas-mcu-fan-ctrl", },
+       {},
+};
+MODULE_DEVICE_TABLE(platform, khadas_mcu_fan_id_table);
+
+static struct platform_driver khadas_mcu_fan_driver = {
+       .probe          = khadas_mcu_fan_probe,
+       .shutdown       = khadas_mcu_fan_shutdown,
+       .driver = {
+               .name           = "khadas-mcu-fan-ctrl",
+               .pm             = &khadas_mcu_fan_pm,
+       },
+       .id_table       = khadas_mcu_fan_id_table,
+};
+
+module_platform_driver(khadas_mcu_fan_driver);
+
+MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
+MODULE_DESCRIPTION("Khadas MCU FAN driver");
+MODULE_LICENSE("GPL");
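
The new driver only exposes the MCU's four fan levels (0..MAX_LEVEL) as a thermal cooling device; choosing which level to use is left to the thermal core's trip points and cooling maps. Purely as an illustration of that split, a toy userspace mapping from temperature to a level, with hypothetical thresholds:

#include <stdio.h>

#define MAX_LEVEL 3

/*
 * Toy policy only: in the kernel the thermal core picks the cooling state
 * through trip points and cooling maps; the driver never derives a level
 * from a temperature itself. Thresholds are hypothetical, in millidegrees.
 */
static unsigned int fan_level_for_temp(int temp_mc)
{
        if (temp_mc < 60000)
                return 0;
        if (temp_mc < 70000)
                return 1;
        if (temp_mc < 80000)
                return 2;
        return MAX_LEVEL;
}

int main(void)
{
        printf("65 C -> level %u\n", fan_level_for_temp(65000));
        return 0;
}
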
index acb8b62..a4d7881 100644 (file)
@@ -17,13 +17,13 @@ static int __ida_init(struct kunit_resource *res, void *context)
        struct ida *ida = context;
 
        ida_init(ida);
-       res->allocation = ida;
+       res->data = ida;
        return 0;
 }
 
 static void __ida_destroy(struct kunit_resource *res)
 {
-       struct ida *ida = res->allocation;
+       struct ida *ida = res->data;
 
        ida_destroy(ida);
 }
index d93a69b..4271c40 100644 (file)
@@ -29,4 +29,23 @@ config IFCVF
          To compile this driver as a module, choose M here: the module will
          be called ifcvf.
 
+config MLX5_VDPA
+       bool "MLX5 VDPA support library for ConnectX devices"
+       depends on MLX5_CORE
+       default n
+       help
+         Support library for Mellanox VDPA drivers. Provides code that is
+         common for all types of VDPA drivers. The following drivers are planned:
+         net, block.
+
+config MLX5_VDPA_NET
+       tristate "vDPA driver for ConnectX devices"
+       depends on MLX5_VDPA
+       default n
+       help
+         VDPA network driver for ConnectX6 and newer. Provides offloading
+         of virtio net datapath such that descriptors put on the ring will
+         be executed by the hardware. It also supports a variety of stateless
+         offloads depending on the actual device used and firmware version.
+
 endif # VDPA
index 8bbb686..d160e9b 100644 (file)
@@ -2,3 +2,4 @@
 obj-$(CONFIG_VDPA) += vdpa.o
 obj-$(CONFIG_VDPA_SIM) += vdpa_sim/
 obj-$(CONFIG_IFCVF)    += ifcvf/
+obj-$(CONFIG_MLX5_VDPA) += mlx5/
index 94bf032..f2a128e 100644 (file)
@@ -272,7 +272,7 @@ static int ifcvf_config_features(struct ifcvf_hw *hw)
        return 0;
 }
 
-u64 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid)
+u16 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid)
 {
        struct ifcvf_lm_cfg __iomem *ifcvf_lm;
        void __iomem *avail_idx_addr;
@@ -287,7 +287,7 @@ u64 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid)
        return last_avail_idx;
 }
 
-int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u64 num)
+int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num)
 {
        struct ifcvf_lm_cfg __iomem *ifcvf_lm;
        void __iomem *avail_idx_addr;
index f455441..08f267a 100644 (file)
@@ -29,7 +29,7 @@
                 (1ULL << VIRTIO_F_VERSION_1)                   | \
                 (1ULL << VIRTIO_NET_F_STATUS)                  | \
                 (1ULL << VIRTIO_F_ORDER_PLATFORM)              | \
-                (1ULL << VIRTIO_F_IOMMU_PLATFORM)              | \
+                (1ULL << VIRTIO_F_ACCESS_PLATFORM)             | \
                 (1ULL << VIRTIO_NET_F_MRG_RXBUF))
 
 /* Only one queue pair for now. */
@@ -116,7 +116,7 @@ void ifcvf_set_status(struct ifcvf_hw *hw, u8 status);
 void io_write64_twopart(u64 val, u32 *lo, u32 *hi);
 void ifcvf_reset(struct ifcvf_hw *hw);
 u64 ifcvf_get_features(struct ifcvf_hw *hw);
-u64 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid);
-int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u64 num);
+u16 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid);
+int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num);
 struct ifcvf_adapter *vf_to_adapter(struct ifcvf_hw *hw);
 #endif /* _IFCVF_H_ */
index f5a60c1..076d7ac 100644 (file)
@@ -50,8 +50,10 @@ static void ifcvf_free_irq(struct ifcvf_adapter *adapter, int queues)
        int i;
 
 
-       for (i = 0; i < queues; i++)
+       for (i = 0; i < queues; i++) {
                devm_free_irq(&pdev->dev, vf->vring[i].irq, &vf->vring[i]);
+               vf->vring[i].irq = -EINVAL;
+       }
 
        ifcvf_free_irq_vectors(pdev);
 }
@@ -235,19 +237,21 @@ static u16 ifcvf_vdpa_get_vq_num_max(struct vdpa_device *vdpa_dev)
        return IFCVF_QUEUE_MAX;
 }
 
-static u64 ifcvf_vdpa_get_vq_state(struct vdpa_device *vdpa_dev, u16 qid)
+static int ifcvf_vdpa_get_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
+                                  struct vdpa_vq_state *state)
 {
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
 
-       return ifcvf_get_vq_state(vf, qid);
+       state->avail_index = ifcvf_get_vq_state(vf, qid);
+       return 0;
 }
 
 static int ifcvf_vdpa_set_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
-                                  u64 num)
+                                  const struct vdpa_vq_state *state)
 {
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
 
-       return ifcvf_set_vq_state(vf, qid, num);
+       return ifcvf_set_vq_state(vf, qid, state->avail_index);
 }
 
 static void ifcvf_vdpa_set_vq_cb(struct vdpa_device *vdpa_dev, u16 qid,
@@ -352,6 +356,14 @@ static void ifcvf_vdpa_set_config_cb(struct vdpa_device *vdpa_dev,
        vf->config_cb.private = cb->private;
 }
 
+static int ifcvf_vdpa_get_vq_irq(struct vdpa_device *vdpa_dev,
+                                u16 qid)
+{
+       struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+
+       return vf->vring[qid].irq;
+}
+
 /*
  * IFCVF currently doesn't have an on-chip IOMMU, so it does not
  * implement set_map()/dma_map()/dma_unmap()
@@ -369,6 +381,7 @@ static const struct vdpa_config_ops ifc_vdpa_ops = {
        .get_vq_ready   = ifcvf_vdpa_get_vq_ready,
        .set_vq_num     = ifcvf_vdpa_set_vq_num,
        .set_vq_address = ifcvf_vdpa_set_vq_address,
+       .get_vq_irq     = ifcvf_vdpa_get_vq_irq,
        .kick_vq        = ifcvf_vdpa_kick_vq,
        .get_generation = ifcvf_vdpa_get_generation,
        .get_device_id  = ifcvf_vdpa_get_device_id,
@@ -384,7 +397,7 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        struct device *dev = &pdev->dev;
        struct ifcvf_adapter *adapter;
        struct ifcvf_hw *vf;
-       int ret;
+       int ret, i;
 
        ret = pcim_enable_device(pdev);
        if (ret) {
@@ -420,7 +433,8 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        }
 
        adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa,
-                                   dev, &ifc_vdpa_ops);
+                                   dev, &ifc_vdpa_ops,
+                                   IFCVF_MAX_QUEUE_PAIRS * 2);
        if (adapter == NULL) {
                IFCVF_ERR(pdev, "Failed to allocate vDPA structure");
                return -ENOMEM;
@@ -441,6 +455,9 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                goto err;
        }
 
+       for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++)
+               vf->vring[i].irq = -EINVAL;
+
        ret = vdpa_register_device(&adapter->vdpa);
        if (ret) {
                IFCVF_ERR(pdev, "Failed to register ifcvf to vdpa bus");
diff --git a/drivers/vdpa/mlx5/Makefile b/drivers/vdpa/mlx5/Makefile
new file mode 100644 (file)
index 0000000..89a5bed
--- /dev/null
@@ -0,0 +1,4 @@
+subdir-ccflags-y += -I$(srctree)/drivers/vdpa/mlx5/core
+
+obj-$(CONFIG_MLX5_VDPA_NET) += mlx5_vdpa.o
+mlx5_vdpa-$(CONFIG_MLX5_VDPA_NET) += net/main.o net/mlx5_vnet.o core/resources.o core/mr.o
diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa.h b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
new file mode 100644 (file)
index 0000000..5c92a57
--- /dev/null
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020 Mellanox Technologies Ltd. */
+
+#ifndef __MLX5_VDPA_H__
+#define __MLX5_VDPA_H__
+
+#include <linux/vdpa.h>
+#include <linux/mlx5/driver.h>
+
+struct mlx5_vdpa_direct_mr {
+       u64 start;
+       u64 end;
+       u32 perm;
+       struct mlx5_core_mkey mr;
+       struct sg_table sg_head;
+       int log_size;
+       int nsg;
+       struct list_head list;
+       u64 offset;
+};
+
+struct mlx5_vdpa_mr {
+       struct mlx5_core_mkey mkey;
+
+       /* list of direct MRs descendants of this indirect mr */
+       struct list_head head;
+       unsigned long num_directs;
+       unsigned long num_klms;
+       bool initialized;
+
+       /* serialize mkey creation and destruction */
+       struct mutex mkey_mtx;
+};
+
+struct mlx5_vdpa_resources {
+       u32 pdn;
+       struct mlx5_uars_page *uar;
+       void __iomem *kick_addr;
+       u16 uid;
+       u32 null_mkey;
+       bool valid;
+};
+
+struct mlx5_vdpa_dev {
+       struct vdpa_device vdev;
+       struct mlx5_core_dev *mdev;
+       struct mlx5_vdpa_resources res;
+
+       u64 mlx_features;
+       u64 actual_features;
+       u8 status;
+       u32 max_vqs;
+       u32 generation;
+
+       struct mlx5_vdpa_mr mr;
+};
+
+int mlx5_vdpa_alloc_pd(struct mlx5_vdpa_dev *dev, u32 *pdn, u16 uid);
+int mlx5_vdpa_dealloc_pd(struct mlx5_vdpa_dev *dev, u32 pdn, u16 uid);
+int mlx5_vdpa_get_null_mkey(struct mlx5_vdpa_dev *dev, u32 *null_mkey);
+int mlx5_vdpa_create_tis(struct mlx5_vdpa_dev *mvdev, void *in, u32 *tisn);
+void mlx5_vdpa_destroy_tis(struct mlx5_vdpa_dev *mvdev, u32 tisn);
+int mlx5_vdpa_create_rqt(struct mlx5_vdpa_dev *mvdev, void *in, int inlen, u32 *rqtn);
+void mlx5_vdpa_destroy_rqt(struct mlx5_vdpa_dev *mvdev, u32 rqtn);
+int mlx5_vdpa_create_tir(struct mlx5_vdpa_dev *mvdev, void *in, u32 *tirn);
+void mlx5_vdpa_destroy_tir(struct mlx5_vdpa_dev *mvdev, u32 tirn);
+int mlx5_vdpa_alloc_transport_domain(struct mlx5_vdpa_dev *mvdev, u32 *tdn);
+void mlx5_vdpa_dealloc_transport_domain(struct mlx5_vdpa_dev *mvdev, u32 tdn);
+int mlx5_vdpa_alloc_resources(struct mlx5_vdpa_dev *mvdev);
+void mlx5_vdpa_free_resources(struct mlx5_vdpa_dev *mvdev);
+int mlx5_vdpa_create_mkey(struct mlx5_vdpa_dev *mvdev, struct mlx5_core_mkey *mkey, u32 *in,
+                         int inlen);
+int mlx5_vdpa_destroy_mkey(struct mlx5_vdpa_dev *mvdev, struct mlx5_core_mkey *mkey);
+int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
+                            bool *change_map);
+int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb);
+void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev);
+
+#define mlx5_vdpa_warn(__dev, format, ...)                                                         \
+       dev_warn((__dev)->mdev->device, "%s:%d:(pid %d) warning: " format, __func__, __LINE__,     \
+                current->pid, ##__VA_ARGS__)
+
+#define mlx5_vdpa_info(__dev, format, ...)                                                         \
+       dev_info((__dev)->mdev->device, "%s:%d:(pid %d): " format, __func__, __LINE__,             \
+                current->pid, ##__VA_ARGS__)
+
+#define mlx5_vdpa_dbg(__dev, format, ...)                                                          \
+       dev_debug((__dev)->mdev->device, "%s:%d:(pid %d): " format, __func__, __LINE__,            \
+                 current->pid, ##__VA_ARGS__)
+
+#endif /* __MLX5_VDPA_H__ */
diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa_ifc.h b/drivers/vdpa/mlx5/core/mlx5_vdpa_ifc.h
new file mode 100644 (file)
index 0000000..f6f57a2
--- /dev/null
@@ -0,0 +1,168 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020 Mellanox Technologies Ltd. */
+
+#ifndef __MLX5_VDPA_IFC_H_
+#define __MLX5_VDPA_IFC_H_
+
+#include <linux/mlx5/mlx5_ifc.h>
+
+enum {
+       MLX5_VIRTIO_Q_EVENT_MODE_NO_MSIX_MODE  = 0x0,
+       MLX5_VIRTIO_Q_EVENT_MODE_QP_MODE       = 0x1,
+       MLX5_VIRTIO_Q_EVENT_MODE_MSIX_MODE     = 0x2,
+};
+
+enum {
+       MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_SPLIT   = 0x1, // do I check this caps?
+       MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_PACKED  = 0x2,
+};
+
+enum {
+       MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT   = 0,
+       MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED  = 1,
+};
+
+struct mlx5_ifc_virtio_q_bits {
+       u8    virtio_q_type[0x8];
+       u8    reserved_at_8[0x5];
+       u8    event_mode[0x3];
+       u8    queue_index[0x10];
+
+       u8    full_emulation[0x1];
+       u8    virtio_version_1_0[0x1];
+       u8    reserved_at_22[0x2];
+       u8    offload_type[0x4];
+       u8    event_qpn_or_msix[0x18];
+
+       u8    doorbell_stride_index[0x10];
+       u8    queue_size[0x10];
+
+       u8    device_emulation_id[0x20];
+
+       u8    desc_addr[0x40];
+
+       u8    used_addr[0x40];
+
+       u8    available_addr[0x40];
+
+       u8    virtio_q_mkey[0x20];
+
+       u8    max_tunnel_desc[0x10];
+       u8    reserved_at_170[0x8];
+       u8    error_type[0x8];
+
+       u8    umem_1_id[0x20];
+
+       u8    umem_1_size[0x20];
+
+       u8    umem_1_offset[0x40];
+
+       u8    umem_2_id[0x20];
+
+       u8    umem_2_size[0x20];
+
+       u8    umem_2_offset[0x40];
+
+       u8    umem_3_id[0x20];
+
+       u8    umem_3_size[0x20];
+
+       u8    umem_3_offset[0x40];
+
+       u8    counter_set_id[0x20];
+
+       u8    reserved_at_320[0x8];
+       u8    pd[0x18];
+
+       u8    reserved_at_340[0xc0];
+};
+
+struct mlx5_ifc_virtio_net_q_object_bits {
+       u8    modify_field_select[0x40];
+
+       u8    reserved_at_40[0x20];
+
+       u8    vhca_id[0x10];
+       u8    reserved_at_70[0x10];
+
+       u8    queue_feature_bit_mask_12_3[0xa];
+       u8    dirty_bitmap_dump_enable[0x1];
+       u8    vhost_log_page[0x5];
+       u8    reserved_at_90[0xc];
+       u8    state[0x4];
+
+       u8    reserved_at_a0[0x5];
+       u8    queue_feature_bit_mask_2_0[0x3];
+       u8    tisn_or_qpn[0x18];
+
+       u8    dirty_bitmap_mkey[0x20];
+
+       u8    dirty_bitmap_size[0x20];
+
+       u8    dirty_bitmap_addr[0x40];
+
+       u8    hw_available_index[0x10];
+       u8    hw_used_index[0x10];
+
+       u8    reserved_at_160[0xa0];
+
+       struct mlx5_ifc_virtio_q_bits virtio_q_context;
+};
+
+struct mlx5_ifc_create_virtio_net_q_in_bits {
+       struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+
+       struct mlx5_ifc_virtio_net_q_object_bits obj_context;
+};
+
+struct mlx5_ifc_create_virtio_net_q_out_bits {
+       struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
+};
+
+struct mlx5_ifc_destroy_virtio_net_q_in_bits {
+       struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_out_cmd_hdr;
+};
+
+struct mlx5_ifc_destroy_virtio_net_q_out_bits {
+       struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
+};
+
+struct mlx5_ifc_query_virtio_net_q_in_bits {
+       struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+};
+
+struct mlx5_ifc_query_virtio_net_q_out_bits {
+       struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
+
+       struct mlx5_ifc_virtio_net_q_object_bits obj_context;
+};
+
+enum {
+       MLX5_VIRTQ_MODIFY_MASK_STATE                    = (u64)1 << 0,
+       MLX5_VIRTQ_MODIFY_MASK_DIRTY_BITMAP_PARAMS      = (u64)1 << 3,
+       MLX5_VIRTQ_MODIFY_MASK_DIRTY_BITMAP_DUMP_ENABLE = (u64)1 << 4,
+};
+
+enum {
+       MLX5_VIRTIO_NET_Q_OBJECT_STATE_INIT     = 0x0,
+       MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY      = 0x1,
+       MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND  = 0x2,
+       MLX5_VIRTIO_NET_Q_OBJECT_STATE_ERR      = 0x3,
+};
+
+enum {
+       MLX5_RQTC_LIST_Q_TYPE_RQ            = 0x0,
+       MLX5_RQTC_LIST_Q_TYPE_VIRTIO_NET_Q  = 0x1,
+};
+
+struct mlx5_ifc_modify_virtio_net_q_in_bits {
+       struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+
+       struct mlx5_ifc_virtio_net_q_object_bits obj_context;
+};
+
+struct mlx5_ifc_modify_virtio_net_q_out_bits {
+       struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
+};
+
+#endif /* __MLX5_VDPA_IFC_H_ */
diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c
new file mode 100644 (file)
index 0000000..ef1c550
--- /dev/null
@@ -0,0 +1,486 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020 Mellanox Technologies Ltd. */
+
+#include <linux/vdpa.h>
+#include <linux/gcd.h>
+#include <linux/string.h>
+#include <linux/mlx5/qp.h>
+#include "mlx5_vdpa.h"
+
+/* DIV_ROUND_UP where the divisor is a power of 2 given by its log base 2 value */
+#define MLX5_DIV_ROUND_UP_POW2(_n, _s) \
+({ \
+       u64 __s = _s; \
+       u64 _res; \
+       _res = (((_n) + (1 << (__s)) - 1) >> (__s)); \
+       _res; \
+})
+
+static int get_octo_len(u64 len, int page_shift)
+{
+       u64 page_size = 1ULL << page_shift;
+       int npages;
+
+       npages = ALIGN(len, page_size) >> page_shift;
+       return (npages + 1) / 2;
+}
+
+static void fill_sg(struct mlx5_vdpa_direct_mr *mr, void *in)
+{
+       struct scatterlist *sg;
+       __be64 *pas;
+       int i;
+
+       pas = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
+       for_each_sg(mr->sg_head.sgl, sg, mr->nsg, i)
+               (*pas) = cpu_to_be64(sg_dma_address(sg));
+}
+
+static void mlx5_set_access_mode(void *mkc, int mode)
+{
+       MLX5_SET(mkc, mkc, access_mode_1_0, mode & 0x3);
+       MLX5_SET(mkc, mkc, access_mode_4_2, mode >> 2);
+}
+
+static void populate_mtts(struct mlx5_vdpa_direct_mr *mr, __be64 *mtt)
+{
+       struct scatterlist *sg;
+       int i;
+
+       for_each_sg(mr->sg_head.sgl, sg, mr->nsg, i)
+               mtt[i] = cpu_to_be64(sg_dma_address(sg));
+}
+
+static int create_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr)
+{
+       int inlen;
+       void *mkc;
+       void *in;
+       int err;
+
+       inlen = MLX5_ST_SZ_BYTES(create_mkey_in) + roundup(MLX5_ST_SZ_BYTES(mtt) * mr->nsg, 16);
+       in = kvzalloc(inlen, GFP_KERNEL);
+       if (!in)
+               return -ENOMEM;
+
+       MLX5_SET(create_mkey_in, in, uid, mvdev->res.uid);
+       fill_sg(mr, in);
+       mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+       MLX5_SET(mkc, mkc, lw, !!(mr->perm & VHOST_MAP_WO));
+       MLX5_SET(mkc, mkc, lr, !!(mr->perm & VHOST_MAP_RO));
+       mlx5_set_access_mode(mkc, MLX5_MKC_ACCESS_MODE_MTT);
+       MLX5_SET(mkc, mkc, qpn, 0xffffff);
+       MLX5_SET(mkc, mkc, pd, mvdev->res.pdn);
+       MLX5_SET64(mkc, mkc, start_addr, mr->offset);
+       MLX5_SET64(mkc, mkc, len, mr->end - mr->start);
+       MLX5_SET(mkc, mkc, log_page_size, mr->log_size);
+       MLX5_SET(mkc, mkc, translations_octword_size,
+                get_octo_len(mr->end - mr->start, mr->log_size));
+       MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
+                get_octo_len(mr->end - mr->start, mr->log_size));
+       populate_mtts(mr, MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt));
+       err = mlx5_vdpa_create_mkey(mvdev, &mr->mr, in, inlen);
+       kvfree(in);
+       if (err) {
+               mlx5_vdpa_warn(mvdev, "Failed to create direct MR\n");
+               return err;
+       }
+
+       return 0;
+}
+
+static void destroy_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr)
+{
+       mlx5_vdpa_destroy_mkey(mvdev, &mr->mr);
+}
+
+static u64 map_start(struct vhost_iotlb_map *map, struct mlx5_vdpa_direct_mr *mr)
+{
+       return max_t(u64, map->start, mr->start);
+}
+
+static u64 map_end(struct vhost_iotlb_map *map, struct mlx5_vdpa_direct_mr *mr)
+{
+       return min_t(u64, map->last + 1, mr->end);
+}
+
+static u64 maplen(struct vhost_iotlb_map *map, struct mlx5_vdpa_direct_mr *mr)
+{
+       return map_end(map, mr) - map_start(map, mr);
+}
+
+#define MLX5_VDPA_INVALID_START_ADDR ((u64)-1)
+#define MLX5_VDPA_INVALID_LEN ((u64)-1)
+
+static u64 indir_start_addr(struct mlx5_vdpa_mr *mkey)
+{
+       struct mlx5_vdpa_direct_mr *s;
+
+       s = list_first_entry_or_null(&mkey->head, struct mlx5_vdpa_direct_mr, list);
+       if (!s)
+               return MLX5_VDPA_INVALID_START_ADDR;
+
+       return s->start;
+}
+
+static u64 indir_len(struct mlx5_vdpa_mr *mkey)
+{
+       struct mlx5_vdpa_direct_mr *s;
+       struct mlx5_vdpa_direct_mr *e;
+
+       s = list_first_entry_or_null(&mkey->head, struct mlx5_vdpa_direct_mr, list);
+       if (!s)
+               return MLX5_VDPA_INVALID_LEN;
+
+       e = list_last_entry(&mkey->head, struct mlx5_vdpa_direct_mr, list);
+
+       return e->end - s->start;
+}
+
+#define LOG_MAX_KLM_SIZE 30
+#define MAX_KLM_SIZE BIT(LOG_MAX_KLM_SIZE)
+
+static u32 klm_bcount(u64 size)
+{
+       return (u32)size;
+}
+
+static void fill_indir(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mkey, void *in)
+{
+       struct mlx5_vdpa_direct_mr *dmr;
+       struct mlx5_klm *klmarr;
+       struct mlx5_klm *klm;
+       bool first = true;
+       u64 preve;
+       int i;
+
+       klmarr = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
+       i = 0;
+       list_for_each_entry(dmr, &mkey->head, list) {
+again:
+               klm = &klmarr[i++];
+               if (first) {
+                       preve = dmr->start;
+                       first = false;
+               }
+
+               if (preve == dmr->start) {
+                       klm->key = cpu_to_be32(dmr->mr.key);
+                       klm->bcount = cpu_to_be32(klm_bcount(dmr->end - dmr->start));
+                       preve = dmr->end;
+               } else {
+                       klm->key = cpu_to_be32(mvdev->res.null_mkey);
+                       klm->bcount = cpu_to_be32(klm_bcount(dmr->start - preve));
+                       preve = dmr->start;
+                       goto again;
+               }
+       }
+}
+
+static int klm_byte_size(int nklms)
+{
+       return 16 * ALIGN(nklms, 4);
+}
+
+static int create_indirect_key(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
+{
+       int inlen;
+       void *mkc;
+       void *in;
+       int err;
+       u64 start;
+       u64 len;
+
+       start = indir_start_addr(mr);
+       len = indir_len(mr);
+       if (start == MLX5_VDPA_INVALID_START_ADDR || len == MLX5_VDPA_INVALID_LEN)
+               return -EINVAL;
+
+       inlen = MLX5_ST_SZ_BYTES(create_mkey_in) + klm_byte_size(mr->num_klms);
+       in = kzalloc(inlen, GFP_KERNEL);
+       if (!in)
+               return -ENOMEM;
+
+       MLX5_SET(create_mkey_in, in, uid, mvdev->res.uid);
+       mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+       MLX5_SET(mkc, mkc, lw, 1);
+       MLX5_SET(mkc, mkc, lr, 1);
+       mlx5_set_access_mode(mkc, MLX5_MKC_ACCESS_MODE_KLMS);
+       MLX5_SET(mkc, mkc, qpn, 0xffffff);
+       MLX5_SET(mkc, mkc, pd, mvdev->res.pdn);
+       MLX5_SET64(mkc, mkc, start_addr, start);
+       MLX5_SET64(mkc, mkc, len, len);
+       MLX5_SET(mkc, mkc, translations_octword_size, klm_byte_size(mr->num_klms) / 16);
+       MLX5_SET(create_mkey_in, in, translations_octword_actual_size, mr->num_klms);
+       fill_indir(mvdev, mr, in);
+       err = mlx5_vdpa_create_mkey(mvdev, &mr->mkey, in, inlen);
+       kfree(in);
+       return err;
+}
+
+static void destroy_indirect_key(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mkey)
+{
+       mlx5_vdpa_destroy_mkey(mvdev, &mkey->mkey);
+}
+
+static int map_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr,
+                        struct vhost_iotlb *iotlb)
+{
+       struct vhost_iotlb_map *map;
+       unsigned long lgcd = 0;
+       int log_entity_size;
+       unsigned long size;
+       u64 start = 0;
+       int err;
+       struct page *pg;
+       unsigned int nsg;
+       int sglen;
+       u64 pa;
+       u64 paend;
+       struct scatterlist *sg;
+       struct device *dma = mvdev->mdev->device;
+       int ret;
+
+       for (map = vhost_iotlb_itree_first(iotlb, mr->start, mr->end - 1);
+            map; map = vhost_iotlb_itree_next(map, start, mr->end - 1)) {
+               size = maplen(map, mr);
+               lgcd = gcd(lgcd, size);
+               start += size;
+       }
+       log_entity_size = ilog2(lgcd);
+
+       sglen = 1 << log_entity_size;
+       nsg = MLX5_DIV_ROUND_UP_POW2(mr->end - mr->start, log_entity_size);
+
+       err = sg_alloc_table(&mr->sg_head, nsg, GFP_KERNEL);
+       if (err)
+               return err;
+
+       sg = mr->sg_head.sgl;
+       for (map = vhost_iotlb_itree_first(iotlb, mr->start, mr->end - 1);
+            map; map = vhost_iotlb_itree_next(map, mr->start, mr->end - 1)) {
+               paend = map->addr + maplen(map, mr);
+               for (pa = map->addr; pa < paend; pa += sglen) {
+                       pg = pfn_to_page(__phys_to_pfn(pa));
+                       if (!sg) {
+                               mlx5_vdpa_warn(mvdev, "sg null. start 0x%llx, end 0x%llx\n",
+                                              map->start, map->last + 1);
+                               err = -ENOMEM;
+                               goto err_map;
+                       }
+                       sg_set_page(sg, pg, sglen, 0);
+                       sg = sg_next(sg);
+                       if (!sg)
+                               goto done;
+               }
+       }
+done:
+       mr->log_size = log_entity_size;
+       mr->nsg = nsg;
+       ret = dma_map_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
+       if (!ret)
+               goto err_map;
+
+       err = create_direct_mr(mvdev, mr);
+       if (err)
+               goto err_direct;
+
+       return 0;
+
+err_direct:
+       dma_unmap_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
+err_map:
+       sg_free_table(&mr->sg_head);
+       return err;
+}
+
+static void unmap_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr)
+{
+       struct device *dma = mvdev->mdev->device;
+
+       destroy_direct_mr(mvdev, mr);
+       dma_unmap_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
+       sg_free_table(&mr->sg_head);
+}
+
+static int add_direct_chain(struct mlx5_vdpa_dev *mvdev, u64 start, u64 size, u8 perm,
+                           struct vhost_iotlb *iotlb)
+{
+       struct mlx5_vdpa_mr *mr = &mvdev->mr;
+       struct mlx5_vdpa_direct_mr *dmr;
+       struct mlx5_vdpa_direct_mr *n;
+       LIST_HEAD(tmp);
+       u64 st;
+       u64 sz;
+       int err;
+       int i = 0;
+
+       st = start;
+       while (size) {
+               sz = (u32)min_t(u64, MAX_KLM_SIZE, size);
+               dmr = kzalloc(sizeof(*dmr), GFP_KERNEL);
+               if (!dmr) {
+                       err = -ENOMEM;
+                       goto err_alloc;
+               }
+
+               dmr->start = st;
+               dmr->end = st + sz;
+               dmr->perm = perm;
+               err = map_direct_mr(mvdev, dmr, iotlb);
+               if (err) {
+                       kfree(dmr);
+                       goto err_alloc;
+               }
+
+               list_add_tail(&dmr->list, &tmp);
+               size -= sz;
+               mr->num_directs++;
+               mr->num_klms++;
+               st += sz;
+               i++;
+       }
+       list_splice_tail(&tmp, &mr->head);
+       return 0;
+
+err_alloc:
+       list_for_each_entry_safe(dmr, n, &mr->head, list) {
+               list_del_init(&dmr->list);
+               unmap_direct_mr(mvdev, dmr);
+               kfree(dmr);
+       }
+       return err;
+}
+
+/* The iotlb pointer contains a list of maps. Go over the maps, merging
+ * adjacent maps where possible, and create direct memory keys that provide the
+ * device access to memory. The direct mkeys are then referred to by the
+ * indirect memory key that provides access to the entire address space given
+ * by iotlb.
+ */
+static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
+{
+       struct mlx5_vdpa_mr *mr = &mvdev->mr;
+       struct mlx5_vdpa_direct_mr *dmr;
+       struct mlx5_vdpa_direct_mr *n;
+       struct vhost_iotlb_map *map;
+       u32 pperm = U16_MAX;
+       u64 last = U64_MAX;
+       u64 ps = U64_MAX;
+       u64 pe = U64_MAX;
+       u64 start = 0;
+       int err = 0;
+       int nnuls;
+
+       if (mr->initialized)
+               return 0;
+
+       INIT_LIST_HEAD(&mr->head);
+       for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
+            map = vhost_iotlb_itree_next(map, start, last)) {
+               start = map->start;
+               if (pe == map->start && pperm == map->perm) {
+                       pe = map->last + 1;
+               } else {
+                       if (ps != U64_MAX) {
+                               if (pe < map->start) {
+                                       /* We have a hole in the map. Check how
+                                        * many null keys are required to fill it.
+                                        */
+                                       nnuls = MLX5_DIV_ROUND_UP_POW2(map->start - pe,
+                                                                      LOG_MAX_KLM_SIZE);
+                                       mr->num_klms += nnuls;
+                               }
+                               err = add_direct_chain(mvdev, ps, pe - ps, pperm, iotlb);
+                               if (err)
+                                       goto err_chain;
+                       }
+                       ps = map->start;
+                       pe = map->last + 1;
+                       pperm = map->perm;
+               }
+       }
+       err = add_direct_chain(mvdev, ps, pe - ps, pperm, iotlb);
+       if (err)
+               goto err_chain;
+
+       /* Create the memory key that defines the guest's address space. This
+        * memory key refers to the direct keys that contain the MTT
+        * translations
+        */
+       err = create_indirect_key(mvdev, mr);
+       if (err)
+               goto err_chain;
+
+       mr->initialized = true;
+       return 0;
+
+err_chain:
+       list_for_each_entry_safe_reverse(dmr, n, &mr->head, list) {
+               list_del_init(&dmr->list);
+               unmap_direct_mr(mvdev, dmr);
+               kfree(dmr);
+       }
+       return err;
+}
+
+int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
+{
+       struct mlx5_vdpa_mr *mr = &mvdev->mr;
+       int err;
+
+       mutex_lock(&mr->mkey_mtx);
+       err = _mlx5_vdpa_create_mr(mvdev, iotlb);
+       mutex_unlock(&mr->mkey_mtx);
+       return err;
+}
+
+void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev)
+{
+       struct mlx5_vdpa_mr *mr = &mvdev->mr;
+       struct mlx5_vdpa_direct_mr *dmr;
+       struct mlx5_vdpa_direct_mr *n;
+
+       mutex_lock(&mr->mkey_mtx);
+       if (!mr->initialized)
+               goto out;
+
+       destroy_indirect_key(mvdev, mr);
+       list_for_each_entry_safe_reverse(dmr, n, &mr->head, list) {
+               list_del_init(&dmr->list);
+               unmap_direct_mr(mvdev, dmr);
+               kfree(dmr);
+       }
+       memset(mr, 0, sizeof(*mr));
+       mr->initialized = false;
+out:
+       mutex_unlock(&mr->mkey_mtx);
+}
+
+static bool map_empty(struct vhost_iotlb *iotlb)
+{
+       return !vhost_iotlb_itree_first(iotlb, 0, U64_MAX);
+}
+
+int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
+                            bool *change_map)
+{
+       struct mlx5_vdpa_mr *mr = &mvdev->mr;
+       int err = 0;
+
+       *change_map = false;
+       if (map_empty(iotlb)) {
+               mlx5_vdpa_destroy_mr(mvdev);
+               return 0;
+       }
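+       /* If the memory key was already created, the map has changed; report
+        * this through change_map so the caller can rebuild its resources.
+        * Otherwise create the memory key here.
+        */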
+       mutex_lock(&mr->mkey_mtx);
+       if (mr->initialized) {
+               mlx5_vdpa_info(mvdev, "memory map update\n");
+               *change_map = true;
+       }
+       if (!*change_map)
+               err = _mlx5_vdpa_create_mr(mvdev, iotlb);
+       mutex_unlock(&mr->mkey_mtx);
+
+       return err;
+}
diff --git a/drivers/vdpa/mlx5/core/resources.c b/drivers/vdpa/mlx5/core/resources.c
new file mode 100644 (file)
index 0000000..96e6421
--- /dev/null
@@ -0,0 +1,284 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020 Mellanox Technologies Ltd. */
+
+#include <linux/mlx5/driver.h>
+#include "mlx5_vdpa.h"
+
+static int alloc_pd(struct mlx5_vdpa_dev *dev, u32 *pdn, u16 uid)
+{
+       struct mlx5_core_dev *mdev = dev->mdev;
+
+       u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {};
+       u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {};
+       int err;
+
+       MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
+       MLX5_SET(alloc_pd_in, in, uid, uid);
+
+       err = mlx5_cmd_exec_inout(mdev, alloc_pd, in, out);
+       if (!err)
+               *pdn = MLX5_GET(alloc_pd_out, out, pd);
+
+       return err;
+}
+
+static int dealloc_pd(struct mlx5_vdpa_dev *dev, u32 pdn, u16 uid)
+{
+       u32 in[MLX5_ST_SZ_DW(dealloc_pd_in)] = {};
+       struct mlx5_core_dev *mdev = dev->mdev;
+
+       MLX5_SET(dealloc_pd_in, in, opcode, MLX5_CMD_OP_DEALLOC_PD);
+       MLX5_SET(dealloc_pd_in, in, pd, pdn);
+       MLX5_SET(dealloc_pd_in, in, uid, uid);
+       return mlx5_cmd_exec_in(mdev, dealloc_pd, in);
+}
+
+static int get_null_mkey(struct mlx5_vdpa_dev *dev, u32 *null_mkey)
+{
+       u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {};
+       u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {};
+       struct mlx5_core_dev *mdev = dev->mdev;
+       int err;
+
+       MLX5_SET(query_special_contexts_in, in, opcode, MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
+       err = mlx5_cmd_exec_inout(mdev, query_special_contexts, in, out);
+       if (!err)
+               *null_mkey = MLX5_GET(query_special_contexts_out, out, null_mkey);
+       return err;
+}
+
+static int create_uctx(struct mlx5_vdpa_dev *mvdev, u16 *uid)
+{
+       u32 out[MLX5_ST_SZ_DW(create_uctx_out)] = {};
+       int inlen;
+       void *in;
+       int err;
+
+       /* 0 means not supported */
+       if (!MLX5_CAP_GEN(mvdev->mdev, log_max_uctx))
+               return -EOPNOTSUPP;
+
+       inlen = MLX5_ST_SZ_BYTES(create_uctx_in);
+       in = kzalloc(inlen, GFP_KERNEL);
+       if (!in)
+               return -ENOMEM;
+
+       MLX5_SET(create_uctx_in, in, opcode, MLX5_CMD_OP_CREATE_UCTX);
+       MLX5_SET(create_uctx_in, in, uctx.cap, MLX5_UCTX_CAP_RAW_TX);
+
+       err = mlx5_cmd_exec(mvdev->mdev, in, inlen, out, sizeof(out));
+       kfree(in);
+       if (!err)
+               *uid = MLX5_GET(create_uctx_out, out, uid);
+
+       return err;
+}
+
+static void destroy_uctx(struct mlx5_vdpa_dev *mvdev, u32 uid)
+{
+       u32 out[MLX5_ST_SZ_DW(destroy_uctx_out)] = {};
+       u32 in[MLX5_ST_SZ_DW(destroy_uctx_in)] = {};
+
+       MLX5_SET(destroy_uctx_in, in, opcode, MLX5_CMD_OP_DESTROY_UCTX);
+       MLX5_SET(destroy_uctx_in, in, uid, uid);
+
+       mlx5_cmd_exec(mvdev->mdev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_vdpa_create_tis(struct mlx5_vdpa_dev *mvdev, void *in, u32 *tisn)
+{
+       u32 out[MLX5_ST_SZ_DW(create_tis_out)] = {};
+       int err;
+
+       MLX5_SET(create_tis_in, in, opcode, MLX5_CMD_OP_CREATE_TIS);
+       MLX5_SET(create_tis_in, in, uid, mvdev->res.uid);
+       err = mlx5_cmd_exec_inout(mvdev->mdev, create_tis, in, out);
+       if (!err)
+               *tisn = MLX5_GET(create_tis_out, out, tisn);
+
+       return err;
+}
+
+void mlx5_vdpa_destroy_tis(struct mlx5_vdpa_dev *mvdev, u32 tisn)
+{
+       u32 in[MLX5_ST_SZ_DW(destroy_tis_in)] = {};
+
+       MLX5_SET(destroy_tis_in, in, opcode, MLX5_CMD_OP_DESTROY_TIS);
+       MLX5_SET(destroy_tis_in, in, uid, mvdev->res.uid);
+       MLX5_SET(destroy_tis_in, in, tisn, tisn);
+       mlx5_cmd_exec_in(mvdev->mdev, destroy_tis, in);
+}
+
+int mlx5_vdpa_create_rqt(struct mlx5_vdpa_dev *mvdev, void *in, int inlen, u32 *rqtn)
+{
+       u32 out[MLX5_ST_SZ_DW(create_rqt_out)] = {};
+       int err;
+
+       MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);
+       err = mlx5_cmd_exec(mvdev->mdev, in, inlen, out, sizeof(out));
+       if (!err)
+               *rqtn = MLX5_GET(create_rqt_out, out, rqtn);
+
+       return err;
+}
+
+void mlx5_vdpa_destroy_rqt(struct mlx5_vdpa_dev *mvdev, u32 rqtn)
+{
+       u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)] = {};
+
+       MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
+       MLX5_SET(destroy_rqt_in, in, uid, mvdev->res.uid);
+       MLX5_SET(destroy_rqt_in, in, rqtn, rqtn);
+       mlx5_cmd_exec_in(mvdev->mdev, destroy_rqt, in);
+}
+
+int mlx5_vdpa_create_tir(struct mlx5_vdpa_dev *mvdev, void *in, u32 *tirn)
+{
+       u32 out[MLX5_ST_SZ_DW(create_tir_out)] = {};
+       int err;
+
+       MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR);
+       err = mlx5_cmd_exec_inout(mvdev->mdev, create_tir, in, out);
+       if (!err)
+               *tirn = MLX5_GET(create_tir_out, out, tirn);
+
+       return err;
+}
+
+void mlx5_vdpa_destroy_tir(struct mlx5_vdpa_dev *mvdev, u32 tirn)
+{
+       u32 in[MLX5_ST_SZ_DW(destroy_tir_in)] = {};
+
+       MLX5_SET(destroy_tir_in, in, opcode, MLX5_CMD_OP_DESTROY_TIR);
+       MLX5_SET(destroy_tir_in, in, uid, mvdev->res.uid);
+       MLX5_SET(destroy_tir_in, in, tirn, tirn);
+       mlx5_cmd_exec_in(mvdev->mdev, destroy_tir, in);
+}
+
+int mlx5_vdpa_alloc_transport_domain(struct mlx5_vdpa_dev *mvdev, u32 *tdn)
+{
+       u32 out[MLX5_ST_SZ_DW(alloc_transport_domain_out)] = {};
+       u32 in[MLX5_ST_SZ_DW(alloc_transport_domain_in)] = {};
+       int err;
+
+       MLX5_SET(alloc_transport_domain_in, in, opcode, MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN);
+       MLX5_SET(alloc_transport_domain_in, in, uid, mvdev->res.uid);
+
+       err = mlx5_cmd_exec_inout(mvdev->mdev, alloc_transport_domain, in, out);
+       if (!err)
+               *tdn = MLX5_GET(alloc_transport_domain_out, out, transport_domain);
+
+       return err;
+}
+
+void mlx5_vdpa_dealloc_transport_domain(struct mlx5_vdpa_dev *mvdev, u32 tdn)
+{
+       u32 in[MLX5_ST_SZ_DW(dealloc_transport_domain_in)] = {};
+
+       MLX5_SET(dealloc_transport_domain_in, in, opcode, MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
+       MLX5_SET(dealloc_transport_domain_in, in, uid, mvdev->res.uid);
+       MLX5_SET(dealloc_transport_domain_in, in, transport_domain, tdn);
+       mlx5_cmd_exec_in(mvdev->mdev, dealloc_transport_domain, in);
+}
+
+int mlx5_vdpa_create_mkey(struct mlx5_vdpa_dev *mvdev, struct mlx5_core_mkey *mkey, u32 *in,
+                         int inlen)
+{
+       u32 lout[MLX5_ST_SZ_DW(create_mkey_out)] = {};
+       u32 mkey_index;
+       void *mkc;
+       int err;
+
+       MLX5_SET(create_mkey_in, in, opcode, MLX5_CMD_OP_CREATE_MKEY);
+       MLX5_SET(create_mkey_in, in, uid, mvdev->res.uid);
+
+       err = mlx5_cmd_exec(mvdev->mdev, in, inlen, lout, sizeof(lout));
+       if (err)
+               return err;
+
+       mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+       mkey_index = MLX5_GET(create_mkey_out, lout, mkey_index);
+       mkey->iova = MLX5_GET64(mkc, mkc, start_addr);
+       mkey->size = MLX5_GET64(mkc, mkc, len);
+       mkey->key |= mlx5_idx_to_mkey(mkey_index);
+       mkey->pd = MLX5_GET(mkc, mkc, pd);
+       return 0;
+}
+
+int mlx5_vdpa_destroy_mkey(struct mlx5_vdpa_dev *mvdev, struct mlx5_core_mkey *mkey)
+{
+       u32 in[MLX5_ST_SZ_DW(destroy_mkey_in)] = {};
+
+       MLX5_SET(destroy_mkey_in, in, uid, mvdev->res.uid);
+       MLX5_SET(destroy_mkey_in, in, opcode, MLX5_CMD_OP_DESTROY_MKEY);
+       MLX5_SET(destroy_mkey_in, in, mkey_index, mlx5_mkey_to_idx(mkey->key));
+       return mlx5_cmd_exec_in(mvdev->mdev, destroy_mkey, in);
+}
+
+int mlx5_vdpa_alloc_resources(struct mlx5_vdpa_dev *mvdev)
+{
+       u64 offset = MLX5_CAP64_DEV_VDPA_EMULATION(mvdev->mdev, doorbell_bar_offset);
+       struct mlx5_vdpa_resources *res = &mvdev->res;
+       struct mlx5_core_dev *mdev = mvdev->mdev;
+       u64 kick_addr;
+       int err;
+
+       if (res->valid) {
+               mlx5_vdpa_warn(mvdev, "resources already allocated\n");
+               return -EINVAL;
+       }
+       mutex_init(&mvdev->mr.mkey_mtx);
+       res->uar = mlx5_get_uars_page(mdev);
+       if (IS_ERR(res->uar)) {
+               err = PTR_ERR(res->uar);
+               goto err_uars;
+       }
+
+       err = create_uctx(mvdev, &res->uid);
+       if (err)
+               goto err_uctx;
+
+       err = alloc_pd(mvdev, &res->pdn, res->uid);
+       if (err)
+               goto err_pd;
+
+       err = get_null_mkey(mvdev, &res->null_mkey);
+       if (err)
+               goto err_key;
+
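+       /* Map the doorbell page used to kick virtqueues; its offset within
+        * BAR 0 is reported by the doorbell_bar_offset capability.
+        */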
+       kick_addr = pci_resource_start(mdev->pdev, 0) + offset;
+       res->kick_addr = ioremap(kick_addr, PAGE_SIZE);
+       if (!res->kick_addr) {
+               err = -ENOMEM;
+               goto err_key;
+       }
+       res->valid = true;
+
+       return 0;
+
+err_key:
+       dealloc_pd(mvdev, res->pdn, res->uid);
+err_pd:
+       destroy_uctx(mvdev, res->uid);
+err_uctx:
+       mlx5_put_uars_page(mdev, res->uar);
+err_uars:
+       mutex_destroy(&mvdev->mr.mkey_mtx);
+       return err;
+}
+
+void mlx5_vdpa_free_resources(struct mlx5_vdpa_dev *mvdev)
+{
+       struct mlx5_vdpa_resources *res = &mvdev->res;
+
+       if (!res->valid)
+               return;
+
+       iounmap(res->kick_addr);
+       res->kick_addr = NULL;
+       dealloc_pd(mvdev, res->pdn, res->uid);
+       destroy_uctx(mvdev, res->uid);
+       mlx5_put_uars_page(mvdev->mdev, res->uar);
+       mutex_destroy(&mvdev->mr.mkey_mtx);
+       res->valid = false;
+}
diff --git a/drivers/vdpa/mlx5/net/main.c b/drivers/vdpa/mlx5/net/main.c
new file mode 100644 (file)
index 0000000..838cd98
--- /dev/null
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020 Mellanox Technologies Ltd. */
+
+#include <linux/module.h>
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/device.h>
+#include "mlx5_vdpa_ifc.h"
+#include "mlx5_vnet.h"
+
+MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
+MODULE_DESCRIPTION("Mellanox VDPA driver");
+MODULE_LICENSE("Dual BSD/GPL");
+
+static bool required_caps_supported(struct mlx5_core_dev *mdev)
+{
+       u8 event_mode;
+       u64 got;
+
+       got = MLX5_CAP_GEN_64(mdev, general_obj_types);
+
+       if (!(got & MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q))
+               return false;
+
+       event_mode = MLX5_CAP_DEV_VDPA_EMULATION(mdev, event_mode);
+       if (!(event_mode & MLX5_VIRTIO_Q_EVENT_MODE_QP_MODE))
+               return false;
+
+       if (!MLX5_CAP_DEV_VDPA_EMULATION(mdev, eth_frame_offload_type))
+               return false;
+
+       return true;
+}
+
+static void *mlx5_vdpa_add(struct mlx5_core_dev *mdev)
+{
+       struct mlx5_vdpa_dev *vdev;
+
+       if (mlx5_core_is_pf(mdev))
+               return NULL;
+
+       if (!required_caps_supported(mdev)) {
+               dev_info(mdev->device, "virtio net emulation not supported\n");
+               return NULL;
+       }
+       vdev = mlx5_vdpa_add_dev(mdev);
+       if (IS_ERR(vdev))
+               return NULL;
+
+       return vdev;
+}
+
+static void mlx5_vdpa_remove(struct mlx5_core_dev *mdev, void *context)
+{
+       struct mlx5_vdpa_dev *vdev = context;
+
+       mlx5_vdpa_remove_dev(vdev);
+}
+
+static struct mlx5_interface mlx5_vdpa_interface = {
+       .add = mlx5_vdpa_add,
+       .remove = mlx5_vdpa_remove,
+       .protocol = MLX5_INTERFACE_PROTOCOL_VDPA,
+};
+
+static int __init mlx5_vdpa_init(void)
+{
+       return mlx5_register_interface(&mlx5_vdpa_interface);
+}
+
+static void __exit mlx5_vdpa_exit(void)
+{
+       mlx5_unregister_interface(&mlx5_vdpa_interface);
+}
+
+module_init(mlx5_vdpa_init);
+module_exit(mlx5_vdpa_exit);
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
new file mode 100644 (file)
index 0000000..9df69d5
--- /dev/null
@@ -0,0 +1,1974 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020 Mellanox Technologies Ltd. */
+
+#include <linux/vdpa.h>
+#include <uapi/linux/virtio_ids.h>
+#include <linux/virtio_config.h>
+#include <linux/mlx5/qp.h>
+#include <linux/mlx5/device.h>
+#include <linux/mlx5/vport.h>
+#include <linux/mlx5/fs.h>
+#include <linux/mlx5/device.h>
+#include "mlx5_vnet.h"
+#include "mlx5_vdpa_ifc.h"
+#include "mlx5_vdpa.h"
+
+#define to_mvdev(__vdev) container_of((__vdev), struct mlx5_vdpa_dev, vdev)
+
+#define VALID_FEATURES_MASK                                                                        \
+       (BIT(VIRTIO_NET_F_CSUM) | BIT(VIRTIO_NET_F_GUEST_CSUM) |                                   \
+        BIT(VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) | BIT(VIRTIO_NET_F_MTU) | BIT(VIRTIO_NET_F_MAC) |   \
+        BIT(VIRTIO_NET_F_GUEST_TSO4) | BIT(VIRTIO_NET_F_GUEST_TSO6) |                             \
+        BIT(VIRTIO_NET_F_GUEST_ECN) | BIT(VIRTIO_NET_F_GUEST_UFO) | BIT(VIRTIO_NET_F_HOST_TSO4) | \
+        BIT(VIRTIO_NET_F_HOST_TSO6) | BIT(VIRTIO_NET_F_HOST_ECN) | BIT(VIRTIO_NET_F_HOST_UFO) |   \
+        BIT(VIRTIO_NET_F_MRG_RXBUF) | BIT(VIRTIO_NET_F_STATUS) | BIT(VIRTIO_NET_F_CTRL_VQ) |      \
+        BIT(VIRTIO_NET_F_CTRL_RX) | BIT(VIRTIO_NET_F_CTRL_VLAN) |                                 \
+        BIT(VIRTIO_NET_F_CTRL_RX_EXTRA) | BIT(VIRTIO_NET_F_GUEST_ANNOUNCE) |                      \
+        BIT(VIRTIO_NET_F_MQ) | BIT(VIRTIO_NET_F_CTRL_MAC_ADDR) | BIT(VIRTIO_NET_F_HASH_REPORT) |  \
+        BIT(VIRTIO_NET_F_RSS) | BIT(VIRTIO_NET_F_RSC_EXT) | BIT(VIRTIO_NET_F_STANDBY) |           \
+        BIT(VIRTIO_NET_F_SPEED_DUPLEX) | BIT(VIRTIO_F_NOTIFY_ON_EMPTY) |                          \
+        BIT(VIRTIO_F_ANY_LAYOUT) | BIT(VIRTIO_F_VERSION_1) | BIT(VIRTIO_F_ACCESS_PLATFORM) |      \
+        BIT(VIRTIO_F_RING_PACKED) | BIT(VIRTIO_F_ORDER_PLATFORM) | BIT(VIRTIO_F_SR_IOV))
+
+#define VALID_STATUS_MASK                                                                          \
+       (VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER | VIRTIO_CONFIG_S_DRIVER_OK |        \
+        VIRTIO_CONFIG_S_FEATURES_OK | VIRTIO_CONFIG_S_NEEDS_RESET | VIRTIO_CONFIG_S_FAILED)
+
+struct mlx5_vdpa_net_resources {
+       u32 tisn;
+       u32 tdn;
+       u32 tirn;
+       u32 rqtn;
+       bool valid;
+};
+
+struct mlx5_vdpa_cq_buf {
+       struct mlx5_frag_buf_ctrl fbc;
+       struct mlx5_frag_buf frag_buf;
+       int cqe_size;
+       int nent;
+};
+
+struct mlx5_vdpa_cq {
+       struct mlx5_core_cq mcq;
+       struct mlx5_vdpa_cq_buf buf;
+       struct mlx5_db db;
+       int cqe;
+};
+
+struct mlx5_vdpa_umem {
+       struct mlx5_frag_buf_ctrl fbc;
+       struct mlx5_frag_buf frag_buf;
+       int size;
+       u32 id;
+};
+
+struct mlx5_vdpa_qp {
+       struct mlx5_core_qp mqp;
+       struct mlx5_frag_buf frag_buf;
+       struct mlx5_db db;
+       u16 head;
+       bool fw;
+};
+
+struct mlx5_vq_restore_info {
+       u32 num_ent;
+       u64 desc_addr;
+       u64 device_addr;
+       u64 driver_addr;
+       u16 avail_index;
+       bool ready;
+       struct vdpa_callback cb;
+       bool restore;
+};
+
+struct mlx5_vdpa_virtqueue {
+       bool ready;
+       u64 desc_addr;
+       u64 device_addr;
+       u64 driver_addr;
+       u32 num_ent;
+       struct vdpa_callback event_cb;
+
+       /* Resources for implementing the notification channel from the device
+        * to the driver. fwqp is the firmware end of an RC connection; the
+        * other end is vqqp, used by the driver. cq is where completions are
+        * reported.
+        */
+       struct mlx5_vdpa_cq cq;
+       struct mlx5_vdpa_qp fwqp;
+       struct mlx5_vdpa_qp vqqp;
+
+       /* umem resources are required for the virtqueue operation. Their use
+        * is internal and they must be provided by the driver.
+        */
+       struct mlx5_vdpa_umem umem1;
+       struct mlx5_vdpa_umem umem2;
+       struct mlx5_vdpa_umem umem3;
+
+       bool initialized;
+       int index;
+       u32 virtq_id;
+       struct mlx5_vdpa_net *ndev;
+       u16 avail_idx;
+       int fw_state;
+
+       /* keep last in the struct */
+       struct mlx5_vq_restore_info ri;
+};
+
+/* We will remove this limitation once mlx5_vdpa_alloc_resources()
+ * provides for driver space allocation
+ */
+#define MLX5_MAX_SUPPORTED_VQS 16
+
+struct mlx5_vdpa_net {
+       struct mlx5_vdpa_dev mvdev;
+       struct mlx5_vdpa_net_resources res;
+       struct virtio_net_config config;
+       struct mlx5_vdpa_virtqueue vqs[MLX5_MAX_SUPPORTED_VQS];
+
+       /* Serialize vq resources creation and destruction. This is required
+        * since the memory map might change and we need to destroy and create
+        * resources while the driver is operational.
+        */
+       struct mutex reslock;
+       struct mlx5_flow_table *rxft;
+       struct mlx5_fc *rx_counter;
+       struct mlx5_flow_handle *rx_rule;
+       bool setup;
+       u16 mtu;
+};
+
+static void free_resources(struct mlx5_vdpa_net *ndev);
+static void init_mvqs(struct mlx5_vdpa_net *ndev);
+static int setup_driver(struct mlx5_vdpa_net *ndev);
+static void teardown_driver(struct mlx5_vdpa_net *ndev);
+
+static bool mlx5_vdpa_debug;
+
+#define MLX5_LOG_VIO_FLAG(_feature)                                                                \
+       do {                                                                                       \
+               if (features & BIT(_feature))                                                      \
+                       mlx5_vdpa_info(mvdev, "%s\n", #_feature);                                  \
+       } while (0)
+
+#define MLX5_LOG_VIO_STAT(_status)                                                                 \
+       do {                                                                                       \
+               if (status & (_status))                                                            \
+                       mlx5_vdpa_info(mvdev, "%s\n", #_status);                                   \
+       } while (0)
+
+static void print_status(struct mlx5_vdpa_dev *mvdev, u8 status, bool set)
+{
+       if (status & ~VALID_STATUS_MASK)
+               mlx5_vdpa_warn(mvdev, "Warning: there are invalid status bits 0x%x\n",
+                              status & ~VALID_STATUS_MASK);
+
+       if (!mlx5_vdpa_debug)
+               return;
+
+       mlx5_vdpa_info(mvdev, "driver status %s\n", set ? "set" : "get");
+       if (set && !status) {
+               mlx5_vdpa_info(mvdev, "driver resets the device\n");
+               return;
+       }
+
+       MLX5_LOG_VIO_STAT(VIRTIO_CONFIG_S_ACKNOWLEDGE);
+       MLX5_LOG_VIO_STAT(VIRTIO_CONFIG_S_DRIVER);
+       MLX5_LOG_VIO_STAT(VIRTIO_CONFIG_S_DRIVER_OK);
+       MLX5_LOG_VIO_STAT(VIRTIO_CONFIG_S_FEATURES_OK);
+       MLX5_LOG_VIO_STAT(VIRTIO_CONFIG_S_NEEDS_RESET);
+       MLX5_LOG_VIO_STAT(VIRTIO_CONFIG_S_FAILED);
+}
+
+static void print_features(struct mlx5_vdpa_dev *mvdev, u64 features, bool set)
+{
+       if (features & ~VALID_FEATURES_MASK)
+               mlx5_vdpa_warn(mvdev, "There are invalid feature bits 0x%llx\n",
+                              features & ~VALID_FEATURES_MASK);
+
+       if (!mlx5_vdpa_debug)
+               return;
+
+       mlx5_vdpa_info(mvdev, "driver %s feature bits:\n", set ? "sets" : "reads");
+       if (!features)
+               mlx5_vdpa_info(mvdev, "all feature bits are cleared\n");
+
+       MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_CSUM);
+       MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_GUEST_CSUM);
+       MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_CTRL_GUEST_OFFLOADS);
+       MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_MTU);
+       MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_MAC);
+       MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_GUEST_TSO4);
+       MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_GUEST_TSO6);
+       MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_GUEST_ECN);
+       MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_GUEST_UFO);
+       MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_HOST_TSO4);
+       MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_HOST_TSO6);
+       MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_HOST_ECN);
+       MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_HOST_UFO);
+       MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_MRG_RXBUF);
+       MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_STATUS);
+       MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_CTRL_VQ);
+       MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_CTRL_RX);
+       MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_CTRL_VLAN);
+       MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_CTRL_RX_EXTRA);
+       MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_GUEST_ANNOUNCE);
+       MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_MQ);
+       MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_CTRL_MAC_ADDR);
+       MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_HASH_REPORT);
+       MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_RSS);
+       MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_RSC_EXT);
+       MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_STANDBY);
+       MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_SPEED_DUPLEX);
+       MLX5_LOG_VIO_FLAG(VIRTIO_F_NOTIFY_ON_EMPTY);
+       MLX5_LOG_VIO_FLAG(VIRTIO_F_ANY_LAYOUT);
+       MLX5_LOG_VIO_FLAG(VIRTIO_F_VERSION_1);
+       MLX5_LOG_VIO_FLAG(VIRTIO_F_ACCESS_PLATFORM);
+       MLX5_LOG_VIO_FLAG(VIRTIO_F_RING_PACKED);
+       MLX5_LOG_VIO_FLAG(VIRTIO_F_ORDER_PLATFORM);
+       MLX5_LOG_VIO_FLAG(VIRTIO_F_SR_IOV);
+}
+
+static int create_tis(struct mlx5_vdpa_net *ndev)
+{
+       struct mlx5_vdpa_dev *mvdev = &ndev->mvdev;
+       u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
+       void *tisc;
+       int err;
+
+       tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
+       MLX5_SET(tisc, tisc, transport_domain, ndev->res.tdn);
+       err = mlx5_vdpa_create_tis(mvdev, in, &ndev->res.tisn);
+       if (err)
+               mlx5_vdpa_warn(mvdev, "create TIS (%d)\n", err);
+
+       return err;
+}
+
+static void destroy_tis(struct mlx5_vdpa_net *ndev)
+{
+       mlx5_vdpa_destroy_tis(&ndev->mvdev, ndev->res.tisn);
+}
+
+#define MLX5_VDPA_CQE_SIZE 64
+#define MLX5_VDPA_LOG_CQE_SIZE ilog2(MLX5_VDPA_CQE_SIZE)
+
+static int cq_frag_buf_alloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_cq_buf *buf, int nent)
+{
+       struct mlx5_frag_buf *frag_buf = &buf->frag_buf;
+       u8 log_wq_stride = MLX5_VDPA_LOG_CQE_SIZE;
+       u8 log_wq_sz = MLX5_VDPA_LOG_CQE_SIZE;
+       int err;
+
+       err = mlx5_frag_buf_alloc_node(ndev->mvdev.mdev, nent * MLX5_VDPA_CQE_SIZE, frag_buf,
+                                      ndev->mvdev.mdev->priv.numa_node);
+       if (err)
+               return err;
+
+       mlx5_init_fbc(frag_buf->frags, log_wq_stride, log_wq_sz, &buf->fbc);
+
+       buf->cqe_size = MLX5_VDPA_CQE_SIZE;
+       buf->nent = nent;
+
+       return 0;
+}
+
+static int umem_frag_buf_alloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_umem *umem, int size)
+{
+       struct mlx5_frag_buf *frag_buf = &umem->frag_buf;
+
+       return mlx5_frag_buf_alloc_node(ndev->mvdev.mdev, size, frag_buf,
+                                       ndev->mvdev.mdev->priv.numa_node);
+}
+
+static void cq_frag_buf_free(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_cq_buf *buf)
+{
+       mlx5_frag_buf_free(ndev->mvdev.mdev, &buf->frag_buf);
+}
+
+static void *get_cqe(struct mlx5_vdpa_cq *vcq, int n)
+{
+       return mlx5_frag_buf_get_wqe(&vcq->buf.fbc, n);
+}
+
+static void cq_frag_buf_init(struct mlx5_vdpa_cq *vcq, struct mlx5_vdpa_cq_buf *buf)
+{
+       struct mlx5_cqe64 *cqe64;
+       void *cqe;
+       int i;
+
+       for (i = 0; i < buf->nent; i++) {
+               cqe = get_cqe(vcq, i);
+               cqe64 = cqe;
+               cqe64->op_own = MLX5_CQE_INVALID << 4;
+       }
+}
+
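+/* Return the CQE at index n if it is owned by software, i.e. it is valid and
+ * its ownership bit matches the current pass over the CQ ring; otherwise
+ * return NULL.
+ */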
+static void *get_sw_cqe(struct mlx5_vdpa_cq *cq, int n)
+{
+       struct mlx5_cqe64 *cqe64 = get_cqe(cq, n & (cq->cqe - 1));
+
+       if (likely(get_cqe_opcode(cqe64) != MLX5_CQE_INVALID) &&
+           !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & cq->cqe)))
+               return cqe64;
+
+       return NULL;
+}
+
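+/* Advance the receive queue head by n posted entries and publish the new head
+ * in the doorbell record.
+ */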
+static void rx_post(struct mlx5_vdpa_qp *vqp, int n)
+{
+       vqp->head += n;
+       vqp->db.db[0] = cpu_to_be32(vqp->head);
+}
+
+static void qp_prepare(struct mlx5_vdpa_net *ndev, bool fw, void *in,
+                      struct mlx5_vdpa_virtqueue *mvq, u32 num_ent)
+{
+       struct mlx5_vdpa_qp *vqp;
+       __be64 *pas;
+       void *qpc;
+
+       vqp = fw ? &mvq->fwqp : &mvq->vqqp;
+       MLX5_SET(create_qp_in, in, uid, ndev->mvdev.res.uid);
+       qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
+       if (vqp->fw) {
+               /* The firmware QP is allocated by the driver for the firmware's
+                * use, so we can skip part of the params as they will be chosen
+                * by the firmware.
+                */
+               qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
+               MLX5_SET(qpc, qpc, rq_type, MLX5_ZERO_LEN_RQ);
+               MLX5_SET(qpc, qpc, no_sq, 1);
+               return;
+       }
+
+       MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC);
+       MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
+       MLX5_SET(qpc, qpc, pd, ndev->mvdev.res.pdn);
+       MLX5_SET(qpc, qpc, mtu, MLX5_QPC_MTU_256_BYTES);
+       MLX5_SET(qpc, qpc, uar_page, ndev->mvdev.res.uar->index);
+       MLX5_SET(qpc, qpc, log_page_size, vqp->frag_buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
+       MLX5_SET(qpc, qpc, no_sq, 1);
+       MLX5_SET(qpc, qpc, cqn_rcv, mvq->cq.mcq.cqn);
+       MLX5_SET(qpc, qpc, log_rq_size, ilog2(num_ent));
+       MLX5_SET(qpc, qpc, rq_type, MLX5_NON_ZERO_RQ);
+       pas = (__be64 *)MLX5_ADDR_OF(create_qp_in, in, pas);
+       mlx5_fill_page_frag_array(&vqp->frag_buf, pas);
+}
+
+static int rq_buf_alloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_qp *vqp, u32 num_ent)
+{
+       return mlx5_frag_buf_alloc_node(ndev->mvdev.mdev,
+                                       num_ent * sizeof(struct mlx5_wqe_data_seg), &vqp->frag_buf,
+                                       ndev->mvdev.mdev->priv.numa_node);
+}
+
+static void rq_buf_free(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_qp *vqp)
+{
+       mlx5_frag_buf_free(ndev->mvdev.mdev, &vqp->frag_buf);
+}
+
+static int qp_create(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq,
+                    struct mlx5_vdpa_qp *vqp)
+{
+       struct mlx5_core_dev *mdev = ndev->mvdev.mdev;
+       int inlen = MLX5_ST_SZ_BYTES(create_qp_in);
+       u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {};
+       void *qpc;
+       void *in;
+       int err;
+
+       if (!vqp->fw) {
+               vqp = &mvq->vqqp;
+               err = rq_buf_alloc(ndev, vqp, mvq->num_ent);
+               if (err)
+                       return err;
+
+               err = mlx5_db_alloc(ndev->mvdev.mdev, &vqp->db);
+               if (err)
+                       goto err_db;
+               inlen += vqp->frag_buf.npages * sizeof(__be64);
+       }
+
+       in = kzalloc(inlen, GFP_KERNEL);
+       if (!in) {
+               err = -ENOMEM;
+               goto err_kzalloc;
+       }
+
+       qp_prepare(ndev, vqp->fw, in, mvq, mvq->num_ent);
+       qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
+       MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC);
+       MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
+       MLX5_SET(qpc, qpc, pd, ndev->mvdev.res.pdn);
+       MLX5_SET(qpc, qpc, mtu, MLX5_QPC_MTU_256_BYTES);
+       if (!vqp->fw)
+               MLX5_SET64(qpc, qpc, dbr_addr, vqp->db.dma);
+       MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);
+       err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
+       kfree(in);
+       if (err)
+               goto err_kzalloc;
+
+       vqp->mqp.uid = ndev->mvdev.res.uid;
+       vqp->mqp.qpn = MLX5_GET(create_qp_out, out, qpn);
+
+       if (!vqp->fw)
+               rx_post(vqp, mvq->num_ent);
+
+       return 0;
+
+err_kzalloc:
+       if (!vqp->fw)
+               mlx5_db_free(ndev->mvdev.mdev, &vqp->db);
+err_db:
+       if (!vqp->fw)
+               rq_buf_free(ndev, vqp);
+
+       return err;
+}
+
+static void qp_destroy(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_qp *vqp)
+{
+       u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {};
+
+       MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
+       MLX5_SET(destroy_qp_in, in, qpn, vqp->mqp.qpn);
+       MLX5_SET(destroy_qp_in, in, uid, ndev->mvdev.res.uid);
+       if (mlx5_cmd_exec_in(ndev->mvdev.mdev, destroy_qp, in))
+               mlx5_vdpa_warn(&ndev->mvdev, "destroy qp 0x%x\n", vqp->mqp.qpn);
+       if (!vqp->fw) {
+               mlx5_db_free(ndev->mvdev.mdev, &vqp->db);
+               rq_buf_free(ndev, vqp);
+       }
+}
+
+static void *next_cqe_sw(struct mlx5_vdpa_cq *cq)
+{
+       return get_sw_cqe(cq, cq->mcq.cons_index);
+}
+
+static int mlx5_vdpa_poll_one(struct mlx5_vdpa_cq *vcq)
+{
+       struct mlx5_cqe64 *cqe64;
+
+       cqe64 = next_cqe_sw(vcq);
+       if (!cqe64)
+               return -EAGAIN;
+
+       vcq->mcq.cons_index++;
+       return 0;
+}
+
+static void mlx5_vdpa_handle_completions(struct mlx5_vdpa_virtqueue *mvq, int num)
+{
+       mlx5_cq_set_ci(&mvq->cq.mcq);
+       rx_post(&mvq->vqqp, num);
+       if (mvq->event_cb.callback)
+               mvq->event_cb.callback(mvq->event_cb.private);
+}
+
+static void mlx5_vdpa_cq_comp(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe)
+{
+       struct mlx5_vdpa_virtqueue *mvq = container_of(mcq, struct mlx5_vdpa_virtqueue, cq.mcq);
+       struct mlx5_vdpa_net *ndev = mvq->ndev;
+       void __iomem *uar_page = ndev->mvdev.res.uar->map;
+       int num = 0;
+
+       while (!mlx5_vdpa_poll_one(&mvq->cq)) {
+               num++;
+               if (num > mvq->num_ent / 2) {
+                       /* If completions keep coming while we poll, we want to
+                        * let the hardware know that we consumed them by
+                        * updating the doorbell record. We also let the vdpa
+                        * core know about this so it can pass it on to the
+                        * virtio driver in the guest.
+                        */
+                       mlx5_vdpa_handle_completions(mvq, num);
+                       num = 0;
+               }
+       }
+
+       if (num)
+               mlx5_vdpa_handle_completions(mvq, num);
+
+       mlx5_cq_arm(&mvq->cq.mcq, MLX5_CQ_DB_REQ_NOT, uar_page, mvq->cq.mcq.cons_index);
+}
+
+static int cq_create(struct mlx5_vdpa_net *ndev, u16 idx, u32 num_ent)
+{
+       struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
+       struct mlx5_core_dev *mdev = ndev->mvdev.mdev;
+       void __iomem *uar_page = ndev->mvdev.res.uar->map;
+       u32 out[MLX5_ST_SZ_DW(create_cq_out)];
+       struct mlx5_vdpa_cq *vcq = &mvq->cq;
+       unsigned int irqn;
+       __be64 *pas;
+       int inlen;
+       void *cqc;
+       void *in;
+       int err;
+       int eqn;
+
+       err = mlx5_db_alloc(mdev, &vcq->db);
+       if (err)
+               return err;
+
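+       /* The doorbell record holds two dwords: the consumer index followed
+        * by the arm counter.
+        */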
+       vcq->mcq.set_ci_db = vcq->db.db;
+       vcq->mcq.arm_db = vcq->db.db + 1;
+       vcq->mcq.cqe_sz = 64;
+
+       err = cq_frag_buf_alloc(ndev, &vcq->buf, num_ent);
+       if (err)
+               goto err_db;
+
+       cq_frag_buf_init(vcq, &vcq->buf);
+
+       inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
+               MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * vcq->buf.frag_buf.npages;
+       in = kzalloc(inlen, GFP_KERNEL);
+       if (!in) {
+               err = -ENOMEM;
+               goto err_vzalloc;
+       }
+
+       MLX5_SET(create_cq_in, in, uid, ndev->mvdev.res.uid);
+       pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas);
+       mlx5_fill_page_frag_array(&vcq->buf.frag_buf, pas);
+
+       cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
+       MLX5_SET(cqc, cqc, log_page_size, vcq->buf.frag_buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
+
+       /* Use vector 0 by default. Consider adding code to choose least used
+        * vector.
+        */
+       err = mlx5_vector2eqn(mdev, 0, &eqn, &irqn);
+       if (err)
+               goto err_vec;
+
+       cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
+       MLX5_SET(cqc, cqc, log_cq_size, ilog2(num_ent));
+       MLX5_SET(cqc, cqc, uar_page, ndev->mvdev.res.uar->index);
+       MLX5_SET(cqc, cqc, c_eqn, eqn);
+       MLX5_SET64(cqc, cqc, dbr_addr, vcq->db.dma);
+
+       err = mlx5_core_create_cq(mdev, &vcq->mcq, in, inlen, out, sizeof(out));
+       if (err)
+               goto err_vec;
+
+       vcq->mcq.comp = mlx5_vdpa_cq_comp;
+       vcq->cqe = num_ent;
+       vcq->mcq.set_ci_db = vcq->db.db;
+       vcq->mcq.arm_db = vcq->db.db + 1;
+       mlx5_cq_arm(&mvq->cq.mcq, MLX5_CQ_DB_REQ_NOT, uar_page, mvq->cq.mcq.cons_index);
+       kfree(in);
+       return 0;
+
+err_vec:
+       kfree(in);
+err_vzalloc:
+       cq_frag_buf_free(ndev, &vcq->buf);
+err_db:
+       mlx5_db_free(ndev->mvdev.mdev, &vcq->db);
+       return err;
+}
+
+static void cq_destroy(struct mlx5_vdpa_net *ndev, u16 idx)
+{
+       struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
+       struct mlx5_core_dev *mdev = ndev->mvdev.mdev;
+       struct mlx5_vdpa_cq *vcq = &mvq->cq;
+
+       if (mlx5_core_destroy_cq(mdev, &vcq->mcq)) {
+               mlx5_vdpa_warn(&ndev->mvdev, "destroy CQ 0x%x\n", vcq->mcq.cqn);
+               return;
+       }
+       cq_frag_buf_free(ndev, &vcq->buf);
+       mlx5_db_free(ndev->mvdev.mdev, &vcq->db);
+}
+
+static int umem_size(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num,
+                    struct mlx5_vdpa_umem **umemp)
+{
+       struct mlx5_core_dev *mdev = ndev->mvdev.mdev;
+       int p_a;
+       int p_b;
+
+       switch (num) {
+       case 1:
+               p_a = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_1_buffer_param_a);
+               p_b = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_1_buffer_param_b);
+               *umemp = &mvq->umem1;
+               break;
+       case 2:
+               p_a = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_2_buffer_param_a);
+               p_b = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_2_buffer_param_b);
+               *umemp = &mvq->umem2;
+               break;
+       case 3:
+               p_a = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_3_buffer_param_a);
+               p_b = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_3_buffer_param_b);
+               *umemp = &mvq->umem3;
+               break;
+       }
+       return p_a * mvq->num_ent + p_b;
+}
+
+static void umem_frag_buf_free(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_umem *umem)
+{
+       mlx5_frag_buf_free(ndev->mvdev.mdev, &umem->frag_buf);
+}
+
+static int create_umem(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num)
+{
+       int inlen;
+       u32 out[MLX5_ST_SZ_DW(create_umem_out)] = {};
+       void *um;
+       void *in;
+       int err;
+       __be64 *pas;
+       int size;
+       struct mlx5_vdpa_umem *umem;
+
+       size = umem_size(ndev, mvq, num, &umem);
+       if (size < 0)
+               return size;
+
+       umem->size = size;
+       err = umem_frag_buf_alloc(ndev, umem, size);
+       if (err)
+               return err;
+
+       inlen = MLX5_ST_SZ_BYTES(create_umem_in) + MLX5_ST_SZ_BYTES(mtt) * umem->frag_buf.npages;
+
+       in = kzalloc(inlen, GFP_KERNEL);
+       if (!in) {
+               err = -ENOMEM;
+               goto err_in;
+       }
+
+       MLX5_SET(create_umem_in, in, opcode, MLX5_CMD_OP_CREATE_UMEM);
+       MLX5_SET(create_umem_in, in, uid, ndev->mvdev.res.uid);
+       um = MLX5_ADDR_OF(create_umem_in, in, umem);
+       MLX5_SET(umem, um, log_page_size, umem->frag_buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
+       MLX5_SET64(umem, um, num_of_mtt, umem->frag_buf.npages);
+
+       pas = (__be64 *)MLX5_ADDR_OF(umem, um, mtt[0]);
+       mlx5_fill_page_frag_array_perm(&umem->frag_buf, pas, MLX5_MTT_PERM_RW);
+
+       err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out));
+       if (err) {
+               mlx5_vdpa_warn(&ndev->mvdev, "create umem(%d)\n", err);
+               goto err_cmd;
+       }
+
+       kfree(in);
+       umem->id = MLX5_GET(create_umem_out, out, umem_id);
+
+       return 0;
+
+err_cmd:
+       kfree(in);
+err_in:
+       umem_frag_buf_free(ndev, umem);
+       return err;
+}
+
+static void umem_destroy(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num)
+{
+       u32 in[MLX5_ST_SZ_DW(destroy_umem_in)] = {};
+       u32 out[MLX5_ST_SZ_DW(destroy_umem_out)] = {};
+       struct mlx5_vdpa_umem *umem;
+
+       switch (num) {
+       case 1:
+               umem = &mvq->umem1;
+               break;
+       case 2:
+               umem = &mvq->umem2;
+               break;
+       case 3:
+               umem = &mvq->umem3;
+               break;
+       }
+
+       MLX5_SET(destroy_umem_in, in, opcode, MLX5_CMD_OP_DESTROY_UMEM);
+       MLX5_SET(destroy_umem_in, in, umem_id, umem->id);
+       if (mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out)))
+               return;
+
+       umem_frag_buf_free(ndev, umem);
+}
+
+static int umems_create(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
+{
+       int num;
+       int err;
+
+       for (num = 1; num <= 3; num++) {
+               err = create_umem(ndev, mvq, num);
+               if (err)
+                       goto err_umem;
+       }
+       return 0;
+
+err_umem:
+       for (num--; num > 0; num--)
+               umem_destroy(ndev, mvq, num);
+
+       return err;
+}
+
+static void umems_destroy(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
+{
+       int num;
+
+       for (num = 3; num > 0; num--)
+               umem_destroy(ndev, mvq, num);
+}
+
+static int get_queue_type(struct mlx5_vdpa_net *ndev)
+{
+       u32 type_mask;
+
+       type_mask = MLX5_CAP_DEV_VDPA_EMULATION(ndev->mvdev.mdev, virtio_queue_type);
+
+       /* prefer split queue */
+       if (type_mask & MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_SPLIT)
+               return MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT;
+
+       WARN_ON(!(type_mask & MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_PACKED));
+
+       return MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED;
+}
+
+static bool vq_is_tx(u16 idx)
+{
+       return idx % 2;
+}
+
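+/* Pack the checksum and TSO related virtio-net feature bits into the layout
+ * expected by the queue_feature_bit_mask_12_3 field of the virtqueue object.
+ */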
+static u16 get_features_12_3(u64 features)
+{
+       return (!!(features & BIT(VIRTIO_NET_F_HOST_TSO4)) << 9) |
+              (!!(features & BIT(VIRTIO_NET_F_HOST_TSO6)) << 8) |
+              (!!(features & BIT(VIRTIO_NET_F_CSUM)) << 7) |
+              (!!(features & BIT(VIRTIO_NET_F_GUEST_CSUM)) << 6);
+}
+
+static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
+{
+       int inlen = MLX5_ST_SZ_BYTES(create_virtio_net_q_in);
+       u32 out[MLX5_ST_SZ_DW(create_virtio_net_q_out)] = {};
+       void *obj_context;
+       void *cmd_hdr;
+       void *vq_ctx;
+       void *in;
+       int err;
+
+       err = umems_create(ndev, mvq);
+       if (err)
+               return err;
+
+       in = kzalloc(inlen, GFP_KERNEL);
+       if (!in) {
+               err = -ENOMEM;
+               goto err_alloc;
+       }
+
+       cmd_hdr = MLX5_ADDR_OF(create_virtio_net_q_in, in, general_obj_in_cmd_hdr);
+
+       MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+       MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_type, MLX5_OBJ_TYPE_VIRTIO_NET_Q);
+       MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid);
+
+       obj_context = MLX5_ADDR_OF(create_virtio_net_q_in, in, obj_context);
+       MLX5_SET(virtio_net_q_object, obj_context, hw_available_index, mvq->avail_idx);
+       MLX5_SET(virtio_net_q_object, obj_context, queue_feature_bit_mask_12_3,
+                get_features_12_3(ndev->mvdev.actual_features));
+       vq_ctx = MLX5_ADDR_OF(virtio_net_q_object, obj_context, virtio_q_context);
+       MLX5_SET(virtio_q, vq_ctx, virtio_q_type, get_queue_type(ndev));
+
+       if (vq_is_tx(mvq->index))
+               MLX5_SET(virtio_net_q_object, obj_context, tisn_or_qpn, ndev->res.tisn);
+
+       MLX5_SET(virtio_q, vq_ctx, event_mode, MLX5_VIRTIO_Q_EVENT_MODE_QP_MODE);
+       MLX5_SET(virtio_q, vq_ctx, queue_index, mvq->index);
+       MLX5_SET(virtio_q, vq_ctx, event_qpn_or_msix, mvq->fwqp.mqp.qpn);
+       MLX5_SET(virtio_q, vq_ctx, queue_size, mvq->num_ent);
+       MLX5_SET(virtio_q, vq_ctx, virtio_version_1_0,
+                !!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_F_VERSION_1)));
+       MLX5_SET64(virtio_q, vq_ctx, desc_addr, mvq->desc_addr);
+       MLX5_SET64(virtio_q, vq_ctx, used_addr, mvq->device_addr);
+       MLX5_SET64(virtio_q, vq_ctx, available_addr, mvq->driver_addr);
+       MLX5_SET(virtio_q, vq_ctx, virtio_q_mkey, ndev->mvdev.mr.mkey.key);
+       MLX5_SET(virtio_q, vq_ctx, umem_1_id, mvq->umem1.id);
+       MLX5_SET(virtio_q, vq_ctx, umem_1_size, mvq->umem1.size);
+       MLX5_SET(virtio_q, vq_ctx, umem_2_id, mvq->umem2.id);
+       MLX5_SET(virtio_q, vq_ctx, umem_2_size, mvq->umem2.size);
+       MLX5_SET(virtio_q, vq_ctx, umem_3_id, mvq->umem3.id);
+       MLX5_SET(virtio_q, vq_ctx, umem_3_size, mvq->umem3.size);
+       MLX5_SET(virtio_q, vq_ctx, pd, ndev->mvdev.res.pdn);
+       if (MLX5_CAP_DEV_VDPA_EMULATION(ndev->mvdev.mdev, eth_frame_offload_type))
+               MLX5_SET(virtio_q, vq_ctx, virtio_version_1_0, 1);
+
+       err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out));
+       if (err)
+               goto err_cmd;
+
+       kfree(in);
+       mvq->virtq_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+
+       return 0;
+
+err_cmd:
+       kfree(in);
+err_alloc:
+       umems_destroy(ndev, mvq);
+       return err;
+}
+
+static void destroy_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
+{
+       u32 in[MLX5_ST_SZ_DW(destroy_virtio_net_q_in)] = {};
+       u32 out[MLX5_ST_SZ_DW(destroy_virtio_net_q_out)] = {};
+
+       MLX5_SET(destroy_virtio_net_q_in, in, general_obj_out_cmd_hdr.opcode,
+                MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
+       MLX5_SET(destroy_virtio_net_q_in, in, general_obj_out_cmd_hdr.obj_id, mvq->virtq_id);
+       MLX5_SET(destroy_virtio_net_q_in, in, general_obj_out_cmd_hdr.uid, ndev->mvdev.res.uid);
+       MLX5_SET(destroy_virtio_net_q_in, in, general_obj_out_cmd_hdr.obj_type,
+                MLX5_OBJ_TYPE_VIRTIO_NET_Q);
+       if (mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out))) {
+               mlx5_vdpa_warn(&ndev->mvdev, "destroy virtqueue 0x%x\n", mvq->virtq_id);
+               return;
+       }
+       umems_destroy(ndev, mvq);
+}
+
+static u32 get_rqpn(struct mlx5_vdpa_virtqueue *mvq, bool fw)
+{
+       return fw ? mvq->vqqp.mqp.qpn : mvq->fwqp.mqp.qpn;
+}
+
+static u32 get_qpn(struct mlx5_vdpa_virtqueue *mvq, bool fw)
+{
+       return fw ? mvq->fwqp.mqp.qpn : mvq->vqqp.mqp.qpn;
+}
+
+static void alloc_inout(struct mlx5_vdpa_net *ndev, int cmd, void **in, int *inlen, void **out,
+                       int *outlen, u32 qpn, u32 rqpn)
+{
+       void *qpc;
+       void *pp;
+
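+       /* Build the input/output command buffers for the requested QP state
+        * transition. On failure, both *in and *out are set to NULL so the
+        * caller can detect the error.
+        */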
+       switch (cmd) {
+       case MLX5_CMD_OP_2RST_QP:
+               *inlen = MLX5_ST_SZ_BYTES(qp_2rst_in);
+               *outlen = MLX5_ST_SZ_BYTES(qp_2rst_out);
+               *in = kzalloc(*inlen, GFP_KERNEL);
+               *out = kzalloc(*outlen, GFP_KERNEL);
+               if (!*in || !*out)
+                       goto outerr;
+
+               MLX5_SET(qp_2rst_in, *in, opcode, cmd);
+               MLX5_SET(qp_2rst_in, *in, uid, ndev->mvdev.res.uid);
+               MLX5_SET(qp_2rst_in, *in, qpn, qpn);
+               break;
+       case MLX5_CMD_OP_RST2INIT_QP:
+               *inlen = MLX5_ST_SZ_BYTES(rst2init_qp_in);
+               *outlen = MLX5_ST_SZ_BYTES(rst2init_qp_out);
+               *in = kzalloc(*inlen, GFP_KERNEL);
+               *out = kzalloc(MLX5_ST_SZ_BYTES(rst2init_qp_out), GFP_KERNEL);
+               if (!*in || !*out)
+                       goto outerr;
+
+               MLX5_SET(rst2init_qp_in, *in, opcode, cmd);
+               MLX5_SET(rst2init_qp_in, *in, uid, ndev->mvdev.res.uid);
+               MLX5_SET(rst2init_qp_in, *in, qpn, qpn);
+               qpc = MLX5_ADDR_OF(rst2init_qp_in, *in, qpc);
+               MLX5_SET(qpc, qpc, remote_qpn, rqpn);
+               MLX5_SET(qpc, qpc, rwe, 1);
+               pp = MLX5_ADDR_OF(qpc, qpc, primary_address_path);
+               MLX5_SET(ads, pp, vhca_port_num, 1);
+               break;
+       case MLX5_CMD_OP_INIT2RTR_QP:
+               *inlen = MLX5_ST_SZ_BYTES(init2rtr_qp_in);
+               *outlen = MLX5_ST_SZ_BYTES(init2rtr_qp_out);
+               *in = kzalloc(*inlen, GFP_KERNEL);
+               *out = kzalloc(MLX5_ST_SZ_BYTES(init2rtr_qp_out), GFP_KERNEL);
+               if (!*in || !*out)
+                       goto outerr;
+
+               MLX5_SET(init2rtr_qp_in, *in, opcode, cmd);
+               MLX5_SET(init2rtr_qp_in, *in, uid, ndev->mvdev.res.uid);
+               MLX5_SET(init2rtr_qp_in, *in, qpn, qpn);
+               qpc = MLX5_ADDR_OF(rst2init_qp_in, *in, qpc);
+               MLX5_SET(qpc, qpc, mtu, MLX5_QPC_MTU_256_BYTES);
+               MLX5_SET(qpc, qpc, log_msg_max, 30);
+               MLX5_SET(qpc, qpc, remote_qpn, rqpn);
+               pp = MLX5_ADDR_OF(qpc, qpc, primary_address_path);
+               MLX5_SET(ads, pp, fl, 1);
+               break;
+       case MLX5_CMD_OP_RTR2RTS_QP:
+               *inlen = MLX5_ST_SZ_BYTES(rtr2rts_qp_in);
+               *outlen = MLX5_ST_SZ_BYTES(rtr2rts_qp_out);
+               *in = kzalloc(*inlen, GFP_KERNEL);
+               *out = kzalloc(MLX5_ST_SZ_BYTES(rtr2rts_qp_out), GFP_KERNEL);
+               if (!*in || !*out)
+                       goto outerr;
+
+               MLX5_SET(rtr2rts_qp_in, *in, opcode, cmd);
+               MLX5_SET(rtr2rts_qp_in, *in, uid, ndev->mvdev.res.uid);
+               MLX5_SET(rtr2rts_qp_in, *in, qpn, qpn);
+               qpc = MLX5_ADDR_OF(rst2init_qp_in, *in, qpc);
+               pp = MLX5_ADDR_OF(qpc, qpc, primary_address_path);
+               MLX5_SET(ads, pp, ack_timeout, 14);
+               MLX5_SET(qpc, qpc, retry_count, 7);
+               MLX5_SET(qpc, qpc, rnr_retry, 7);
+               break;
+       default:
+               goto outerr_nullify;
+       }
+
+       return;
+
+outerr:
+       kfree(*in);
+       kfree(*out);
+outerr_nullify:
+       *in = NULL;
+       *out = NULL;
+}
+
+static void free_inout(void *in, void *out)
+{
+       kfree(in);
+       kfree(out);
+}
+
+/* Two QPs are used by each virtqueue. One is used by the driver and one by
+ * the firmware. The fw argument indicates whether the QP being modified is the
+ * one used by the firmware.
+ */
+static int modify_qp(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, bool fw, int cmd)
+{
+       int outlen;
+       int inlen;
+       void *out;
+       void *in;
+       int err;
+
+       alloc_inout(ndev, cmd, &in, &inlen, &out, &outlen, get_qpn(mvq, fw), get_rqpn(mvq, fw));
+       if (!in || !out)
+               return -ENOMEM;
+
+       err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, outlen);
+       free_inout(in, out);
+       return err;
+}
+
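+/* Move both QPs through the RC state machine (RESET -> INIT -> RTR), then
+ * bring the firmware QP to RTS.
+ */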
+static int connect_qps(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
+{
+       int err;
+
+       err = modify_qp(ndev, mvq, true, MLX5_CMD_OP_2RST_QP);
+       if (err)
+               return err;
+
+       err = modify_qp(ndev, mvq, false, MLX5_CMD_OP_2RST_QP);
+       if (err)
+               return err;
+
+       err = modify_qp(ndev, mvq, true, MLX5_CMD_OP_RST2INIT_QP);
+       if (err)
+               return err;
+
+       err = modify_qp(ndev, mvq, false, MLX5_CMD_OP_RST2INIT_QP);
+       if (err)
+               return err;
+
+       err = modify_qp(ndev, mvq, true, MLX5_CMD_OP_INIT2RTR_QP);
+       if (err)
+               return err;
+
+       err = modify_qp(ndev, mvq, false, MLX5_CMD_OP_INIT2RTR_QP);
+       if (err)
+               return err;
+
+       return modify_qp(ndev, mvq, true, MLX5_CMD_OP_RTR2RTS_QP);
+}
+
+struct mlx5_virtq_attr {
+       u8 state;
+       u16 available_index;
+};
+
+static int query_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq,
+                          struct mlx5_virtq_attr *attr)
+{
+       int outlen = MLX5_ST_SZ_BYTES(query_virtio_net_q_out);
+       u32 in[MLX5_ST_SZ_DW(query_virtio_net_q_in)] = {};
+       void *out;
+       void *obj_context;
+       void *cmd_hdr;
+       int err;
+
+       out = kzalloc(outlen, GFP_KERNEL);
+       if (!out)
+               return -ENOMEM;
+
+       cmd_hdr = MLX5_ADDR_OF(query_virtio_net_q_in, in, general_obj_in_cmd_hdr);
+
+       MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, opcode, MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
+       MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_type, MLX5_OBJ_TYPE_VIRTIO_NET_Q);
+       MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_id, mvq->virtq_id);
+       MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid);
+       err = mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, outlen);
+       if (err)
+               goto err_cmd;
+
+       obj_context = MLX5_ADDR_OF(query_virtio_net_q_out, out, obj_context);
+       memset(attr, 0, sizeof(*attr));
+       attr->state = MLX5_GET(virtio_net_q_object, obj_context, state);
+       attr->available_index = MLX5_GET(virtio_net_q_object, obj_context, hw_available_index);
+       kfree(out);
+       return 0;
+
+err_cmd:
+       kfree(out);
+       return err;
+}
+
+static int modify_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int state)
+{
+       int inlen = MLX5_ST_SZ_BYTES(modify_virtio_net_q_in);
+       u32 out[MLX5_ST_SZ_DW(modify_virtio_net_q_out)] = {};
+       void *obj_context;
+       void *cmd_hdr;
+       void *in;
+       int err;
+
+       in = kzalloc(inlen, GFP_KERNEL);
+       if (!in)
+               return -ENOMEM;
+
+       cmd_hdr = MLX5_ADDR_OF(modify_virtio_net_q_in, in, general_obj_in_cmd_hdr);
+
+       MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);
+       MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_type, MLX5_OBJ_TYPE_VIRTIO_NET_Q);
+       MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_id, mvq->virtq_id);
+       MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid);
+
+       obj_context = MLX5_ADDR_OF(modify_virtio_net_q_in, in, obj_context);
+       MLX5_SET64(virtio_net_q_object, obj_context, modify_field_select,
+                  MLX5_VIRTQ_MODIFY_MASK_STATE);
+       MLX5_SET(virtio_net_q_object, obj_context, state, state);
+       err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out));
+       kfree(in);
+       if (!err)
+               mvq->fw_state = state;
+
+       return err;
+}
+
+static int setup_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
+{
+       u16 idx = mvq->index;
+       int err;
+
+       if (!mvq->num_ent)
+               return 0;
+
+       if (mvq->initialized) {
+               mlx5_vdpa_warn(&ndev->mvdev, "attempt re init\n");
+               return -EINVAL;
+       }
+
+       err = cq_create(ndev, idx, mvq->num_ent);
+       if (err)
+               return err;
+
+       err = qp_create(ndev, mvq, &mvq->fwqp);
+       if (err)
+               goto err_fwqp;
+
+       err = qp_create(ndev, mvq, &mvq->vqqp);
+       if (err)
+               goto err_vqqp;
+
+       err = connect_qps(ndev, mvq);
+       if (err)
+               goto err_connect;
+
+       err = create_virtqueue(ndev, mvq);
+       if (err)
+               goto err_connect;
+
+       if (mvq->ready) {
+               err = modify_virtqueue(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY);
+               if (err) {
+                       mlx5_vdpa_warn(&ndev->mvdev, "failed to modify to ready vq idx %d(%d)\n",
+                                      idx, err);
+                       goto err_connect;
+               }
+       }
+
+       mvq->initialized = true;
+       return 0;
+
+err_connect:
+       qp_destroy(ndev, &mvq->vqqp);
+err_vqqp:
+       qp_destroy(ndev, &mvq->fwqp);
+err_fwqp:
+       cq_destroy(ndev, idx);
+       return err;
+}
+
+static void suspend_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
+{
+       struct mlx5_virtq_attr attr;
+
+       if (!mvq->initialized)
+               return;
+
+       if (query_virtqueue(ndev, mvq, &attr)) {
+               mlx5_vdpa_warn(&ndev->mvdev, "failed to query virtqueue\n");
+               return;
+       }
+       if (mvq->fw_state != MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY)
+               return;
+
+       if (modify_virtqueue(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND))
+               mlx5_vdpa_warn(&ndev->mvdev, "modify to suspend failed\n");
+}
+
+static void suspend_vqs(struct mlx5_vdpa_net *ndev)
+{
+       int i;
+
+       for (i = 0; i < MLX5_MAX_SUPPORTED_VQS; i++)
+               suspend_vq(ndev, &ndev->vqs[i]);
+}
+
+static void teardown_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
+{
+       if (!mvq->initialized)
+               return;
+
+       suspend_vq(ndev, mvq);
+       destroy_virtqueue(ndev, mvq);
+       qp_destroy(ndev, &mvq->vqqp);
+       qp_destroy(ndev, &mvq->fwqp);
+       cq_destroy(ndev, mvq->index);
+       mvq->initialized = false;
+}
+
+static int create_rqt(struct mlx5_vdpa_net *ndev)
+{
+       int log_max_rqt;
+       __be32 *list;
+       void *rqtc;
+       int inlen;
+       void *in;
+       int i, j;
+       int err;
+
+       log_max_rqt = min_t(int, 1, MLX5_CAP_GEN(ndev->mvdev.mdev, log_max_rqt_size));
+       if (log_max_rqt < 1)
+               return -EOPNOTSUPP;
+
+       inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + (1 << log_max_rqt) * MLX5_ST_SZ_BYTES(rq_num);
+       in = kzalloc(inlen, GFP_KERNEL);
+       if (!in)
+               return -ENOMEM;
+
+       MLX5_SET(create_rqt_in, in, uid, ndev->mvdev.res.uid);
+       rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
+
+       MLX5_SET(rqtc, rqtc, list_q_type, MLX5_RQTC_LIST_Q_TYPE_VIRTIO_NET_Q);
+       MLX5_SET(rqtc, rqtc, rqt_max_size, 1 << log_max_rqt);
+       MLX5_SET(rqtc, rqtc, rqt_actual_size, 1);
+       list = MLX5_ADDR_OF(rqtc, rqtc, rq_num[0]);
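+       /* Populate the RQT with the initialized receive virtqueues only */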
+       for (i = 0, j = 0; j < ndev->mvdev.max_vqs; j++) {
+               if (!ndev->vqs[j].initialized)
+                       continue;
+
+               if (!vq_is_tx(ndev->vqs[j].index)) {
+                       list[i] = cpu_to_be32(ndev->vqs[j].virtq_id);
+                       i++;
+               }
+       }
+
+       err = mlx5_vdpa_create_rqt(&ndev->mvdev, in, inlen, &ndev->res.rqtn);
+       kfree(in);
+       if (err)
+               return err;
+
+       return 0;
+}
+
+static void destroy_rqt(struct mlx5_vdpa_net *ndev)
+{
+       mlx5_vdpa_destroy_rqt(&ndev->mvdev, ndev->res.rqtn);
+}
+
+static int create_tir(struct mlx5_vdpa_net *ndev)
+{
+#define HASH_IP_L4PORTS                                                                            \
+       (MLX5_HASH_FIELD_SEL_SRC_IP | MLX5_HASH_FIELD_SEL_DST_IP | MLX5_HASH_FIELD_SEL_L4_SPORT |  \
+        MLX5_HASH_FIELD_SEL_L4_DPORT)
+       static const u8 rx_hash_toeplitz_key[] = { 0x2c, 0xc6, 0x81, 0xd1, 0x5b, 0xdb, 0xf4, 0xf7,
+                                                  0xfc, 0xa2, 0x83, 0x19, 0xdb, 0x1a, 0x3e, 0x94,
+                                                  0x6b, 0x9e, 0x38, 0xd9, 0x2c, 0x9c, 0x03, 0xd1,
+                                                  0xad, 0x99, 0x44, 0xa7, 0xd9, 0x56, 0x3d, 0x59,
+                                                  0x06, 0x3c, 0x25, 0xf3, 0xfc, 0x1f, 0xdc, 0x2a };
+       void *rss_key;
+       void *outer;
+       void *tirc;
+       void *in;
+       int err;
+
+       in = kzalloc(MLX5_ST_SZ_BYTES(create_tir_in), GFP_KERNEL);
+       if (!in)
+               return -ENOMEM;
+
+       MLX5_SET(create_tir_in, in, uid, ndev->mvdev.res.uid);
+       tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
+       MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
+
+       MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
+       MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_TOEPLITZ);
+       rss_key = MLX5_ADDR_OF(tirc, tirc, rx_hash_toeplitz_key);
+       memcpy(rss_key, rx_hash_toeplitz_key, sizeof(rx_hash_toeplitz_key));
+
+       outer = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
+       MLX5_SET(rx_hash_field_select, outer, l3_prot_type, MLX5_L3_PROT_TYPE_IPV4);
+       MLX5_SET(rx_hash_field_select, outer, l4_prot_type, MLX5_L4_PROT_TYPE_TCP);
+       MLX5_SET(rx_hash_field_select, outer, selected_fields, HASH_IP_L4PORTS);
+
+       MLX5_SET(tirc, tirc, indirect_table, ndev->res.rqtn);
+       MLX5_SET(tirc, tirc, transport_domain, ndev->res.tdn);
+
+       err = mlx5_vdpa_create_tir(&ndev->mvdev, in, &ndev->res.tirn);
+       kfree(in);
+       return err;
+}
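create_tir() selects the Toeplitz hash function and programs a fixed 40-byte key; the hardware hashes the fields named by HASH_IP_L4PORTS to spread flows across the RQT entries. For reference, a software sketch of the Toeplitz algorithm itself (illustrative only; the real hashing and field extraction happen in hardware using the key programmed into the TIR):

/*
 * Illustrative Toeplitz hash: for every set bit of the input, XOR in the
 * 32-bit window of the key that starts at that bit position.
 */
static u32 toeplitz_hash(const u8 *key, size_t key_len,
                         const u8 *data, size_t data_len)
{
        u64 window = 0;
        u32 hash = 0;
        size_t i, kbit;
        int b;

        /* preload the first 32 key bits into the upper half of the window */
        for (i = 0; i < 4 && i < key_len; i++)
                window |= (u64)key[i] << (56 - 8 * i);
        kbit = 32;

        for (i = 0; i < data_len; i++) {
                for (b = 7; b >= 0; b--) {
                        if (data[i] & (1u << b))
                                hash ^= (u32)(window >> 32);
                        /* slide the key window forward by one bit */
                        window <<= 1;
                        if (kbit < 8 * key_len)
                                window |= (u64)((key[kbit / 8] >> (7 - kbit % 8)) & 1) << 32;
                        kbit++;
                }
        }
        return hash;
}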
+
+static void destroy_tir(struct mlx5_vdpa_net *ndev)
+{
+       mlx5_vdpa_destroy_tir(&ndev->mvdev, ndev->res.tirn);
+}
+
+static int add_fwd_to_tir(struct mlx5_vdpa_net *ndev)
+{
+       struct mlx5_flow_destination dest[2] = {};
+       struct mlx5_flow_table_attr ft_attr = {};
+       struct mlx5_flow_act flow_act = {};
+       struct mlx5_flow_namespace *ns;
+       int err;
+
+       /* for now, one entry, match all, forward to tir */
+       ft_attr.max_fte = 1;
+       ft_attr.autogroup.max_num_groups = 1;
+
+       ns = mlx5_get_flow_namespace(ndev->mvdev.mdev, MLX5_FLOW_NAMESPACE_BYPASS);
+       if (!ns) {
+               mlx5_vdpa_warn(&ndev->mvdev, "get flow namespace\n");
+               return -EOPNOTSUPP;
+       }
+
+       ndev->rxft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
+       if (IS_ERR(ndev->rxft))
+               return PTR_ERR(ndev->rxft);
+
+       ndev->rx_counter = mlx5_fc_create(ndev->mvdev.mdev, false);
+       if (IS_ERR(ndev->rx_counter)) {
+               err = PTR_ERR(ndev->rx_counter);
+               goto err_fc;
+       }
+
+       flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_COUNT;
+       dest[0].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+       dest[0].tir_num = ndev->res.tirn;
+       dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+       dest[1].counter_id = mlx5_fc_id(ndev->rx_counter);
+       ndev->rx_rule = mlx5_add_flow_rules(ndev->rxft, NULL, &flow_act, dest, 2);
+       if (IS_ERR(ndev->rx_rule)) {
+               err = PTR_ERR(ndev->rx_rule);
+               ndev->rx_rule = NULL;
+               goto err_rule;
+       }
+
+       return 0;
+
+err_rule:
+       mlx5_fc_destroy(ndev->mvdev.mdev, ndev->rx_counter);
+err_fc:
+       mlx5_destroy_flow_table(ndev->rxft);
+       return err;
+}
+
+static void remove_fwd_to_tir(struct mlx5_vdpa_net *ndev)
+{
+       if (!ndev->rx_rule)
+               return;
+
+       mlx5_del_flow_rules(ndev->rx_rule);
+       mlx5_fc_destroy(ndev->mvdev.mdev, ndev->rx_counter);
+       mlx5_destroy_flow_table(ndev->rxft);
+
+       ndev->rx_rule = NULL;
+}
+
+static void mlx5_vdpa_kick_vq(struct vdpa_device *vdev, u16 idx)
+{
+       struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+       struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+       struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
+
+       if (unlikely(!mvq->ready))
+               return;
+
+       iowrite16(idx, ndev->mvdev.res.kick_addr);
+}
+
+static int mlx5_vdpa_set_vq_address(struct vdpa_device *vdev, u16 idx, u64 desc_area,
+                                   u64 driver_area, u64 device_area)
+{
+       struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+       struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+       struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
+
+       mvq->desc_addr = desc_area;
+       mvq->device_addr = device_area;
+       mvq->driver_addr = driver_area;
+       return 0;
+}
+
+static void mlx5_vdpa_set_vq_num(struct vdpa_device *vdev, u16 idx, u32 num)
+{
+       struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+       struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+       struct mlx5_vdpa_virtqueue *mvq;
+
+       mvq = &ndev->vqs[idx];
+       mvq->num_ent = num;
+}
+
+static void mlx5_vdpa_set_vq_cb(struct vdpa_device *vdev, u16 idx, struct vdpa_callback *cb)
+{
+       struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+       struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+       struct mlx5_vdpa_virtqueue *vq = &ndev->vqs[idx];
+
+       vq->event_cb = *cb;
+}
+
+static void mlx5_vdpa_set_vq_ready(struct vdpa_device *vdev, u16 idx, bool ready)
+{
+       struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+       struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+       struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
+
+       if (!ready)
+               suspend_vq(ndev, mvq);
+
+       mvq->ready = ready;
+}
+
+static bool mlx5_vdpa_get_vq_ready(struct vdpa_device *vdev, u16 idx)
+{
+       struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+       struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+       struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
+
+       return mvq->ready;
+}
+
+static int mlx5_vdpa_set_vq_state(struct vdpa_device *vdev, u16 idx,
+                                 const struct vdpa_vq_state *state)
+{
+       struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+       struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+       struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
+
+       if (mvq->fw_state == MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY) {
+               mlx5_vdpa_warn(mvdev, "can't modify available index\n");
+               return -EINVAL;
+       }
+
+       mvq->avail_idx = state->avail_index;
+       return 0;
+}
+
+static int mlx5_vdpa_get_vq_state(struct vdpa_device *vdev, u16 idx, struct vdpa_vq_state *state)
+{
+       struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+       struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+       struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
+       struct mlx5_virtq_attr attr;
+       int err;
+
+       if (!mvq->initialized)
+               return -EAGAIN;
+
+       err = query_virtqueue(ndev, mvq, &attr);
+       if (err) {
+               mlx5_vdpa_warn(mvdev, "failed to query virtqueue\n");
+               return err;
+       }
+       state->avail_index = attr.available_index;
+       return 0;
+}
+
+static u32 mlx5_vdpa_get_vq_align(struct vdpa_device *vdev)
+{
+       return PAGE_SIZE;
+}
+
+enum { MLX5_VIRTIO_NET_F_GUEST_CSUM = 1 << 9,
+       MLX5_VIRTIO_NET_F_CSUM = 1 << 10,
+       MLX5_VIRTIO_NET_F_HOST_TSO6 = 1 << 11,
+       MLX5_VIRTIO_NET_F_HOST_TSO4 = 1 << 12,
+};
+
+static u64 mlx_to_vritio_features(u16 dev_features)
+{
+       u64 result = 0;
+
+       if (dev_features & MLX5_VIRTIO_NET_F_GUEST_CSUM)
+               result |= BIT(VIRTIO_NET_F_GUEST_CSUM);
+       if (dev_features & MLX5_VIRTIO_NET_F_CSUM)
+               result |= BIT(VIRTIO_NET_F_CSUM);
+       if (dev_features & MLX5_VIRTIO_NET_F_HOST_TSO6)
+               result |= BIT(VIRTIO_NET_F_HOST_TSO6);
+       if (dev_features & MLX5_VIRTIO_NET_F_HOST_TSO4)
+               result |= BIT(VIRTIO_NET_F_HOST_TSO4);
+
+       return result;
+}
+
+static u64 mlx5_vdpa_get_features(struct vdpa_device *vdev)
+{
+       struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+       struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+       u16 dev_features;
+
+       dev_features = MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, device_features_bits_mask);
+       ndev->mvdev.mlx_features = mlx_to_vritio_features(dev_features);
+       if (MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, virtio_version_1_0))
+               ndev->mvdev.mlx_features |= BIT(VIRTIO_F_VERSION_1);
+       ndev->mvdev.mlx_features |= BIT(VIRTIO_F_ACCESS_PLATFORM);
+       print_features(mvdev, ndev->mvdev.mlx_features, false);
+       return ndev->mvdev.mlx_features;
+}
+
+static int verify_min_features(struct mlx5_vdpa_dev *mvdev, u64 features)
+{
+       if (!(features & BIT(VIRTIO_F_ACCESS_PLATFORM)))
+               return -EOPNOTSUPP;
+
+       return 0;
+}
+
+static int setup_virtqueues(struct mlx5_vdpa_net *ndev)
+{
+       int err;
+       int i;
+
+       for (i = 0; i < 2 * mlx5_vdpa_max_qps(ndev->mvdev.max_vqs); i++) {
+               err = setup_vq(ndev, &ndev->vqs[i]);
+               if (err)
+                       goto err_vq;
+       }
+
+       return 0;
+
+err_vq:
+       for (--i; i >= 0; i--)
+               teardown_vq(ndev, &ndev->vqs[i]);
+
+       return err;
+}
+
+static void teardown_virtqueues(struct mlx5_vdpa_net *ndev)
+{
+       struct mlx5_vdpa_virtqueue *mvq;
+       int i;
+
+       for (i = ndev->mvdev.max_vqs - 1; i >= 0; i--) {
+               mvq = &ndev->vqs[i];
+               if (!mvq->initialized)
+                       continue;
+
+               teardown_vq(ndev, mvq);
+       }
+}
+
+/* TODO: cross-endian support */
+static inline bool mlx5_vdpa_is_little_endian(struct mlx5_vdpa_dev *mvdev)
+{
+       return virtio_legacy_is_little_endian() ||
+               (mvdev->actual_features & (1ULL << VIRTIO_F_VERSION_1));
+}
+
+static int mlx5_vdpa_set_features(struct vdpa_device *vdev, u64 features)
+{
+       struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+       struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+       int err;
+
+       print_features(mvdev, features, true);
+
+       err = verify_min_features(mvdev, features);
+       if (err)
+               return err;
+
+       ndev->mvdev.actual_features = features & ndev->mvdev.mlx_features;
+       ndev->config.mtu = __cpu_to_virtio16(mlx5_vdpa_is_little_endian(mvdev),
+                                            ndev->mtu);
+       return err;
+}
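config.mtu is stored above as a __virtio16, so its byte order depends on whether VIRTIO_F_VERSION_1 was negotiated (little-endian) or the legacy rule applies. A consumer reading it back through get_config() has to convert it with the same rule; a minimal sketch under that assumption (example_read_mtu() and its version_1 parameter are illustrative, the vdpa and virtio helpers are the real ones):

/* Sketch: read the MTU back out of config space with matching endianness. */
static u16 example_read_mtu(struct vdpa_device *vdev, bool version_1)
{
        const struct vdpa_config_ops *ops = vdev->config;
        __virtio16 raw;

        ops->get_config(vdev, offsetof(struct virtio_net_config, mtu),
                        &raw, sizeof(raw));
        return __virtio16_to_cpu(virtio_legacy_is_little_endian() || version_1,
                                 raw);
}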
+
+static void mlx5_vdpa_set_config_cb(struct vdpa_device *vdev, struct vdpa_callback *cb)
+{
+       /* not implemented */
+       mlx5_vdpa_warn(to_mvdev(vdev), "set config callback not supported\n");
+}
+
+#define MLX5_VDPA_MAX_VQ_ENTRIES 256
+static u16 mlx5_vdpa_get_vq_num_max(struct vdpa_device *vdev)
+{
+       return MLX5_VDPA_MAX_VQ_ENTRIES;
+}
+
+static u32 mlx5_vdpa_get_device_id(struct vdpa_device *vdev)
+{
+       return VIRTIO_ID_NET;
+}
+
+static u32 mlx5_vdpa_get_vendor_id(struct vdpa_device *vdev)
+{
+       return PCI_VENDOR_ID_MELLANOX;
+}
+
+static u8 mlx5_vdpa_get_status(struct vdpa_device *vdev)
+{
+       struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+       struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+
+       print_status(mvdev, ndev->mvdev.status, false);
+       return ndev->mvdev.status;
+}
+
+static int save_channel_info(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
+{
+       struct mlx5_vq_restore_info *ri = &mvq->ri;
+       struct mlx5_virtq_attr attr;
+       int err;
+
+       if (!mvq->initialized)
+               return 0;
+
+       err = query_virtqueue(ndev, mvq, &attr);
+       if (err)
+               return err;
+
+       ri->avail_index = attr.available_index;
+       ri->ready = mvq->ready;
+       ri->num_ent = mvq->num_ent;
+       ri->desc_addr = mvq->desc_addr;
+       ri->device_addr = mvq->device_addr;
+       ri->driver_addr = mvq->driver_addr;
+       ri->cb = mvq->event_cb;
+       ri->restore = true;
+       return 0;
+}
+
+static int save_channels_info(struct mlx5_vdpa_net *ndev)
+{
+       int i;
+
+       for (i = 0; i < ndev->mvdev.max_vqs; i++) {
+               memset(&ndev->vqs[i].ri, 0, sizeof(ndev->vqs[i].ri));
+               save_channel_info(ndev, &ndev->vqs[i]);
+       }
+       return 0;
+}
+
+static void mlx5_clear_vqs(struct mlx5_vdpa_net *ndev)
+{
+       int i;
+
+       for (i = 0; i < ndev->mvdev.max_vqs; i++)
+               memset(&ndev->vqs[i], 0, offsetof(struct mlx5_vdpa_virtqueue, ri));
+}
+
+static void restore_channels_info(struct mlx5_vdpa_net *ndev)
+{
+       struct mlx5_vdpa_virtqueue *mvq;
+       struct mlx5_vq_restore_info *ri;
+       int i;
+
+       mlx5_clear_vqs(ndev);
+       init_mvqs(ndev);
+       for (i = 0; i < ndev->mvdev.max_vqs; i++) {
+               mvq = &ndev->vqs[i];
+               ri = &mvq->ri;
+               if (!ri->restore)
+                       continue;
+
+               mvq->avail_idx = ri->avail_index;
+               mvq->ready = ri->ready;
+               mvq->num_ent = ri->num_ent;
+               mvq->desc_addr = ri->desc_addr;
+               mvq->device_addr = ri->device_addr;
+               mvq->driver_addr = ri->driver_addr;
+               mvq->event_cb = ri->cb;
+       }
+}
+
+static int mlx5_vdpa_change_map(struct mlx5_vdpa_net *ndev, struct vhost_iotlb *iotlb)
+{
+       int err;
+
+       suspend_vqs(ndev);
+       err = save_channels_info(ndev);
+       if (err)
+               goto err_mr;
+
+       teardown_driver(ndev);
+       mlx5_vdpa_destroy_mr(&ndev->mvdev);
+       err = mlx5_vdpa_create_mr(&ndev->mvdev, iotlb);
+       if (err)
+               goto err_mr;
+
+       restore_channels_info(ndev);
+       err = setup_driver(ndev);
+       if (err)
+               goto err_setup;
+
+       return 0;
+
+err_setup:
+       mlx5_vdpa_destroy_mr(&ndev->mvdev);
+err_mr:
+       return err;
+}
+
+static int setup_driver(struct mlx5_vdpa_net *ndev)
+{
+       int err;
+
+       mutex_lock(&ndev->reslock);
+       if (ndev->setup) {
+               mlx5_vdpa_warn(&ndev->mvdev, "setup driver called for already setup driver\n");
+               err = 0;
+               goto out;
+       }
+       err = setup_virtqueues(ndev);
+       if (err) {
+               mlx5_vdpa_warn(&ndev->mvdev, "setup_virtqueues\n");
+               goto out;
+       }
+
+       err = create_rqt(ndev);
+       if (err) {
+               mlx5_vdpa_warn(&ndev->mvdev, "create_rqt\n");
+               goto err_rqt;
+       }
+
+       err = create_tir(ndev);
+       if (err) {
+               mlx5_vdpa_warn(&ndev->mvdev, "create_tir\n");
+               goto err_tir;
+       }
+
+       err = add_fwd_to_tir(ndev);
+       if (err) {
+               mlx5_vdpa_warn(&ndev->mvdev, "add_fwd_to_tir\n");
+               goto err_fwd;
+       }
+       ndev->setup = true;
+       mutex_unlock(&ndev->reslock);
+
+       return 0;
+
+err_fwd:
+       destroy_tir(ndev);
+err_tir:
+       destroy_rqt(ndev);
+err_rqt:
+       teardown_virtqueues(ndev);
+out:
+       mutex_unlock(&ndev->reslock);
+       return err;
+}
+
+static void teardown_driver(struct mlx5_vdpa_net *ndev)
+{
+       mutex_lock(&ndev->reslock);
+       if (!ndev->setup)
+               goto out;
+
+       remove_fwd_to_tir(ndev);
+       destroy_tir(ndev);
+       destroy_rqt(ndev);
+       teardown_virtqueues(ndev);
+       ndev->setup = false;
+out:
+       mutex_unlock(&ndev->reslock);
+}
+
+static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
+{
+       struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+       struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+       int err;
+
+       print_status(mvdev, status, true);
+       if (!status) {
+               mlx5_vdpa_info(mvdev, "performing device reset\n");
+               teardown_driver(ndev);
+               mlx5_vdpa_destroy_mr(&ndev->mvdev);
+               ndev->mvdev.status = 0;
+               ndev->mvdev.mlx_features = 0;
+               ++mvdev->generation;
+               return;
+       }
+
+       if ((status ^ ndev->mvdev.status) & VIRTIO_CONFIG_S_DRIVER_OK) {
+               if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
+                       err = setup_driver(ndev);
+                       if (err) {
+                               mlx5_vdpa_warn(mvdev, "failed to setup driver\n");
+                               goto err_setup;
+                       }
+               } else {
+                       mlx5_vdpa_warn(mvdev, "did not expect DRIVER_OK to be cleared\n");
+                       return;
+               }
+       }
+
+       ndev->mvdev.status = status;
+       return;
+
+err_setup:
+       mlx5_vdpa_destroy_mr(&ndev->mvdev);
+       ndev->mvdev.status |= VIRTIO_CONFIG_S_FAILED;
+}
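set_status() above implements the standard virtio status handshake: writing 0 resets the device, and setting DRIVER_OK is what drives setup_driver(). A condensed sketch of the sequence a bus driver walks through (the helper name is illustrative; the VIRTIO_CONFIG_S_* bits are the standard ones):

/* Sketch of the status sequence that lands in set_status() above. */
static void example_status_handshake(struct vdpa_device *vdev)
{
        const struct vdpa_config_ops *ops = vdev->config;
        u8 s = 0;

        ops->set_status(vdev, 0);               /* reset */
        s |= VIRTIO_CONFIG_S_ACKNOWLEDGE;       /* device noticed */
        ops->set_status(vdev, s);
        s |= VIRTIO_CONFIG_S_DRIVER;            /* driver found */
        ops->set_status(vdev, s);
        /* feature negotiation and FEATURES_OK would happen here */
        s |= VIRTIO_CONFIG_S_DRIVER_OK;         /* triggers setup_driver() */
        ops->set_status(vdev, s);
}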
+
+static void mlx5_vdpa_get_config(struct vdpa_device *vdev, unsigned int offset, void *buf,
+                                unsigned int len)
+{
+       struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+       struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+
+       if (offset + len < sizeof(struct virtio_net_config))
+               memcpy(buf, (u8 *)&ndev->config + offset, len);
+}
+
+static void mlx5_vdpa_set_config(struct vdpa_device *vdev, unsigned int offset, const void *buf,
+                                unsigned int len)
+{
+       /* not supported */
+}
+
+static u32 mlx5_vdpa_get_generation(struct vdpa_device *vdev)
+{
+       struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+
+       return mvdev->generation;
+}
+
+static int mlx5_vdpa_set_map(struct vdpa_device *vdev, struct vhost_iotlb *iotlb)
+{
+       struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+       struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+       bool change_map;
+       int err;
+
+       err = mlx5_vdpa_handle_set_map(mvdev, iotlb, &change_map);
+       if (err) {
+               mlx5_vdpa_warn(mvdev, "set map failed(%d)\n", err);
+               return err;
+       }
+
+       if (change_map)
+               return mlx5_vdpa_change_map(ndev, iotlb);
+
+       return 0;
+}
+
+static void mlx5_vdpa_free(struct vdpa_device *vdev)
+{
+       struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+       struct mlx5_vdpa_net *ndev;
+
+       ndev = to_mlx5_vdpa_ndev(mvdev);
+
+       free_resources(ndev);
+       mlx5_vdpa_free_resources(&ndev->mvdev);
+       mutex_destroy(&ndev->reslock);
+}
+
+static struct vdpa_notification_area mlx5_get_vq_notification(struct vdpa_device *vdev, u16 idx)
+{
+       struct vdpa_notification_area ret = {};
+
+       return ret;
+}
+
+static int mlx5_get_vq_irq(struct vdpa_device *vdv, u16 idx)
+{
+       return -EOPNOTSUPP;
+}
+
+static const struct vdpa_config_ops mlx5_vdpa_ops = {
+       .set_vq_address = mlx5_vdpa_set_vq_address,
+       .set_vq_num = mlx5_vdpa_set_vq_num,
+       .kick_vq = mlx5_vdpa_kick_vq,
+       .set_vq_cb = mlx5_vdpa_set_vq_cb,
+       .set_vq_ready = mlx5_vdpa_set_vq_ready,
+       .get_vq_ready = mlx5_vdpa_get_vq_ready,
+       .set_vq_state = mlx5_vdpa_set_vq_state,
+       .get_vq_state = mlx5_vdpa_get_vq_state,
+       .get_vq_notification = mlx5_get_vq_notification,
+       .get_vq_irq = mlx5_get_vq_irq,
+       .get_vq_align = mlx5_vdpa_get_vq_align,
+       .get_features = mlx5_vdpa_get_features,
+       .set_features = mlx5_vdpa_set_features,
+       .set_config_cb = mlx5_vdpa_set_config_cb,
+       .get_vq_num_max = mlx5_vdpa_get_vq_num_max,
+       .get_device_id = mlx5_vdpa_get_device_id,
+       .get_vendor_id = mlx5_vdpa_get_vendor_id,
+       .get_status = mlx5_vdpa_get_status,
+       .set_status = mlx5_vdpa_set_status,
+       .get_config = mlx5_vdpa_get_config,
+       .set_config = mlx5_vdpa_set_config,
+       .get_generation = mlx5_vdpa_get_generation,
+       .set_map = mlx5_vdpa_set_map,
+       .free = mlx5_vdpa_free,
+};
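The ops table above is consumed by a vdpa bus driver such as vhost-vdpa or virtio-vdpa. A rough per-virtqueue bring-up against these callbacks might look like the sketch below; the function name and the address/size arguments are placeholders:

/* Sketch: per-virtqueue setup as a vdpa bus driver would issue it. */
static int example_setup_vq(struct vdpa_device *vdev, u16 idx,
                            u64 desc, u64 driver, u64 device, u32 num)
{
        const struct vdpa_config_ops *ops = vdev->config;
        int err;

        ops->set_vq_num(vdev, idx, num);        /* ring size, at most get_vq_num_max() */
        err = ops->set_vq_address(vdev, idx, desc, driver, device);
        if (err)
                return err;

        ops->set_vq_ready(vdev, idx, true);     /* mark the queue usable */
        ops->kick_vq(vdev, idx);                /* notify the device (a doorbell write here) */
        return 0;
}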
+
+static int alloc_resources(struct mlx5_vdpa_net *ndev)
+{
+       struct mlx5_vdpa_net_resources *res = &ndev->res;
+       int err;
+
+       if (res->valid) {
+               mlx5_vdpa_warn(&ndev->mvdev, "resources already allocated\n");
+               return -EEXIST;
+       }
+
+       err = mlx5_vdpa_alloc_transport_domain(&ndev->mvdev, &res->tdn);
+       if (err)
+               return err;
+
+       err = create_tis(ndev);
+       if (err)
+               goto err_tis;
+
+       res->valid = true;
+
+       return 0;
+
+err_tis:
+       mlx5_vdpa_dealloc_transport_domain(&ndev->mvdev, res->tdn);
+       return err;
+}
+
+static void free_resources(struct mlx5_vdpa_net *ndev)
+{
+       struct mlx5_vdpa_net_resources *res = &ndev->res;
+
+       if (!res->valid)
+               return;
+
+       destroy_tis(ndev);
+       mlx5_vdpa_dealloc_transport_domain(&ndev->mvdev, res->tdn);
+       res->valid = false;
+}
+
+static void init_mvqs(struct mlx5_vdpa_net *ndev)
+{
+       struct mlx5_vdpa_virtqueue *mvq;
+       int i;
+
+       for (i = 0; i < 2 * mlx5_vdpa_max_qps(ndev->mvdev.max_vqs); ++i) {
+               mvq = &ndev->vqs[i];
+               memset(mvq, 0, offsetof(struct mlx5_vdpa_virtqueue, ri));
+               mvq->index = i;
+               mvq->ndev = ndev;
+               mvq->fwqp.fw = true;
+       }
+       for (; i < ndev->mvdev.max_vqs; i++) {
+               mvq = &ndev->vqs[i];
+               memset(mvq, 0, offsetof(struct mlx5_vdpa_virtqueue, ri));
+               mvq->index = i;
+               mvq->ndev = ndev;
+       }
+}
+
+void *mlx5_vdpa_add_dev(struct mlx5_core_dev *mdev)
+{
+       struct virtio_net_config *config;
+       struct mlx5_vdpa_dev *mvdev;
+       struct mlx5_vdpa_net *ndev;
+       u32 max_vqs;
+       int err;
+
+       /* we save one virtqueue for control virtqueue should we require it */
+       max_vqs = MLX5_CAP_DEV_VDPA_EMULATION(mdev, max_num_virtio_queues);
+       max_vqs = min_t(u32, max_vqs, MLX5_MAX_SUPPORTED_VQS);
+
+       ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, mdev->device, &mlx5_vdpa_ops,
+                                2 * mlx5_vdpa_max_qps(max_vqs));
+       if (IS_ERR(ndev))
+               return ndev;
+
+       ndev->mvdev.max_vqs = max_vqs;
+       mvdev = &ndev->mvdev;
+       mvdev->mdev = mdev;
+       init_mvqs(ndev);
+       mutex_init(&ndev->reslock);
+       config = &ndev->config;
+       err = mlx5_query_nic_vport_mtu(mdev, &ndev->mtu);
+       if (err)
+               goto err_mtu;
+
+       err = mlx5_query_nic_vport_mac_address(mdev, 0, 0, config->mac);
+       if (err)
+               goto err_mtu;
+
+       mvdev->vdev.dma_dev = mdev->device;
+       err = mlx5_vdpa_alloc_resources(&ndev->mvdev);
+       if (err)
+               goto err_mtu;
+
+       err = alloc_resources(ndev);
+       if (err)
+               goto err_res;
+
+       err = vdpa_register_device(&mvdev->vdev);
+       if (err)
+               goto err_reg;
+
+       return ndev;
+
+err_reg:
+       free_resources(ndev);
+err_res:
+       mlx5_vdpa_free_resources(&ndev->mvdev);
+err_mtu:
+       mutex_destroy(&ndev->reslock);
+       put_device(&mvdev->vdev.dev);
+       return ERR_PTR(err);
+}
+
+void mlx5_vdpa_remove_dev(struct mlx5_vdpa_dev *mvdev)
+{
+       vdpa_unregister_device(&mvdev->vdev);
+}
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.h b/drivers/vdpa/mlx5/net/mlx5_vnet.h
new file mode 100644 (file)
index 0000000..f2d6d68
--- /dev/null
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020 Mellanox Technologies Ltd. */
+
+#ifndef __MLX5_VNET_H_
+#define __MLX5_VNET_H_
+
+#include <linux/vdpa.h>
+#include <linux/virtio_net.h>
+#include <linux/vringh.h>
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/cq.h>
+#include <linux/mlx5/qp.h>
+#include "mlx5_vdpa.h"
+
+static inline u32 mlx5_vdpa_max_qps(int max_vqs)
+{
+       return max_vqs / 2;
+}
+
+#define to_mlx5_vdpa_ndev(__mvdev) container_of(__mvdev, struct mlx5_vdpa_net, mvdev)
+void *mlx5_vdpa_add_dev(struct mlx5_core_dev *mdev);
+void mlx5_vdpa_remove_dev(struct mlx5_vdpa_dev *mvdev);
+
+#endif /* __MLX5_VNET_H_ */
index de211ef..a69ffc9 100644 (file)
@@ -61,6 +61,7 @@ static void vdpa_release_dev(struct device *d)
  * initialized but before registered.
  * @parent: the parent device
  * @config: the bus operations that is supported by this device
+ * @nvqs: number of virtqueues supported by this device
  * @size: size of the parent structure that contains private data
  *
  * Driver should use vdpa_alloc_device() wrapper macro instead of
@@ -71,6 +72,7 @@ static void vdpa_release_dev(struct device *d)
  */
 struct vdpa_device *__vdpa_alloc_device(struct device *parent,
                                        const struct vdpa_config_ops *config,
+                                       int nvqs,
                                        size_t size)
 {
        struct vdpa_device *vdev;
@@ -96,6 +98,8 @@ struct vdpa_device *__vdpa_alloc_device(struct device *parent,
        vdev->dev.release = vdpa_release_dev;
        vdev->index = err;
        vdev->config = config;
+       vdev->features_valid = false;
+       vdev->nvqs = nvqs;
 
        err = dev_set_name(&vdev->dev, "vdpa%u", vdev->index);
        if (err)
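With the new nvqs argument, a vdpa driver states its virtqueue count at allocation time through the vdpa_alloc_device() wrapper, exactly as the mlx5 and simulator drivers above now do. A minimal hedged example; struct my_vdpa, my_config_ops and my_probe() are placeholders:

/* Sketch: allocating a vdpa device with the added nvqs argument. */
static const struct vdpa_config_ops my_config_ops = {
        /* callbacks as in the tables above */
};

struct my_vdpa {
        struct vdpa_device vdpa;        /* first member, so IS_ERR() on the container works */
        /* driver private state */
};

static struct my_vdpa *my_probe(struct device *parent)
{
        struct my_vdpa *dev;

        /* nvqs = 2: one receive queue and one transmit queue */
        dev = vdpa_alloc_device(struct my_vdpa, vdpa, parent, &my_config_ops, 2);
        if (IS_ERR(dev))
                return dev;

        /* ... driver init, then vdpa_register_device(&dev->vdpa) ... */
        return dev;
}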
index c7334cc..62d6403 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/etherdevice.h>
 #include <linux/vringh.h>
 #include <linux/vdpa.h>
+#include <linux/virtio_byteorder.h>
 #include <linux/vhost_iotlb.h>
 #include <uapi/linux/virtio_config.h>
 #include <uapi/linux/virtio_net.h>
 #define DRV_DESC     "vDPA Device Simulator"
 #define DRV_LICENSE  "GPL v2"
 
+static int batch_mapping = 1;
+module_param(batch_mapping, int, 0444);
+MODULE_PARM_DESC(batch_mapping, "Batched mapping 1 - Enable; 0 - Disable");
+
 struct vdpasim_virtqueue {
        struct vringh vring;
        struct vringh_kiov iov;
@@ -55,12 +60,12 @@ struct vdpasim_virtqueue {
 
 static u64 vdpasim_features = (1ULL << VIRTIO_F_ANY_LAYOUT) |
                              (1ULL << VIRTIO_F_VERSION_1)  |
-                             (1ULL << VIRTIO_F_IOMMU_PLATFORM);
+                             (1ULL << VIRTIO_F_ACCESS_PLATFORM);
 
 /* State of each vdpasim device */
 struct vdpasim {
        struct vdpa_device vdpa;
-       struct vdpasim_virtqueue vqs[2];
+       struct vdpasim_virtqueue vqs[VDPASIM_VQ_NUM];
        struct work_struct work;
        /* spinlock to synchronize virtqueue state */
        spinlock_t lock;
@@ -70,8 +75,27 @@ struct vdpasim {
        u32 status;
        u32 generation;
        u64 features;
+       /* spinlock to synchronize iommu table */
+       spinlock_t iommu_lock;
 };
 
+/* TODO: cross-endian support */
+static inline bool vdpasim_is_little_endian(struct vdpasim *vdpasim)
+{
+       return virtio_legacy_is_little_endian() ||
+               (vdpasim->features & (1ULL << VIRTIO_F_VERSION_1));
+}
+
+static inline u16 vdpasim16_to_cpu(struct vdpasim *vdpasim, __virtio16 val)
+{
+       return __virtio16_to_cpu(vdpasim_is_little_endian(vdpasim), val);
+}
+
+static inline __virtio16 cpu_to_vdpasim16(struct vdpasim *vdpasim, u16 val)
+{
+       return __cpu_to_virtio16(vdpasim_is_little_endian(vdpasim), val);
+}
+
 static struct vdpasim *vdpasim_dev;
 
 static struct vdpasim *vdpa_to_sim(struct vdpa_device *vdpa)
@@ -118,7 +142,9 @@ static void vdpasim_reset(struct vdpasim *vdpasim)
        for (i = 0; i < VDPASIM_VQ_NUM; i++)
                vdpasim_vq_reset(&vdpasim->vqs[i]);
 
+       spin_lock(&vdpasim->iommu_lock);
        vhost_iotlb_reset(vdpasim->iommu);
+       spin_unlock(&vdpasim->iommu_lock);
 
        vdpasim->features = 0;
        vdpasim->status = 0;
@@ -236,8 +262,10 @@ static dma_addr_t vdpasim_map_page(struct device *dev, struct page *page,
        /* For simplicity, use identical mapping to avoid e.g iova
         * allocator.
         */
+       spin_lock(&vdpasim->iommu_lock);
        ret = vhost_iotlb_add_range(iommu, pa, pa + size - 1,
                                    pa, dir_to_perm(dir));
+       spin_unlock(&vdpasim->iommu_lock);
        if (ret)
                return DMA_MAPPING_ERROR;
 
@@ -251,8 +279,10 @@ static void vdpasim_unmap_page(struct device *dev, dma_addr_t dma_addr,
        struct vdpasim *vdpasim = dev_to_sim(dev);
        struct vhost_iotlb *iommu = vdpasim->iommu;
 
+       spin_lock(&vdpasim->iommu_lock);
        vhost_iotlb_del_range(iommu, (u64)dma_addr,
                              (u64)dma_addr + size - 1);
+       spin_unlock(&vdpasim->iommu_lock);
 }
 
 static void *vdpasim_alloc_coherent(struct device *dev, size_t size,
@@ -264,9 +294,10 @@ static void *vdpasim_alloc_coherent(struct device *dev, size_t size,
        void *addr = kmalloc(size, flag);
        int ret;
 
-       if (!addr)
+       spin_lock(&vdpasim->iommu_lock);
+       if (!addr) {
                *dma_addr = DMA_MAPPING_ERROR;
-       else {
+       } else {
                u64 pa = virt_to_phys(addr);
 
                ret = vhost_iotlb_add_range(iommu, (u64)pa,
@@ -279,6 +310,7 @@ static void *vdpasim_alloc_coherent(struct device *dev, size_t size,
                } else
                        *dma_addr = (dma_addr_t)pa;
        }
+       spin_unlock(&vdpasim->iommu_lock);
 
        return addr;
 }
@@ -290,8 +322,11 @@ static void vdpasim_free_coherent(struct device *dev, size_t size,
        struct vdpasim *vdpasim = dev_to_sim(dev);
        struct vhost_iotlb *iommu = vdpasim->iommu;
 
+       spin_lock(&vdpasim->iommu_lock);
        vhost_iotlb_del_range(iommu, (u64)dma_addr,
                              (u64)dma_addr + size - 1);
+       spin_unlock(&vdpasim->iommu_lock);
+
        kfree(phys_to_virt((uintptr_t)dma_addr));
 }
 
@@ -303,21 +338,27 @@ static const struct dma_map_ops vdpasim_dma_ops = {
 };
 
 static const struct vdpa_config_ops vdpasim_net_config_ops;
+static const struct vdpa_config_ops vdpasim_net_batch_config_ops;
 
 static struct vdpasim *vdpasim_create(void)
 {
-       struct virtio_net_config *config;
+       const struct vdpa_config_ops *ops;
        struct vdpasim *vdpasim;
        struct device *dev;
        int ret = -ENOMEM;
 
-       vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL,
-                                   &vdpasim_net_config_ops);
+       if (batch_mapping)
+               ops = &vdpasim_net_batch_config_ops;
+       else
+               ops = &vdpasim_net_config_ops;
+
+       vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL, ops, VDPASIM_VQ_NUM);
        if (!vdpasim)
                goto err_alloc;
 
        INIT_WORK(&vdpasim->work, vdpasim_work);
        spin_lock_init(&vdpasim->lock);
+       spin_lock_init(&vdpasim->iommu_lock);
 
        dev = &vdpasim->vdpa.dev;
        dev->coherent_dma_mask = DMA_BIT_MASK(64);
@@ -331,10 +372,7 @@ static struct vdpasim *vdpasim_create(void)
        if (!vdpasim->buffer)
                goto err_iommu;
 
-       config = &vdpasim->config;
-       config->mtu = 1500;
-       config->status = VIRTIO_NET_S_LINK_UP;
-       eth_random_addr(config->mac);
+       eth_random_addr(vdpasim->config.mac);
 
        vringh_set_iotlb(&vdpasim->vqs[0].vring, vdpasim->iommu);
        vringh_set_iotlb(&vdpasim->vqs[1].vring, vdpasim->iommu);
@@ -413,26 +451,29 @@ static bool vdpasim_get_vq_ready(struct vdpa_device *vdpa, u16 idx)
        return vq->ready;
 }
 
-static int vdpasim_set_vq_state(struct vdpa_device *vdpa, u16 idx, u64 state)
+static int vdpasim_set_vq_state(struct vdpa_device *vdpa, u16 idx,
+                               const struct vdpa_vq_state *state)
 {
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
        struct vringh *vrh = &vq->vring;
 
        spin_lock(&vdpasim->lock);
-       vrh->last_avail_idx = state;
+       vrh->last_avail_idx = state->avail_index;
        spin_unlock(&vdpasim->lock);
 
        return 0;
 }
 
-static u64 vdpasim_get_vq_state(struct vdpa_device *vdpa, u16 idx)
+static int vdpasim_get_vq_state(struct vdpa_device *vdpa, u16 idx,
+                               struct vdpa_vq_state *state)
 {
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
        struct vringh *vrh = &vq->vring;
 
-       return vrh->last_avail_idx;
+       state->avail_index = vrh->last_avail_idx;
+       return 0;
 }
 
 static u32 vdpasim_get_vq_align(struct vdpa_device *vdpa)
@@ -448,13 +489,22 @@ static u64 vdpasim_get_features(struct vdpa_device *vdpa)
 static int vdpasim_set_features(struct vdpa_device *vdpa, u64 features)
 {
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+       struct virtio_net_config *config = &vdpasim->config;
 
        /* DMA mapping must be done by driver */
-       if (!(features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
+       if (!(features & (1ULL << VIRTIO_F_ACCESS_PLATFORM)))
                return -EINVAL;
 
        vdpasim->features = features & vdpasim_features;
 
+       /* We generally only know whether guest is using the legacy interface
+        * here, so generally that's the earliest we can set config fields.
+        * Note: We actually require VIRTIO_F_ACCESS_PLATFORM above which
+        * implies VIRTIO_F_VERSION_1, but let's not try to be clever here.
+        */
+
+       config->mtu = cpu_to_vdpasim16(vdpasim, 1500);
+       config->status = cpu_to_vdpasim16(vdpasim, VIRTIO_NET_S_LINK_UP);
        return 0;
 }
 
@@ -508,7 +558,7 @@ static void vdpasim_get_config(struct vdpa_device *vdpa, unsigned int offset,
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
 
        if (offset + len < sizeof(struct virtio_net_config))
-               memcpy(buf, &vdpasim->config + offset, len);
+               memcpy(buf, (u8 *)&vdpasim->config + offset, len);
 }
 
 static void vdpasim_set_config(struct vdpa_device *vdpa, unsigned int offset,
@@ -532,6 +582,7 @@ static int vdpasim_set_map(struct vdpa_device *vdpa,
        u64 start = 0ULL, last = 0ULL - 1;
        int ret;
 
+       spin_lock(&vdpasim->iommu_lock);
        vhost_iotlb_reset(vdpasim->iommu);
 
        for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
@@ -541,10 +592,12 @@ static int vdpasim_set_map(struct vdpa_device *vdpa,
                if (ret)
                        goto err;
        }
+       spin_unlock(&vdpasim->iommu_lock);
        return 0;
 
 err:
        vhost_iotlb_reset(vdpasim->iommu);
+       spin_unlock(&vdpasim->iommu_lock);
        return ret;
 }
 
@@ -552,16 +605,23 @@ static int vdpasim_dma_map(struct vdpa_device *vdpa, u64 iova, u64 size,
                           u64 pa, u32 perm)
 {
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+       int ret;
+
+       spin_lock(&vdpasim->iommu_lock);
+       ret = vhost_iotlb_add_range(vdpasim->iommu, iova, iova + size - 1, pa,
+                                   perm);
+       spin_unlock(&vdpasim->iommu_lock);
 
-       return vhost_iotlb_add_range(vdpasim->iommu, iova,
-                                    iova + size - 1, pa, perm);
+       return ret;
 }
 
 static int vdpasim_dma_unmap(struct vdpa_device *vdpa, u64 iova, u64 size)
 {
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
 
+       spin_lock(&vdpasim->iommu_lock);
        vhost_iotlb_del_range(vdpasim->iommu, iova, iova + size - 1);
+       spin_unlock(&vdpasim->iommu_lock);
 
        return 0;
 }
@@ -597,12 +657,36 @@ static const struct vdpa_config_ops vdpasim_net_config_ops = {
        .get_config             = vdpasim_get_config,
        .set_config             = vdpasim_set_config,
        .get_generation         = vdpasim_get_generation,
-       .set_map                = vdpasim_set_map,
        .dma_map                = vdpasim_dma_map,
        .dma_unmap              = vdpasim_dma_unmap,
        .free                   = vdpasim_free,
 };
 
+static const struct vdpa_config_ops vdpasim_net_batch_config_ops = {
+       .set_vq_address         = vdpasim_set_vq_address,
+       .set_vq_num             = vdpasim_set_vq_num,
+       .kick_vq                = vdpasim_kick_vq,
+       .set_vq_cb              = vdpasim_set_vq_cb,
+       .set_vq_ready           = vdpasim_set_vq_ready,
+       .get_vq_ready           = vdpasim_get_vq_ready,
+       .set_vq_state           = vdpasim_set_vq_state,
+       .get_vq_state           = vdpasim_get_vq_state,
+       .get_vq_align           = vdpasim_get_vq_align,
+       .get_features           = vdpasim_get_features,
+       .set_features           = vdpasim_set_features,
+       .set_config_cb          = vdpasim_set_config_cb,
+       .get_vq_num_max         = vdpasim_get_vq_num_max,
+       .get_device_id          = vdpasim_get_device_id,
+       .get_vendor_id          = vdpasim_get_vendor_id,
+       .get_status             = vdpasim_get_status,
+       .set_status             = vdpasim_set_status,
+       .get_config             = vdpasim_get_config,
+       .set_config             = vdpasim_set_config,
+       .get_generation         = vdpasim_get_generation,
+       .set_map                = vdpasim_set_map,
+       .free                   = vdpasim_free,
+};
+
 static int __init vdpasim_dev_init(void)
 {
        vdpasim_dev = vdpasim_create();
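The batch_mapping parameter selects which ops table the simulator registers: the batched table exposes set_map(), where the bus driver hands over the whole vhost_iotlb in one call, while the default table exposes dma_map()/dma_unmap() for incremental updates. A rough sketch of how a bus driver can dispatch on whichever op is present (the helper name is illustrative):

/* Sketch: prefer incremental dma_map/dma_unmap, fall back to batched set_map. */
static int example_map_one(struct vdpa_device *vdev, struct vhost_iotlb *iotlb,
                           u64 iova, u64 size, u64 pa, u32 perm)
{
        const struct vdpa_config_ops *ops = vdev->config;

        if (ops->dma_map)
                return ops->dma_map(vdev, iova, size, pa, perm);

        if (ops->set_map)
                return ops->set_map(vdev, iotlb);       /* replay the whole table */

        return -EOPNOTSUPP;     /* device handles DMA mapping some other way */
}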
index de881a6..620465c 100644 (file)
@@ -60,6 +60,10 @@ module_param(enable_sriov, bool, 0644);
 MODULE_PARM_DESC(enable_sriov, "Enable support for SR-IOV configuration.  Enabling SR-IOV on a PF typically requires support of the userspace PF driver, enabling VFs without such support may result in non-functional VFs or PF.");
 #endif
 
+static bool disable_denylist;
+module_param(disable_denylist, bool, 0444);
+MODULE_PARM_DESC(disable_denylist, "Disable use of device denylist. Disabling the denylist allows binding to devices with known errata that may lead to exploitable stability or security issues when accessed by untrusted users.");
+
 static inline bool vfio_vga_disabled(void)
 {
 #ifdef CONFIG_VFIO_PCI_VGA
@@ -69,6 +73,44 @@ static inline bool vfio_vga_disabled(void)
 #endif
 }
 
+static bool vfio_pci_dev_in_denylist(struct pci_dev *pdev)
+{
+       switch (pdev->vendor) {
+       case PCI_VENDOR_ID_INTEL:
+               switch (pdev->device) {
+               case PCI_DEVICE_ID_INTEL_QAT_C3XXX:
+               case PCI_DEVICE_ID_INTEL_QAT_C3XXX_VF:
+               case PCI_DEVICE_ID_INTEL_QAT_C62X:
+               case PCI_DEVICE_ID_INTEL_QAT_C62X_VF:
+               case PCI_DEVICE_ID_INTEL_QAT_DH895XCC:
+               case PCI_DEVICE_ID_INTEL_QAT_DH895XCC_VF:
+                       return true;
+               default:
+                       return false;
+               }
+       }
+
+       return false;
+}
+
+static bool vfio_pci_is_denylisted(struct pci_dev *pdev)
+{
+       if (!vfio_pci_dev_in_denylist(pdev))
+               return false;
+
+       if (disable_denylist) {
+               pci_warn(pdev,
+                        "device denylist disabled - allowing device %04x:%04x.\n",
+                        pdev->vendor, pdev->device);
+               return false;
+       }
+
+       pci_warn(pdev, "%04x:%04x exists in vfio-pci device denylist, driver probing disallowed.\n",
+                pdev->vendor, pdev->device);
+
+       return true;
+}
+
 /*
  * Our VGA arbiter participation is limited since we don't know anything
  * about the device itself.  However, if the device is the only VGA device
@@ -207,6 +249,8 @@ static bool vfio_pci_nointx(struct pci_dev *pdev)
                case 0x1580 ... 0x1581:
                case 0x1583 ... 0x158b:
                case 0x37d0 ... 0x37d2:
+               /* X550 */
+               case 0x1563:
                        return true;
                default:
                        return false;
@@ -521,14 +565,12 @@ static void vfio_pci_release(void *device_data)
                vfio_pci_vf_token_user_add(vdev, -1);
                vfio_spapr_pci_eeh_release(vdev->pdev);
                vfio_pci_disable(vdev);
+
                mutex_lock(&vdev->igate);
                if (vdev->err_trigger) {
                        eventfd_ctx_put(vdev->err_trigger);
                        vdev->err_trigger = NULL;
                }
-               mutex_unlock(&vdev->igate);
-
-               mutex_lock(&vdev->igate);
                if (vdev->req_trigger) {
                        eventfd_ctx_put(vdev->req_trigger);
                        vdev->req_trigger = NULL;
@@ -1856,6 +1898,9 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        struct iommu_group *group;
        int ret;
 
+       if (vfio_pci_is_denylisted(pdev))
+               return -EINVAL;
+
        if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL)
                return -EINVAL;
 
@@ -2345,6 +2390,9 @@ static int __init vfio_pci_init(void)
 
        vfio_pci_fill_ids();
 
+       if (disable_denylist)
+               pr_warn("device denylist disabled.\n");
+
        return 0;
 
 out_driver:
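The denylist above is an open-coded switch over vendor and device IDs. The same check could also be written as a pci_device_id table consulted with pci_match_id(); this is sketched below only for comparison, it is not how the merged patch does it:

/* Alternative sketch: the denylist as a match table (comparison only). */
static const struct pci_device_id example_denylist_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_QAT_C3XXX) },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_QAT_C3XXX_VF) },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_QAT_C62X) },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_QAT_C62X_VF) },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_QAT_DH895XCC) },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_QAT_DH895XCC_VF) },
        { }
};

static bool example_dev_in_denylist(struct pci_dev *pdev)
{
        return pci_match_id(example_denylist_ids, pdev) != NULL;
}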
index 86a02af..61ca8ab 100644 (file)
 
 struct vfio_pci_ioeventfd {
        struct list_head        next;
+       struct vfio_pci_device  *vdev;
        struct virqfd           *virqfd;
        void __iomem            *addr;
        uint64_t                data;
        loff_t                  pos;
        int                     bar;
        int                     count;
+       bool                    test_mem;
 };
 
 struct vfio_pci_irq_ctx {
index 916b184..9e353c4 100644 (file)
 #define vfio_ioread8   ioread8
 #define vfio_iowrite8  iowrite8
 
+#define VFIO_IOWRITE(size) \
+static int vfio_pci_iowrite##size(struct vfio_pci_device *vdev,                \
+                       bool test_mem, u##size val, void __iomem *io)   \
+{                                                                      \
+       if (test_mem) {                                                 \
+               down_read(&vdev->memory_lock);                          \
+               if (!__vfio_pci_memory_enabled(vdev)) {                 \
+                       up_read(&vdev->memory_lock);                    \
+                       return -EIO;                                    \
+               }                                                       \
+       }                                                               \
+                                                                       \
+       vfio_iowrite##size(val, io);                                    \
+                                                                       \
+       if (test_mem)                                                   \
+               up_read(&vdev->memory_lock);                            \
+                                                                       \
+       return 0;                                                       \
+}
+
+VFIO_IOWRITE(8)
+VFIO_IOWRITE(16)
+VFIO_IOWRITE(32)
+#ifdef iowrite64
+VFIO_IOWRITE(64)
+#endif
+
+#define VFIO_IOREAD(size) \
+static int vfio_pci_ioread##size(struct vfio_pci_device *vdev,         \
+                       bool test_mem, u##size *val, void __iomem *io)  \
+{                                                                      \
+       if (test_mem) {                                                 \
+               down_read(&vdev->memory_lock);                          \
+               if (!__vfio_pci_memory_enabled(vdev)) {                 \
+                       up_read(&vdev->memory_lock);                    \
+                       return -EIO;                                    \
+               }                                                       \
+       }                                                               \
+                                                                       \
+       *val = vfio_ioread##size(io);                                   \
+                                                                       \
+       if (test_mem)                                                   \
+               up_read(&vdev->memory_lock);                            \
+                                                                       \
+       return 0;                                                       \
+}
+
+VFIO_IOREAD(8)
+VFIO_IOREAD(16)
+VFIO_IOREAD(32)
+
 /*
  * Read or write from an __iomem region (MMIO or I/O port) with an excluded
  * range which is inaccessible.  The excluded range drops writes and fills
  * reads with -1.  This is intended for handling MSI-X vector tables and
  * leftover space for ROM BARs.
  */
-static ssize_t do_io_rw(void __iomem *io, char __user *buf,
+static ssize_t do_io_rw(struct vfio_pci_device *vdev, bool test_mem,
+                       void __iomem *io, char __user *buf,
                        loff_t off, size_t count, size_t x_start,
                        size_t x_end, bool iswrite)
 {
        ssize_t done = 0;
+       int ret;
 
        while (count) {
                size_t fillable, filled;
@@ -66,9 +119,15 @@ static ssize_t do_io_rw(void __iomem *io, char __user *buf,
                                if (copy_from_user(&val, buf, 4))
                                        return -EFAULT;
 
-                               vfio_iowrite32(val, io + off);
+                               ret = vfio_pci_iowrite32(vdev, test_mem,
+                                                        val, io + off);
+                               if (ret)
+                                       return ret;
                        } else {
-                               val = vfio_ioread32(io + off);
+                               ret = vfio_pci_ioread32(vdev, test_mem,
+                                                       &val, io + off);
+                               if (ret)
+                                       return ret;
 
                                if (copy_to_user(buf, &val, 4))
                                        return -EFAULT;
@@ -82,9 +141,15 @@ static ssize_t do_io_rw(void __iomem *io, char __user *buf,
                                if (copy_from_user(&val, buf, 2))
                                        return -EFAULT;
 
-                               vfio_iowrite16(val, io + off);
+                               ret = vfio_pci_iowrite16(vdev, test_mem,
+                                                        val, io + off);
+                               if (ret)
+                                       return ret;
                        } else {
-                               val = vfio_ioread16(io + off);
+                               ret = vfio_pci_ioread16(vdev, test_mem,
+                                                       &val, io + off);
+                               if (ret)
+                                       return ret;
 
                                if (copy_to_user(buf, &val, 2))
                                        return -EFAULT;
@@ -98,9 +163,15 @@ static ssize_t do_io_rw(void __iomem *io, char __user *buf,
                                if (copy_from_user(&val, buf, 1))
                                        return -EFAULT;
 
-                               vfio_iowrite8(val, io + off);
+                               ret = vfio_pci_iowrite8(vdev, test_mem,
+                                                       val, io + off);
+                               if (ret)
+                                       return ret;
                        } else {
-                               val = vfio_ioread8(io + off);
+                               ret = vfio_pci_ioread8(vdev, test_mem,
+                                                      &val, io + off);
+                               if (ret)
+                                       return ret;
 
                                if (copy_to_user(buf, &val, 1))
                                        return -EFAULT;
@@ -178,14 +249,6 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,
 
        count = min(count, (size_t)(end - pos));
 
-       if (res->flags & IORESOURCE_MEM) {
-               down_read(&vdev->memory_lock);
-               if (!__vfio_pci_memory_enabled(vdev)) {
-                       up_read(&vdev->memory_lock);
-                       return -EIO;
-               }
-       }
-
        if (bar == PCI_ROM_RESOURCE) {
                /*
                 * The ROM can fill less space than the BAR, so we start the
@@ -213,7 +276,8 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,
                x_end = vdev->msix_offset + vdev->msix_size;
        }
 
-       done = do_io_rw(io, buf, pos, count, x_start, x_end, iswrite);
+       done = do_io_rw(vdev, res->flags & IORESOURCE_MEM, io, buf, pos,
+                       count, x_start, x_end, iswrite);
 
        if (done >= 0)
                *ppos += done;
@@ -221,9 +285,6 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,
        if (bar == PCI_ROM_RESOURCE)
                pci_unmap_rom(pdev, io);
 out:
-       if (res->flags & IORESOURCE_MEM)
-               up_read(&vdev->memory_lock);
-
        return done;
 }
 
@@ -278,7 +339,12 @@ ssize_t vfio_pci_vga_rw(struct vfio_pci_device *vdev, char __user *buf,
                return ret;
        }
 
-       done = do_io_rw(iomem, buf, off, count, 0, 0, iswrite);
+       /*
+        * VGA MMIO is a legacy, non-BAR resource that hopefully allows
+        * probing, so we don't currently worry about access in relation
+        * to the memory enable bit in the command register.
+        */
+       done = do_io_rw(vdev, false, iomem, buf, off, count, 0, 0, iswrite);
 
        vga_put(vdev->pdev, rsrc);
 
@@ -296,17 +362,21 @@ static int vfio_pci_ioeventfd_handler(void *opaque, void *unused)
 
        switch (ioeventfd->count) {
        case 1:
-               vfio_iowrite8(ioeventfd->data, ioeventfd->addr);
+               vfio_pci_iowrite8(ioeventfd->vdev, ioeventfd->test_mem,
+                                 ioeventfd->data, ioeventfd->addr);
                break;
        case 2:
-               vfio_iowrite16(ioeventfd->data, ioeventfd->addr);
+               vfio_pci_iowrite16(ioeventfd->vdev, ioeventfd->test_mem,
+                                  ioeventfd->data, ioeventfd->addr);
                break;
        case 4:
-               vfio_iowrite32(ioeventfd->data, ioeventfd->addr);
+               vfio_pci_iowrite32(ioeventfd->vdev, ioeventfd->test_mem,
+                                  ioeventfd->data, ioeventfd->addr);
                break;
 #ifdef iowrite64
        case 8:
-               vfio_iowrite64(ioeventfd->data, ioeventfd->addr);
+               vfio_pci_iowrite64(ioeventfd->vdev, ioeventfd->test_mem,
+                                  ioeventfd->data, ioeventfd->addr);
                break;
 #endif
        }
@@ -378,11 +448,13 @@ long vfio_pci_ioeventfd(struct vfio_pci_device *vdev, loff_t offset,
                goto out_unlock;
        }
 
+       ioeventfd->vdev = vdev;
        ioeventfd->addr = vdev->barmap[bar] + pos;
        ioeventfd->data = data;
        ioeventfd->pos = pos;
        ioeventfd->bar = bar;
        ioeventfd->count = count;
+       ioeventfd->test_mem = vdev->pdev->resource[bar].flags & IORESOURCE_MEM;
 
        ret = vfio_virqfd_enable(ioeventfd, vfio_pci_ioeventfd_handler,
                                 NULL, NULL, &ioeventfd->virqfd, fd);
index 580099a..262ab0e 100644 (file)
@@ -627,9 +627,10 @@ static struct vfio_device *vfio_group_get_device(struct vfio_group *group,
  * that error notification via MSI can be affected for platforms that handle
  * MSI within the same IOVA space as DMA.
  */
-static const char * const vfio_driver_whitelist[] = { "pci-stub" };
+static const char * const vfio_driver_allowed[] = { "pci-stub" };
 
-static bool vfio_dev_whitelisted(struct device *dev, struct device_driver *drv)
+static bool vfio_dev_driver_allowed(struct device *dev,
+                                   struct device_driver *drv)
 {
        if (dev_is_pci(dev)) {
                struct pci_dev *pdev = to_pci_dev(dev);
@@ -638,8 +639,8 @@ static bool vfio_dev_whitelisted(struct device *dev, struct device_driver *drv)
                        return true;
        }
 
-       return match_string(vfio_driver_whitelist,
-                           ARRAY_SIZE(vfio_driver_whitelist),
+       return match_string(vfio_driver_allowed,
+                           ARRAY_SIZE(vfio_driver_allowed),
                            drv->name) >= 0;
 }
 
@@ -648,7 +649,7 @@ static bool vfio_dev_whitelisted(struct device *dev, struct device_driver *drv)
  * one of the following states:
  *  - driver-less
  *  - bound to a vfio driver
- *  - bound to a whitelisted driver
+ *  - bound to an otherwise allowed driver
  *  - a PCI interconnect device
  *
  * We use two methods to determine whether a device is bound to a vfio
@@ -674,7 +675,7 @@ static int vfio_dev_viable(struct device *dev, void *data)
        }
        mutex_unlock(&group->unbound_lock);
 
-       if (!ret || !drv || vfio_dev_whitelisted(dev, drv))
+       if (!ret || !drv || vfio_dev_driver_allowed(dev, drv))
                return 0;
 
        device = vfio_group_get_device(group, dev);
index 16b3adc..fe888b5 100644 (file)
@@ -383,7 +383,7 @@ static void tce_iommu_unuse_page(struct tce_container *container,
        struct page *page;
 
        page = pfn_to_page(hpa >> PAGE_SHIFT);
-       put_page(page);
+       unpin_user_page(page);
 }
 
 static int tce_iommu_prereg_ua_to_hpa(struct tce_container *container,
@@ -486,7 +486,7 @@ static int tce_iommu_use_page(unsigned long tce, unsigned long *hpa)
        struct page *page = NULL;
        enum dma_data_direction direction = iommu_tce_direction(tce);
 
-       if (get_user_pages_fast(tce & PAGE_MASK, 1,
+       if (pin_user_pages_fast(tce & PAGE_MASK, 1,
                        direction != DMA_TO_DEVICE ? FOLL_WRITE : 0,
                        &page) != 1)
                return -EFAULT;
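The change above moves from get_user_pages_fast()/put_page() to pin_user_pages_fast()/unpin_user_page(). The two families must not be mixed, because FOLL_PIN pages carry a dedicated pin count. A minimal sketch of the required pairing (example_pin_one() is a placeholder name):

/* Sketch: pages taken with pin_user_pages_fast() are released with
 * unpin_user_page(), never with put_page().
 */
static int example_pin_one(unsigned long uaddr, bool writable)
{
        struct page *page;

        if (pin_user_pages_fast(uaddr & PAGE_MASK, 1,
                                writable ? FOLL_WRITE : 0, &page) != 1)
                return -EFAULT;

        /* ... the page can now be used for DMA ... */

        unpin_user_page(page);
        return 0;
}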
index 5e556ac..c992973 100644 (file)
@@ -425,7 +425,7 @@ static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm,
        if (ret) {
                bool unlocked = false;
 
-               ret = fixup_user_fault(NULL, mm, vaddr,
+               ret = fixup_user_fault(mm, vaddr,
                                       FAULT_FLAG_REMOTE |
                                       (write_fault ?  FAULT_FLAG_WRITE : 0),
                                       &unlocked);
@@ -453,7 +453,7 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
                flags |= FOLL_WRITE;
 
        mmap_read_lock(mm);
-       ret = pin_user_pages_remote(NULL, mm, vaddr, 1, flags | FOLL_LONGTERM,
+       ret = pin_user_pages_remote(mm, vaddr, 1, flags | FOLL_LONGTERM,
                                    page, NULL, NULL);
        if (ret == 1) {
                *pfn = page_to_pfn(page[0]);
@@ -1225,8 +1225,10 @@ static int vfio_iommu_map(struct vfio_iommu *iommu, dma_addr_t iova,
        return 0;
 
 unwind:
-       list_for_each_entry_continue_reverse(d, &iommu->domain_list, next)
+       list_for_each_entry_continue_reverse(d, &iommu->domain_list, next) {
                iommu_unmap(d->domain, iova, npage << PAGE_SHIFT);
+               cond_resched();
+       }
 
        return ret;
 }
@@ -1422,13 +1424,16 @@ static int vfio_bus_type(struct device *dev, void *data)
 static int vfio_iommu_replay(struct vfio_iommu *iommu,
                             struct vfio_domain *domain)
 {
-       struct vfio_domain *d;
+       struct vfio_domain *d = NULL;
        struct rb_node *n;
        unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
        int ret;
 
        /* Arbitrarily pick the first domain in the list for lookups */
-       d = list_first_entry(&iommu->domain_list, struct vfio_domain, next);
+       if (!list_empty(&iommu->domain_list))
+               d = list_first_entry(&iommu->domain_list,
+                                    struct vfio_domain, next);
+
        n = rb_first(&iommu->dma_list);
 
        for (; n; n = rb_next(n)) {
@@ -1446,6 +1451,11 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
                                phys_addr_t p;
                                dma_addr_t i;
 
+                               if (WARN_ON(!d)) { /* mapped w/o a domain?! */
+                                       ret = -EINVAL;
+                                       goto unwind;
+                               }
+
                                phys = iommu_iova_to_phys(d->domain, iova);
 
                                if (WARN_ON(!phys)) {
@@ -1475,7 +1485,7 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
                                if (npage <= 0) {
                                        WARN_ON(!npage);
                                        ret = (int)npage;
-                                       return ret;
+                                       goto unwind;
                                }
 
                                phys = pfn << PAGE_SHIFT;
@@ -1484,14 +1494,67 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
 
                        ret = iommu_map(domain->domain, iova, phys,
                                        size, dma->prot | domain->prot);
-                       if (ret)
-                               return ret;
+                       if (ret) {
+                               if (!dma->iommu_mapped)
+                                       vfio_unpin_pages_remote(dma, iova,
+                                                       phys >> PAGE_SHIFT,
+                                                       size >> PAGE_SHIFT,
+                                                       true);
+                               goto unwind;
+                       }
 
                        iova += size;
                }
+       }
+
+       /* All dmas are now mapped, defer to second tree walk for unwind */
+       for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {
+               struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);
+
                dma->iommu_mapped = true;
        }
+
        return 0;
+
+unwind:
+       for (; n; n = rb_prev(n)) {
+               struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);
+               dma_addr_t iova;
+
+               if (dma->iommu_mapped) {
+                       iommu_unmap(domain->domain, dma->iova, dma->size);
+                       continue;
+               }
+
+               iova = dma->iova;
+               while (iova < dma->iova + dma->size) {
+                       phys_addr_t phys, p;
+                       size_t size;
+                       dma_addr_t i;
+
+                       phys = iommu_iova_to_phys(domain->domain, iova);
+                       if (!phys) {
+                               iova += PAGE_SIZE;
+                               continue;
+                       }
+
+                       size = PAGE_SIZE;
+                       p = phys + size;
+                       i = iova + size;
+                       while (i < dma->iova + dma->size &&
+                              p == iommu_iova_to_phys(domain->domain, i)) {
+                               size += PAGE_SIZE;
+                               p += PAGE_SIZE;
+                               i += PAGE_SIZE;
+                       }
+
+                       iommu_unmap(domain->domain, iova, size);
+                       vfio_unpin_pages_remote(dma, iova, phys >> PAGE_SHIFT,
+                                               size >> PAGE_SHIFT, true);
+               }
+       }
+
+       return ret;
 }
 
 /*
@@ -2453,6 +2516,23 @@ static int vfio_domains_have_iommu_cache(struct vfio_iommu *iommu)
        return ret;
 }
 
+static int vfio_iommu_type1_check_extension(struct vfio_iommu *iommu,
+                                           unsigned long arg)
+{
+       switch (arg) {
+       case VFIO_TYPE1_IOMMU:
+       case VFIO_TYPE1v2_IOMMU:
+       case VFIO_TYPE1_NESTING_IOMMU:
+               return 1;
+       case VFIO_DMA_CC_IOMMU:
+               if (!iommu)
+                       return 0;
+               return vfio_domains_have_iommu_cache(iommu);
+       default:
+               return 0;
+       }
+}
+
 static int vfio_iommu_iova_add_cap(struct vfio_info_cap *caps,
                 struct vfio_iommu_type1_info_cap_iova_range *cap_iovas,
                 size_t size)
@@ -2529,241 +2609,256 @@ static int vfio_iommu_migration_build_caps(struct vfio_iommu *iommu,
        return vfio_info_add_capability(caps, &cap_mig.header, sizeof(cap_mig));
 }
 
-static long vfio_iommu_type1_ioctl(void *iommu_data,
-                                  unsigned int cmd, unsigned long arg)
+static int vfio_iommu_type1_get_info(struct vfio_iommu *iommu,
+                                    unsigned long arg)
 {
-       struct vfio_iommu *iommu = iommu_data;
+       struct vfio_iommu_type1_info info;
        unsigned long minsz;
+       struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
+       unsigned long capsz;
+       int ret;
 
-       if (cmd == VFIO_CHECK_EXTENSION) {
-               switch (arg) {
-               case VFIO_TYPE1_IOMMU:
-               case VFIO_TYPE1v2_IOMMU:
-               case VFIO_TYPE1_NESTING_IOMMU:
-                       return 1;
-               case VFIO_DMA_CC_IOMMU:
-                       if (!iommu)
-                               return 0;
-                       return vfio_domains_have_iommu_cache(iommu);
-               default:
-                       return 0;
-               }
-       } else if (cmd == VFIO_IOMMU_GET_INFO) {
-               struct vfio_iommu_type1_info info;
-               struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
-               unsigned long capsz;
-               int ret;
-
-               minsz = offsetofend(struct vfio_iommu_type1_info, iova_pgsizes);
+       minsz = offsetofend(struct vfio_iommu_type1_info, iova_pgsizes);
 
-               /* For backward compatibility, cannot require this */
-               capsz = offsetofend(struct vfio_iommu_type1_info, cap_offset);
+       /* For backward compatibility, cannot require this */
+       capsz = offsetofend(struct vfio_iommu_type1_info, cap_offset);
 
-               if (copy_from_user(&info, (void __user *)arg, minsz))
-                       return -EFAULT;
+       if (copy_from_user(&info, (void __user *)arg, minsz))
+               return -EFAULT;
 
-               if (info.argsz < minsz)
-                       return -EINVAL;
+       if (info.argsz < minsz)
+               return -EINVAL;
 
-               if (info.argsz >= capsz) {
-                       minsz = capsz;
-                       info.cap_offset = 0; /* output, no-recopy necessary */
-               }
+       if (info.argsz >= capsz) {
+               minsz = capsz;
+               info.cap_offset = 0; /* output, no-recopy necessary */
+       }
 
-               mutex_lock(&iommu->lock);
-               info.flags = VFIO_IOMMU_INFO_PGSIZES;
+       mutex_lock(&iommu->lock);
+       info.flags = VFIO_IOMMU_INFO_PGSIZES;
 
-               info.iova_pgsizes = iommu->pgsize_bitmap;
+       info.iova_pgsizes = iommu->pgsize_bitmap;
 
-               ret = vfio_iommu_migration_build_caps(iommu, &caps);
+       ret = vfio_iommu_migration_build_caps(iommu, &caps);
 
-               if (!ret)
-                       ret = vfio_iommu_iova_build_caps(iommu, &caps);
+       if (!ret)
+               ret = vfio_iommu_iova_build_caps(iommu, &caps);
 
-               mutex_unlock(&iommu->lock);
+       mutex_unlock(&iommu->lock);
 
-               if (ret)
-                       return ret;
+       if (ret)
+               return ret;
 
-               if (caps.size) {
-                       info.flags |= VFIO_IOMMU_INFO_CAPS;
+       if (caps.size) {
+               info.flags |= VFIO_IOMMU_INFO_CAPS;
 
-                       if (info.argsz < sizeof(info) + caps.size) {
-                               info.argsz = sizeof(info) + caps.size;
-                       } else {
-                               vfio_info_cap_shift(&caps, sizeof(info));
-                               if (copy_to_user((void __user *)arg +
-                                               sizeof(info), caps.buf,
-                                               caps.size)) {
-                                       kfree(caps.buf);
-                                       return -EFAULT;
-                               }
-                               info.cap_offset = sizeof(info);
+               if (info.argsz < sizeof(info) + caps.size) {
+                       info.argsz = sizeof(info) + caps.size;
+               } else {
+                       vfio_info_cap_shift(&caps, sizeof(info));
+                       if (copy_to_user((void __user *)arg +
+                                       sizeof(info), caps.buf,
+                                       caps.size)) {
+                               kfree(caps.buf);
+                               return -EFAULT;
                        }
-
-                       kfree(caps.buf);
+                       info.cap_offset = sizeof(info);
                }
 
-               return copy_to_user((void __user *)arg, &info, minsz) ?
-                       -EFAULT : 0;
+               kfree(caps.buf);
+       }
 
-       } else if (cmd == VFIO_IOMMU_MAP_DMA) {
-               struct vfio_iommu_type1_dma_map map;
-               uint32_t mask = VFIO_DMA_MAP_FLAG_READ |
-                               VFIO_DMA_MAP_FLAG_WRITE;
+       return copy_to_user((void __user *)arg, &info, minsz) ?
+                       -EFAULT : 0;
+}
 
-               minsz = offsetofend(struct vfio_iommu_type1_dma_map, size);
+static int vfio_iommu_type1_map_dma(struct vfio_iommu *iommu,
+                                   unsigned long arg)
+{
+       struct vfio_iommu_type1_dma_map map;
+       unsigned long minsz;
+       uint32_t mask = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE;
 
-               if (copy_from_user(&map, (void __user *)arg, minsz))
-                       return -EFAULT;
+       minsz = offsetofend(struct vfio_iommu_type1_dma_map, size);
 
-               if (map.argsz < minsz || map.flags & ~mask)
-                       return -EINVAL;
+       if (copy_from_user(&map, (void __user *)arg, minsz))
+               return -EFAULT;
 
-               return vfio_dma_do_map(iommu, &map);
+       if (map.argsz < minsz || map.flags & ~mask)
+               return -EINVAL;
 
-       } else if (cmd == VFIO_IOMMU_UNMAP_DMA) {
-               struct vfio_iommu_type1_dma_unmap unmap;
-               struct vfio_bitmap bitmap = { 0 };
-               int ret;
+       return vfio_dma_do_map(iommu, &map);
+}
 
-               minsz = offsetofend(struct vfio_iommu_type1_dma_unmap, size);
+static int vfio_iommu_type1_unmap_dma(struct vfio_iommu *iommu,
+                                     unsigned long arg)
+{
+       struct vfio_iommu_type1_dma_unmap unmap;
+       struct vfio_bitmap bitmap = { 0 };
+       unsigned long minsz;
+       int ret;
 
-               if (copy_from_user(&unmap, (void __user *)arg, minsz))
-                       return -EFAULT;
+       minsz = offsetofend(struct vfio_iommu_type1_dma_unmap, size);
 
-               if (unmap.argsz < minsz ||
-                   unmap.flags & ~VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP)
-                       return -EINVAL;
+       if (copy_from_user(&unmap, (void __user *)arg, minsz))
+               return -EFAULT;
 
-               if (unmap.flags & VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP) {
-                       unsigned long pgshift;
+       if (unmap.argsz < minsz ||
+           unmap.flags & ~VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP)
+               return -EINVAL;
 
-                       if (unmap.argsz < (minsz + sizeof(bitmap)))
-                               return -EINVAL;
+       if (unmap.flags & VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP) {
+               unsigned long pgshift;
 
-                       if (copy_from_user(&bitmap,
-                                          (void __user *)(arg + minsz),
-                                          sizeof(bitmap)))
-                               return -EFAULT;
+               if (unmap.argsz < (minsz + sizeof(bitmap)))
+                       return -EINVAL;
 
-                       if (!access_ok((void __user *)bitmap.data, bitmap.size))
-                               return -EINVAL;
+               if (copy_from_user(&bitmap,
+                                  (void __user *)(arg + minsz),
+                                  sizeof(bitmap)))
+                       return -EFAULT;
 
-                       pgshift = __ffs(bitmap.pgsize);
-                       ret = verify_bitmap_size(unmap.size >> pgshift,
-                                                bitmap.size);
-                       if (ret)
-                               return ret;
-               }
+               if (!access_ok((void __user *)bitmap.data, bitmap.size))
+                       return -EINVAL;
 
-               ret = vfio_dma_do_unmap(iommu, &unmap, &bitmap);
+               pgshift = __ffs(bitmap.pgsize);
+               ret = verify_bitmap_size(unmap.size >> pgshift,
+                                        bitmap.size);
                if (ret)
                        return ret;
+       }
 
-               return copy_to_user((void __user *)arg, &unmap, minsz) ?
+       ret = vfio_dma_do_unmap(iommu, &unmap, &bitmap);
+       if (ret)
+               return ret;
+
+       return copy_to_user((void __user *)arg, &unmap, minsz) ?
                        -EFAULT : 0;
-       } else if (cmd == VFIO_IOMMU_DIRTY_PAGES) {
-               struct vfio_iommu_type1_dirty_bitmap dirty;
-               uint32_t mask = VFIO_IOMMU_DIRTY_PAGES_FLAG_START |
-                               VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP |
-                               VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP;
-               int ret = 0;
+}
 
-               if (!iommu->v2)
-                       return -EACCES;
+static int vfio_iommu_type1_dirty_pages(struct vfio_iommu *iommu,
+                                       unsigned long arg)
+{
+       struct vfio_iommu_type1_dirty_bitmap dirty;
+       uint32_t mask = VFIO_IOMMU_DIRTY_PAGES_FLAG_START |
+                       VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP |
+                       VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP;
+       unsigned long minsz;
+       int ret = 0;
 
-               minsz = offsetofend(struct vfio_iommu_type1_dirty_bitmap,
-                                   flags);
+       if (!iommu->v2)
+               return -EACCES;
 
-               if (copy_from_user(&dirty, (void __user *)arg, minsz))
-                       return -EFAULT;
+       minsz = offsetofend(struct vfio_iommu_type1_dirty_bitmap, flags);
 
-               if (dirty.argsz < minsz || dirty.flags & ~mask)
-                       return -EINVAL;
+       if (copy_from_user(&dirty, (void __user *)arg, minsz))
+               return -EFAULT;
 
-               /* only one flag should be set at a time */
-               if (__ffs(dirty.flags) != __fls(dirty.flags))
-                       return -EINVAL;
+       if (dirty.argsz < minsz || dirty.flags & ~mask)
+               return -EINVAL;
 
-               if (dirty.flags & VFIO_IOMMU_DIRTY_PAGES_FLAG_START) {
-                       size_t pgsize;
+       /* only one flag should be set at a time */
+       if (__ffs(dirty.flags) != __fls(dirty.flags))
+               return -EINVAL;
 
-                       mutex_lock(&iommu->lock);
-                       pgsize = 1 << __ffs(iommu->pgsize_bitmap);
-                       if (!iommu->dirty_page_tracking) {
-                               ret = vfio_dma_bitmap_alloc_all(iommu, pgsize);
-                               if (!ret)
-                                       iommu->dirty_page_tracking = true;
-                       }
-                       mutex_unlock(&iommu->lock);
-                       return ret;
-               } else if (dirty.flags & VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP) {
-                       mutex_lock(&iommu->lock);
-                       if (iommu->dirty_page_tracking) {
-                               iommu->dirty_page_tracking = false;
-                               vfio_dma_bitmap_free_all(iommu);
-                       }
-                       mutex_unlock(&iommu->lock);
-                       return 0;
-               } else if (dirty.flags &
-                                VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP) {
-                       struct vfio_iommu_type1_dirty_bitmap_get range;
-                       unsigned long pgshift;
-                       size_t data_size = dirty.argsz - minsz;
-                       size_t iommu_pgsize;
-
-                       if (!data_size || data_size < sizeof(range))
-                               return -EINVAL;
-
-                       if (copy_from_user(&range, (void __user *)(arg + minsz),
-                                          sizeof(range)))
-                               return -EFAULT;
+       if (dirty.flags & VFIO_IOMMU_DIRTY_PAGES_FLAG_START) {
+               size_t pgsize;
 
-                       if (range.iova + range.size < range.iova)
-                               return -EINVAL;
-                       if (!access_ok((void __user *)range.bitmap.data,
-                                      range.bitmap.size))
-                               return -EINVAL;
+               mutex_lock(&iommu->lock);
+               pgsize = 1 << __ffs(iommu->pgsize_bitmap);
+               if (!iommu->dirty_page_tracking) {
+                       ret = vfio_dma_bitmap_alloc_all(iommu, pgsize);
+                       if (!ret)
+                               iommu->dirty_page_tracking = true;
+               }
+               mutex_unlock(&iommu->lock);
+               return ret;
+       } else if (dirty.flags & VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP) {
+               mutex_lock(&iommu->lock);
+               if (iommu->dirty_page_tracking) {
+                       iommu->dirty_page_tracking = false;
+                       vfio_dma_bitmap_free_all(iommu);
+               }
+               mutex_unlock(&iommu->lock);
+               return 0;
+       } else if (dirty.flags & VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP) {
+               struct vfio_iommu_type1_dirty_bitmap_get range;
+               unsigned long pgshift;
+               size_t data_size = dirty.argsz - minsz;
+               size_t iommu_pgsize;
 
-                       pgshift = __ffs(range.bitmap.pgsize);
-                       ret = verify_bitmap_size(range.size >> pgshift,
-                                                range.bitmap.size);
-                       if (ret)
-                               return ret;
+               if (!data_size || data_size < sizeof(range))
+                       return -EINVAL;
 
-                       mutex_lock(&iommu->lock);
+               if (copy_from_user(&range, (void __user *)(arg + minsz),
+                                  sizeof(range)))
+                       return -EFAULT;
+
+               if (range.iova + range.size < range.iova)
+                       return -EINVAL;
+               if (!access_ok((void __user *)range.bitmap.data,
+                              range.bitmap.size))
+                       return -EINVAL;
 
-                       iommu_pgsize = (size_t)1 << __ffs(iommu->pgsize_bitmap);
+               pgshift = __ffs(range.bitmap.pgsize);
+               ret = verify_bitmap_size(range.size >> pgshift,
+                                        range.bitmap.size);
+               if (ret)
+                       return ret;
 
-                       /* allow only smallest supported pgsize */
-                       if (range.bitmap.pgsize != iommu_pgsize) {
-                               ret = -EINVAL;
-                               goto out_unlock;
-                       }
-                       if (range.iova & (iommu_pgsize - 1)) {
-                               ret = -EINVAL;
-                               goto out_unlock;
-                       }
-                       if (!range.size || range.size & (iommu_pgsize - 1)) {
-                               ret = -EINVAL;
-                               goto out_unlock;
-                       }
+               mutex_lock(&iommu->lock);
 
-                       if (iommu->dirty_page_tracking)
-                               ret = vfio_iova_dirty_bitmap(range.bitmap.data,
-                                               iommu, range.iova, range.size,
-                                               range.bitmap.pgsize);
-                       else
-                               ret = -EINVAL;
-out_unlock:
-                       mutex_unlock(&iommu->lock);
+               iommu_pgsize = (size_t)1 << __ffs(iommu->pgsize_bitmap);
 
-                       return ret;
+               /* allow only smallest supported pgsize */
+               if (range.bitmap.pgsize != iommu_pgsize) {
+                       ret = -EINVAL;
+                       goto out_unlock;
                }
+               if (range.iova & (iommu_pgsize - 1)) {
+                       ret = -EINVAL;
+                       goto out_unlock;
+               }
+               if (!range.size || range.size & (iommu_pgsize - 1)) {
+                       ret = -EINVAL;
+                       goto out_unlock;
+               }
+
+               if (iommu->dirty_page_tracking)
+                       ret = vfio_iova_dirty_bitmap(range.bitmap.data,
+                                                    iommu, range.iova,
+                                                    range.size,
+                                                    range.bitmap.pgsize);
+               else
+                       ret = -EINVAL;
+out_unlock:
+               mutex_unlock(&iommu->lock);
+
+               return ret;
        }
 
-       return -ENOTTY;
+       return -EINVAL;
+}
+
+static long vfio_iommu_type1_ioctl(void *iommu_data,
+                                  unsigned int cmd, unsigned long arg)
+{
+       struct vfio_iommu *iommu = iommu_data;
+
+       switch (cmd) {
+       case VFIO_CHECK_EXTENSION:
+               return vfio_iommu_type1_check_extension(iommu, arg);
+       case VFIO_IOMMU_GET_INFO:
+               return vfio_iommu_type1_get_info(iommu, arg);
+       case VFIO_IOMMU_MAP_DMA:
+               return vfio_iommu_type1_map_dma(iommu, arg);
+       case VFIO_IOMMU_UNMAP_DMA:
+               return vfio_iommu_type1_unmap_dma(iommu, arg);
+       case VFIO_IOMMU_DIRTY_PAGES:
+               return vfio_iommu_type1_dirty_pages(iommu, arg);
+       default:
+               return -ENOTTY;
+       }
 }
 
 static int vfio_iommu_type1_register_notifier(void *iommu_data,
index d3688c6..587fbae 100644 (file)
@@ -65,6 +65,7 @@ config VHOST_VDPA
        tristate "Vhost driver for vDPA-based backend"
        depends on EVENTFD
        select VHOST
+       select IRQ_BYPASS_MANAGER
        depends on VDPA
        help
          This kernel module can be loaded in host kernel to accelerate
index eea902b..531a00d 100644 (file)
@@ -73,7 +73,7 @@ enum {
        VHOST_NET_FEATURES = VHOST_FEATURES |
                         (1ULL << VHOST_NET_F_VIRTIO_NET_HDR) |
                         (1ULL << VIRTIO_NET_F_MRG_RXBUF) |
-                        (1ULL << VIRTIO_F_IOMMU_PLATFORM)
+                        (1ULL << VIRTIO_F_ACCESS_PLATFORM)
 };
 
 enum {
@@ -1615,21 +1615,6 @@ done:
        return err;
 }
 
-static int vhost_net_set_backend_features(struct vhost_net *n, u64 features)
-{
-       int i;
-
-       mutex_lock(&n->dev.mutex);
-       for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
-               mutex_lock(&n->vqs[i].vq.mutex);
-               n->vqs[i].vq.acked_backend_features = features;
-               mutex_unlock(&n->vqs[i].vq.mutex);
-       }
-       mutex_unlock(&n->dev.mutex);
-
-       return 0;
-}
-
 static int vhost_net_set_features(struct vhost_net *n, u64 features)
 {
        size_t vhost_hlen, sock_hlen, hdr_len;
@@ -1653,7 +1638,7 @@ static int vhost_net_set_features(struct vhost_net *n, u64 features)
            !vhost_log_access_ok(&n->dev))
                goto out_unlock;
 
-       if ((features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))) {
+       if ((features & (1ULL << VIRTIO_F_ACCESS_PLATFORM))) {
                if (vhost_init_device_iotlb(&n->dev, true))
                        goto out_unlock;
        }
@@ -1730,7 +1715,8 @@ static long vhost_net_ioctl(struct file *f, unsigned int ioctl,
                        return -EFAULT;
                if (features & ~VHOST_NET_BACKEND_FEATURES)
                        return -EOPNOTSUPP;
-               return vhost_net_set_backend_features(n, features);
+               vhost_set_backend_features(&n->dev, features);
+               return 0;
        case VHOST_RESET_OWNER:
                return vhost_net_reset_owner(n);
        case VHOST_SET_OWNER:
index a54b60d..3fab94f 100644 (file)
 #include "vhost.h"
 
 enum {
-       VHOST_VDPA_FEATURES =
-               (1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) |
-               (1ULL << VIRTIO_F_ANY_LAYOUT) |
-               (1ULL << VIRTIO_F_VERSION_1) |
-               (1ULL << VIRTIO_F_IOMMU_PLATFORM) |
-               (1ULL << VIRTIO_F_RING_PACKED) |
-               (1ULL << VIRTIO_F_ORDER_PLATFORM) |
-               (1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
-               (1ULL << VIRTIO_RING_F_EVENT_IDX),
-
-       VHOST_VDPA_NET_FEATURES = VHOST_VDPA_FEATURES |
-               (1ULL << VIRTIO_NET_F_CSUM) |
-               (1ULL << VIRTIO_NET_F_GUEST_CSUM) |
-               (1ULL << VIRTIO_NET_F_MTU) |
-               (1ULL << VIRTIO_NET_F_MAC) |
-               (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
-               (1ULL << VIRTIO_NET_F_GUEST_TSO6) |
-               (1ULL << VIRTIO_NET_F_GUEST_ECN) |
-               (1ULL << VIRTIO_NET_F_GUEST_UFO) |
-               (1ULL << VIRTIO_NET_F_HOST_TSO4) |
-               (1ULL << VIRTIO_NET_F_HOST_TSO6) |
-               (1ULL << VIRTIO_NET_F_HOST_ECN) |
-               (1ULL << VIRTIO_NET_F_HOST_UFO) |
-               (1ULL << VIRTIO_NET_F_MRG_RXBUF) |
-               (1ULL << VIRTIO_NET_F_STATUS) |
-               (1ULL << VIRTIO_NET_F_SPEED_DUPLEX),
+       VHOST_VDPA_BACKEND_FEATURES =
+       (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2) |
+       (1ULL << VHOST_BACKEND_F_IOTLB_BATCH),
 };
 
-/* Currently, only network backend w/o multiqueue is supported. */
-#define VHOST_VDPA_VQ_MAX      2
-
 #define VHOST_VDPA_DEV_MAX (1U << MINORBITS)
 
 struct vhost_vdpa {
@@ -73,16 +47,13 @@ struct vhost_vdpa {
        int virtio_id;
        int minor;
        struct eventfd_ctx *config_ctx;
+       int in_batch;
 };
 
 static DEFINE_IDA(vhost_vdpa_ida);
 
 static dev_t vhost_vdpa_major;
 
-static const u64 vhost_vdpa_features[] = {
-       [VIRTIO_ID_NET] = VHOST_VDPA_NET_FEATURES,
-};
-
 static void handle_vq_kick(struct vhost_work *work)
 {
        struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
@@ -96,7 +67,7 @@ static void handle_vq_kick(struct vhost_work *work)
 static irqreturn_t vhost_vdpa_virtqueue_cb(void *private)
 {
        struct vhost_virtqueue *vq = private;
-       struct eventfd_ctx *call_ctx = vq->call_ctx;
+       struct eventfd_ctx *call_ctx = vq->call_ctx.ctx;
 
        if (call_ctx)
                eventfd_signal(call_ctx, 1);
@@ -115,12 +86,45 @@ static irqreturn_t vhost_vdpa_config_cb(void *private)
        return IRQ_HANDLED;
 }
 
+static void vhost_vdpa_setup_vq_irq(struct vhost_vdpa *v, u16 qid)
+{
+       struct vhost_virtqueue *vq = &v->vqs[qid];
+       const struct vdpa_config_ops *ops = v->vdpa->config;
+       struct vdpa_device *vdpa = v->vdpa;
+       int ret, irq;
+
+       if (!ops->get_vq_irq)
+               return;
+
+       irq = ops->get_vq_irq(vdpa, qid);
+       spin_lock(&vq->call_ctx.ctx_lock);
+       irq_bypass_unregister_producer(&vq->call_ctx.producer);
+       if (!vq->call_ctx.ctx || irq < 0) {
+               spin_unlock(&vq->call_ctx.ctx_lock);
+               return;
+       }
+
+       vq->call_ctx.producer.token = vq->call_ctx.ctx;
+       vq->call_ctx.producer.irq = irq;
+       ret = irq_bypass_register_producer(&vq->call_ctx.producer);
+       spin_unlock(&vq->call_ctx.ctx_lock);
+}
+
+static void vhost_vdpa_unsetup_vq_irq(struct vhost_vdpa *v, u16 qid)
+{
+       struct vhost_virtqueue *vq = &v->vqs[qid];
+
+       spin_lock(&vq->call_ctx.ctx_lock);
+       irq_bypass_unregister_producer(&vq->call_ctx.producer);
+       spin_unlock(&vq->call_ctx.ctx_lock);
+}
+
 static void vhost_vdpa_reset(struct vhost_vdpa *v)
 {
        struct vdpa_device *vdpa = v->vdpa;
-       const struct vdpa_config_ops *ops = vdpa->config;
 
-       ops->set_status(vdpa, 0);
+       vdpa_reset(vdpa);
+       v->in_batch = 0;
 }
 
 static long vhost_vdpa_get_device_id(struct vhost_vdpa *v, u8 __user *argp)
@@ -155,11 +159,15 @@ static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
 {
        struct vdpa_device *vdpa = v->vdpa;
        const struct vdpa_config_ops *ops = vdpa->config;
-       u8 status;
+       u8 status, status_old;
+       int nvqs = v->nvqs;
+       u16 i;
 
        if (copy_from_user(&status, statusp, sizeof(status)))
                return -EFAULT;
 
+       status_old = ops->get_status(vdpa);
+
        /*
         * Userspace shouldn't remove status bits unless reset the
         * status to 0.
@@ -169,6 +177,14 @@ static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
 
        ops->set_status(vdpa, status);
 
+       if ((status & VIRTIO_CONFIG_S_DRIVER_OK) && !(status_old & VIRTIO_CONFIG_S_DRIVER_OK))
+               for (i = 0; i < nvqs; i++)
+                       vhost_vdpa_setup_vq_irq(v, i);
+
+       if ((status_old & VIRTIO_CONFIG_S_DRIVER_OK) && !(status & VIRTIO_CONFIG_S_DRIVER_OK))
+               for (i = 0; i < nvqs; i++)
+                       vhost_vdpa_unsetup_vq_irq(v, i);
+
        return 0;
 }
 
@@ -196,7 +212,6 @@ static long vhost_vdpa_get_config(struct vhost_vdpa *v,
                                  struct vhost_vdpa_config __user *c)
 {
        struct vdpa_device *vdpa = v->vdpa;
-       const struct vdpa_config_ops *ops = vdpa->config;
        struct vhost_vdpa_config config;
        unsigned long size = offsetof(struct vhost_vdpa_config, buf);
        u8 *buf;
@@ -209,7 +224,7 @@ static long vhost_vdpa_get_config(struct vhost_vdpa *v,
        if (!buf)
                return -ENOMEM;
 
-       ops->get_config(vdpa, config.off, buf, config.len);
+       vdpa_get_config(vdpa, config.off, buf, config.len);
 
        if (copy_to_user(c->buf, buf, config.len)) {
                kvfree(buf);
@@ -255,7 +270,6 @@ static long vhost_vdpa_get_features(struct vhost_vdpa *v, u64 __user *featurep)
        u64 features;
 
        features = ops->get_features(vdpa);
-       features &= vhost_vdpa_features[v->virtio_id];
 
        if (copy_to_user(featurep, &features, sizeof(features)))
                return -EFAULT;
@@ -279,10 +293,7 @@ static long vhost_vdpa_set_features(struct vhost_vdpa *v, u64 __user *featurep)
        if (copy_from_user(&features, featurep, sizeof(features)))
                return -EFAULT;
 
-       if (features & ~vhost_vdpa_features[v->virtio_id])
-               return -EINVAL;
-
-       if (ops->set_features(vdpa, features))
+       if (vdpa_set_features(vdpa, features))
                return -EINVAL;
 
        return 0;
@@ -332,14 +343,18 @@ static long vhost_vdpa_set_config_call(struct vhost_vdpa *v, u32 __user *argp)
 
        return 0;
 }
+
 static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
                                   void __user *argp)
 {
        struct vdpa_device *vdpa = v->vdpa;
        const struct vdpa_config_ops *ops = vdpa->config;
+       struct vdpa_vq_state vq_state;
        struct vdpa_callback cb;
        struct vhost_virtqueue *vq;
        struct vhost_vring_state s;
+       u64 __user *featurep = argp;
+       u64 features;
        u32 idx;
        long r;
 
@@ -353,15 +368,32 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
        idx = array_index_nospec(idx, v->nvqs);
        vq = &v->vqs[idx];
 
-       if (cmd == VHOST_VDPA_SET_VRING_ENABLE) {
+       switch (cmd) {
+       case VHOST_VDPA_SET_VRING_ENABLE:
                if (copy_from_user(&s, argp, sizeof(s)))
                        return -EFAULT;
                ops->set_vq_ready(vdpa, idx, s.num);
                return 0;
-       }
+       case VHOST_GET_VRING_BASE:
+               r = ops->get_vq_state(v->vdpa, idx, &vq_state);
+               if (r)
+                       return r;
 
-       if (cmd == VHOST_GET_VRING_BASE)
-               vq->last_avail_idx = ops->get_vq_state(v->vdpa, idx);
+               vq->last_avail_idx = vq_state.avail_index;
+               break;
+       case VHOST_GET_BACKEND_FEATURES:
+               features = VHOST_VDPA_BACKEND_FEATURES;
+               if (copy_to_user(featurep, &features, sizeof(features)))
+                       return -EFAULT;
+               return 0;
+       case VHOST_SET_BACKEND_FEATURES:
+               if (copy_from_user(&features, featurep, sizeof(features)))
+                       return -EFAULT;
+               if (features & ~VHOST_VDPA_BACKEND_FEATURES)
+                       return -EOPNOTSUPP;
+               vhost_set_backend_features(&v->vdev, features);
+               return 0;
+       }
 
        r = vhost_vring_ioctl(&v->vdev, cmd, argp);
        if (r)
@@ -377,12 +409,13 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
                break;
 
        case VHOST_SET_VRING_BASE:
-               if (ops->set_vq_state(vdpa, idx, vq->last_avail_idx))
+               vq_state.avail_index = vq->last_avail_idx;
+               if (ops->set_vq_state(vdpa, idx, &vq_state))
                        r = -EINVAL;
                break;
 
        case VHOST_SET_VRING_CALL:
-               if (vq->call_ctx) {
+               if (vq->call_ctx.ctx) {
                        cb.callback = vhost_vdpa_virtqueue_cb;
                        cb.private = vq;
                } else {
@@ -390,6 +423,7 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
                        cb.private = NULL;
                }
                ops->set_vq_cb(vdpa, idx, &cb);
+               vhost_vdpa_setup_vq_irq(v, idx);
                break;
 
        case VHOST_SET_VRING_NUM:
@@ -519,13 +553,15 @@ static int vhost_vdpa_map(struct vhost_vdpa *v,
        if (r)
                return r;
 
-       if (ops->dma_map)
+       if (ops->dma_map) {
                r = ops->dma_map(vdpa, iova, size, pa, perm);
-       else if (ops->set_map)
-               r = ops->set_map(vdpa, dev->iotlb);
-       else
+       } else if (ops->set_map) {
+               if (!v->in_batch)
+                       r = ops->set_map(vdpa, dev->iotlb);
+       } else {
                r = iommu_map(v->domain, iova, pa, size,
                              perm_to_iommu_flags(perm));
+       }
 
        return r;
 }
@@ -538,12 +574,14 @@ static void vhost_vdpa_unmap(struct vhost_vdpa *v, u64 iova, u64 size)
 
        vhost_vdpa_iotlb_unmap(v, iova, iova + size - 1);
 
-       if (ops->dma_map)
+       if (ops->dma_map) {
                ops->dma_unmap(vdpa, iova, size);
-       else if (ops->set_map)
-               ops->set_map(vdpa, dev->iotlb);
-       else
+       } else if (ops->set_map) {
+               if (!v->in_batch)
+                       ops->set_map(vdpa, dev->iotlb);
+       } else {
                iommu_unmap(v->domain, iova, size);
+       }
 }
 
 static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
@@ -636,6 +674,8 @@ static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
                                        struct vhost_iotlb_msg *msg)
 {
        struct vhost_vdpa *v = container_of(dev, struct vhost_vdpa, vdev);
+       struct vdpa_device *vdpa = v->vdpa;
+       const struct vdpa_config_ops *ops = vdpa->config;
        int r = 0;
 
        r = vhost_dev_check_owner(dev);
@@ -649,6 +689,14 @@ static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
        case VHOST_IOTLB_INVALIDATE:
                vhost_vdpa_unmap(v, msg->iova, msg->size);
                break;
+       case VHOST_IOTLB_BATCH_BEGIN:
+               v->in_batch = true;
+               break;
+       case VHOST_IOTLB_BATCH_END:
+               if (v->in_batch && ops->set_map)
+                       ops->set_map(vdpa, dev->iotlb);
+               v->in_batch = false;
+               break;
        default:
                r = -EINVAL;
                break;
@@ -765,6 +813,18 @@ err:
        return r;
 }
 
+static void vhost_vdpa_clean_irq(struct vhost_vdpa *v)
+{
+       struct vhost_virtqueue *vq;
+       int i;
+
+       for (i = 0; i < v->nvqs; i++) {
+               vq = &v->vqs[i];
+               if (vq->call_ctx.producer.irq)
+                       irq_bypass_unregister_producer(&vq->call_ctx.producer);
+       }
+}
+
 static int vhost_vdpa_release(struct inode *inode, struct file *filep)
 {
        struct vhost_vdpa *v = filep->private_data;
@@ -777,6 +837,7 @@ static int vhost_vdpa_release(struct inode *inode, struct file *filep)
        vhost_vdpa_iotlb_free(v);
        vhost_vdpa_free_domain(v);
        vhost_vdpa_config_put(v);
+       vhost_vdpa_clean_irq(v);
        vhost_dev_cleanup(&v->vdev);
        kfree(v->vdev.vqs);
        mutex_unlock(&d->mutex);
@@ -872,7 +933,7 @@ static int vhost_vdpa_probe(struct vdpa_device *vdpa)
 {
        const struct vdpa_config_ops *ops = vdpa->config;
        struct vhost_vdpa *v;
-       int minor, nvqs = VHOST_VDPA_VQ_MAX;
+       int minor;
        int r;
 
        /* Currently, we only accept the network devices. */
@@ -893,14 +954,14 @@ static int vhost_vdpa_probe(struct vdpa_device *vdpa)
        atomic_set(&v->opened, 0);
        v->minor = minor;
        v->vdpa = vdpa;
-       v->nvqs = nvqs;
+       v->nvqs = vdpa->nvqs;
        v->virtio_id = ops->get_device_id(vdpa);
 
        device_initialize(&v->dev);
        v->dev.release = vhost_vdpa_release_dev;
        v->dev.parent = &vdpa->dev;
        v->dev.devt = MKDEV(MAJOR(vhost_vdpa_major), minor);
-       v->vqs = kmalloc_array(nvqs, sizeof(struct vhost_virtqueue),
+       v->vqs = kmalloc_array(v->nvqs, sizeof(struct vhost_virtqueue),
                               GFP_KERNEL);
        if (!v->vqs) {
                r = -ENOMEM;
index 74d135e..5857d4e 100644 (file)
@@ -298,6 +298,13 @@ static void vhost_vq_meta_reset(struct vhost_dev *d)
                __vhost_vq_meta_reset(d->vqs[i]);
 }
 
+static void vhost_vring_call_reset(struct vhost_vring_call *call_ctx)
+{
+       call_ctx->ctx = NULL;
+       memset(&call_ctx->producer, 0x0, sizeof(struct irq_bypass_producer));
+       spin_lock_init(&call_ctx->ctx_lock);
+}
+
 static void vhost_vq_reset(struct vhost_dev *dev,
                           struct vhost_virtqueue *vq)
 {
@@ -319,13 +326,13 @@ static void vhost_vq_reset(struct vhost_dev *dev,
        vq->log_base = NULL;
        vq->error_ctx = NULL;
        vq->kick = NULL;
-       vq->call_ctx = NULL;
        vq->log_ctx = NULL;
        vhost_reset_is_le(vq);
        vhost_disable_cross_endian(vq);
        vq->busyloop_timeout = 0;
        vq->umem = NULL;
        vq->iotlb = NULL;
+       vhost_vring_call_reset(&vq->call_ctx);
        __vhost_vq_meta_reset(vq);
 }
 
@@ -685,8 +692,8 @@ void vhost_dev_cleanup(struct vhost_dev *dev)
                        eventfd_ctx_put(dev->vqs[i]->error_ctx);
                if (dev->vqs[i]->kick)
                        fput(dev->vqs[i]->kick);
-               if (dev->vqs[i]->call_ctx)
-                       eventfd_ctx_put(dev->vqs[i]->call_ctx);
+               if (dev->vqs[i]->call_ctx.ctx)
+                       eventfd_ctx_put(dev->vqs[i]->call_ctx.ctx);
                vhost_vq_reset(dev, dev->vqs[i]);
        }
        vhost_dev_free_iovecs(dev);
@@ -1405,7 +1412,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
 
        memcpy(newmem, &mem, size);
        if (copy_from_user(newmem->regions, m->regions,
-                          mem.nregions * sizeof *m->regions)) {
+                          flex_array_size(newmem, regions, mem.nregions))) {
                kvfree(newmem);
                return -EFAULT;
        }
@@ -1629,7 +1636,10 @@ long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *arg
                        r = PTR_ERR(ctx);
                        break;
                }
-               swap(ctx, vq->call_ctx);
+
+               spin_lock(&vq->call_ctx.ctx_lock);
+               swap(ctx, vq->call_ctx.ctx);
+               spin_unlock(&vq->call_ctx.ctx_lock);
                break;
        case VHOST_SET_VRING_ERR:
                if (copy_from_user(&f, argp, sizeof f)) {
@@ -2435,8 +2445,8 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 {
        /* Signal the Guest tell them we used something up. */
-       if (vq->call_ctx && vhost_notify(dev, vq))
-               eventfd_signal(vq->call_ctx, 1);
+       if (vq->call_ctx.ctx && vhost_notify(dev, vq))
+               eventfd_signal(vq->call_ctx.ctx, 1);
 }
 EXPORT_SYMBOL_GPL(vhost_signal);
 
@@ -2576,6 +2586,21 @@ struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev,
 }
 EXPORT_SYMBOL_GPL(vhost_dequeue_msg);
 
+void vhost_set_backend_features(struct vhost_dev *dev, u64 features)
+{
+       struct vhost_virtqueue *vq;
+       int i;
+
+       mutex_lock(&dev->mutex);
+       for (i = 0; i < dev->nvqs; ++i) {
+               vq = dev->vqs[i];
+               mutex_lock(&vq->mutex);
+               vq->acked_backend_features = features;
+               mutex_unlock(&vq->mutex);
+       }
+       mutex_unlock(&dev->mutex);
+}
+EXPORT_SYMBOL_GPL(vhost_set_backend_features);
 
 static int __init vhost_init(void)
 {
index c8e96a0..9032d3c 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/virtio_ring.h>
 #include <linux/atomic.h>
 #include <linux/vhost_iotlb.h>
+#include <linux/irqbypass.h>
 
 struct vhost_work;
 typedef void (*vhost_work_fn_t)(struct vhost_work *work);
@@ -60,6 +61,12 @@ enum vhost_uaddr_type {
        VHOST_NUM_ADDRS = 3,
 };
 
+struct vhost_vring_call {
+       struct eventfd_ctx *ctx;
+       struct irq_bypass_producer producer;
+       spinlock_t ctx_lock;
+};
+
 /* The virtqueue structure describes a queue attached to a device. */
 struct vhost_virtqueue {
        struct vhost_dev *dev;
@@ -72,7 +79,7 @@ struct vhost_virtqueue {
        vring_used_t __user *used;
        const struct vhost_iotlb_map *meta_iotlb[VHOST_NUM_ADDRS];
        struct file *kick;
-       struct eventfd_ctx *call_ctx;
+       struct vhost_vring_call call_ctx;
        struct eventfd_ctx *error_ctx;
        struct eventfd_ctx *log_ctx;
 
@@ -207,6 +214,8 @@ void vhost_enqueue_msg(struct vhost_dev *dev,
                       struct vhost_msg_node *node);
 struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev,
                                         struct list_head *head);
+void vhost_set_backend_features(struct vhost_dev *dev, u64 features);
+
 __poll_t vhost_chr_poll(struct file *file, struct vhost_dev *dev,
                            poll_table *wait);
 ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
index 20d96a5..25e409b 100644 (file)
@@ -121,18 +121,7 @@ out:
 
 static int pm860x_backlight_update_status(struct backlight_device *bl)
 {
-       int brightness = bl->props.brightness;
-
-       if (bl->props.power != FB_BLANK_UNBLANK)
-               brightness = 0;
-
-       if (bl->props.fb_blank != FB_BLANK_UNBLANK)
-               brightness = 0;
-
-       if (bl->props.state & BL_CORE_SUSPENDED)
-               brightness = 0;
-
-       return pm860x_backlight_set(bl, brightness);
+       return pm860x_backlight_set(bl, backlight_get_brightness(bl));
 }
 
 static int pm860x_backlight_get_brightness(struct backlight_device *bl)
index 7d22d73..87f9fc2 100644 (file)
@@ -173,14 +173,6 @@ config BACKLIGHT_EP93XX
          To compile this driver as a module, choose M here: the module will
          be called ep93xx_bl.
 
-config BACKLIGHT_GENERIC
-       tristate "Generic (aka Sharp Corgi) Backlight Driver"
-       default y
-       help
-         Say y to enable the generic platform backlight driver previously
-         known as the Corgi backlight driver. If you have a Sharp Zaurus
-         SL-C7xx, SL-Cxx00 or SL-6000x say y.
-
 config BACKLIGHT_IPAQ_MICRO
        tristate "iPAQ microcontroller backlight driver"
        depends on MFD_IPAQ_MICRO
@@ -386,13 +378,6 @@ config BACKLIGHT_LP8788
        help
          This supports TI LP8788 backlight driver.
 
-config BACKLIGHT_OT200
-       tristate "Backlight driver for ot200 visualisation device"
-       depends on CS5535_MFGPT && GPIO_CS5535
-       help
-         To compile this driver as a module, choose M here: the module will be
-         called ot200_bl.
-
 config BACKLIGHT_PANDORA
        tristate "Backlight driver for Pandora console"
        depends on TWL4030_CORE
index 0c1a152..13463b9 100644 (file)
@@ -31,7 +31,6 @@ obj-$(CONFIG_BACKLIGHT_CLASS_DEVICE)  += backlight.o
 obj-$(CONFIG_BACKLIGHT_DA903X)         += da903x_bl.o
 obj-$(CONFIG_BACKLIGHT_DA9052)         += da9052_bl.o
 obj-$(CONFIG_BACKLIGHT_EP93XX)         += ep93xx_bl.o
-obj-$(CONFIG_BACKLIGHT_GENERIC)                += generic_bl.o
 obj-$(CONFIG_BACKLIGHT_GPIO)           += gpio_backlight.o
 obj-$(CONFIG_BACKLIGHT_HP680)          += hp680_bl.o
 obj-$(CONFIG_BACKLIGHT_HP700)          += jornada720_bl.o
@@ -45,7 +44,6 @@ obj-$(CONFIG_BACKLIGHT_LP8788)                += lp8788_bl.o
 obj-$(CONFIG_BACKLIGHT_LV5207LP)       += lv5207lp.o
 obj-$(CONFIG_BACKLIGHT_MAX8925)                += max8925_bl.o
 obj-$(CONFIG_BACKLIGHT_OMAP1)          += omap1_bl.o
-obj-$(CONFIG_BACKLIGHT_OT200)          += ot200_bl.o
 obj-$(CONFIG_BACKLIGHT_PANDORA)                += pandora_bl.o
 obj-$(CONFIG_BACKLIGHT_PCF50633)       += pcf50633-backlight.o
 obj-$(CONFIG_BACKLIGHT_PWM)            += pwm_bl.o
index 0f63f76..686988c 100644 (file)
@@ -65,15 +65,7 @@ static int adp5520_bl_set(struct backlight_device *bl, int brightness)
 
 static int adp5520_bl_update_status(struct backlight_device *bl)
 {
-       int brightness = bl->props.brightness;
-
-       if (bl->props.power != FB_BLANK_UNBLANK)
-               brightness = 0;
-
-       if (bl->props.fb_blank != FB_BLANK_UNBLANK)
-               brightness = 0;
-
-       return adp5520_bl_set(bl, brightness);
+       return adp5520_bl_set(bl, backlight_get_brightness(bl));
 }
 
 static int adp5520_bl_get_brightness(struct backlight_device *bl)
index 1996810..ddc7f5f 100644 (file)
@@ -361,15 +361,7 @@ static int adp8860_bl_set(struct backlight_device *bl, int brightness)
 
 static int adp8860_bl_update_status(struct backlight_device *bl)
 {
-       int brightness = bl->props.brightness;
-
-       if (bl->props.power != FB_BLANK_UNBLANK)
-               brightness = 0;
-
-       if (bl->props.fb_blank != FB_BLANK_UNBLANK)
-               brightness = 0;
-
-       return adp8860_bl_set(bl, brightness);
+       return adp8860_bl_set(bl, backlight_get_brightness(bl));
 }
 
 static int adp8860_bl_get_brightness(struct backlight_device *bl)
index 4c00320..8b5213a 100644 (file)
@@ -399,15 +399,7 @@ static int adp8870_bl_set(struct backlight_device *bl, int brightness)
 
 static int adp8870_bl_update_status(struct backlight_device *bl)
 {
-       int brightness = bl->props.brightness;
-
-       if (bl->props.power != FB_BLANK_UNBLANK)
-               brightness = 0;
-
-       if (bl->props.fb_blank != FB_BLANK_UNBLANK)
-               brightness = 0;
-
-       return adp8870_bl_set(bl, brightness);
+       return adp8870_bl_set(bl, backlight_get_brightness(bl));
 }
 
 static int adp8870_bl_get_brightness(struct backlight_device *bl)
index 33f0f0f..3b60019 100644 (file)
@@ -104,17 +104,10 @@ static int as3711_bl_update_status(struct backlight_device *bl)
        struct as3711_bl_data *data = bl_get_data(bl);
        struct as3711_bl_supply *supply = to_supply(data);
        struct as3711 *as3711 = supply->as3711;
-       int brightness = bl->props.brightness;
+       int brightness;
        int ret = 0;
 
-       dev_dbg(&bl->dev, "%s(): brightness %u, pwr %x, blank %x, state %x\n",
-               __func__, bl->props.brightness, bl->props.power,
-               bl->props.fb_blank, bl->props.state);
-
-       if (bl->props.power != FB_BLANK_UNBLANK ||
-           bl->props.fb_blank != FB_BLANK_UNBLANK ||
-           bl->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK))
-               brightness = 0;
+       brightness = backlight_get_brightness(bl);
 
        if (data->type == AS3711_BL_SU1) {
                ret = as3711_set_brightness_v(as3711, brightness,
index 92d80aa..537fe1b 100644 (file)
 #include <asm/backlight.h>
 #endif
 
+/**
+ * DOC: overview
+ *
+ * The backlight core supports implementing backlight drivers.
+ *
+ * A backlight driver registers a backlight device using
+ * devm_backlight_device_register(). The properties of the backlight
+ * device, such as type and max_brightness, must be specified.
+ * When the core detects changes in, for example, brightness or power state,
+ * the update_status() operation is called. The backlight driver shall
+ * implement this operation and use it to adjust the backlight.
+ *
+ * Several sysfs attributes are provided by the backlight core::
+ *
+ * - brightness         R/W, set the requested brightness level
+ * - actual_brightness  RO, the brightness level used by the HW
+ * - max_brightness     RO, the maximum brightness level supported
+ *
+ * See Documentation/ABI/stable/sysfs-class-backlight for the full list.
+ *
+ * The backlight can be adjusted using the sysfs interface, and
+ * the backlight driver may also support adjusting the backlight using
+ * a hot-key or some other platform- or firmware-specific way.
+ *
+ * The driver must implement the get_brightness() operation if
+ * the HW does not support all the levels that can be specified in
+ * brightness, thus providing user-space access to the actual level
+ * via the actual_brightness attribute.
+ *
+ * When the backlight changes, this is reported to user-space using
+ * an uevent connected to the actual_brightness attribute.
+ * When brightness is set by platform-specific means, for example
+ * a hot-key to adjust the backlight, the driver must notify the backlight
+ * core that brightness has changed using backlight_force_update().
+ *
+ * The backlight driver core receives notifications from fbdev and,
+ * if the event is FB_EVENT_BLANK and the value of blank from the
+ * FBIOBLANK ioctl results in a change in the backlight state, the
+ * update_status() operation is called.
+ */
+
 static struct list_head backlight_dev_list;
 static struct mutex backlight_dev_list_mutex;
 static struct blocking_notifier_head backlight_notifier;
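
For orientation, the registration flow described in the DOC comment above boils
down to filling in backlight_properties, providing an update_status() hook and
calling devm_backlight_device_register(). The sketch below is illustrative only
and not part of this patch; the "my_bl" names and the my_hw_set_level() helper
are hypothetical stand-ins for a real device.

#include <linux/backlight.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/string.h>

struct my_bl {
        struct device *dev;
};

/* Hypothetical hardware accessor; a real driver programs a PWM or register. */
static int my_hw_set_level(struct my_bl *priv, int level)
{
        dev_dbg(priv->dev, "set level %d\n", level);
        return 0;
}

static int my_bl_update_status(struct backlight_device *bl)
{
        struct my_bl *priv = bl_get_data(bl);

        /* backlight_get_brightness() folds in power/blank/suspend state */
        return my_hw_set_level(priv, backlight_get_brightness(bl));
}

static const struct backlight_ops my_bl_ops = {
        .update_status = my_bl_update_status,
};

static int my_bl_probe(struct platform_device *pdev)
{
        struct backlight_properties props;
        struct backlight_device *bl;
        struct my_bl *priv;

        priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;
        priv->dev = &pdev->dev;

        /* type and max_brightness must be set before registration */
        memset(&props, 0, sizeof(props));
        props.type = BACKLIGHT_RAW;
        props.max_brightness = 255;
        props.brightness = 255;

        bl = devm_backlight_device_register(&pdev->dev, "my_bl", &pdev->dev,
                                            priv, &my_bl_ops, &props);
        return PTR_ERR_OR_ZERO(bl);
}

static struct platform_driver my_bl_driver = {
        .probe = my_bl_probe,
        .driver = { .name = "my_bl" },
};
module_platform_driver(my_bl_driver);
MODULE_LICENSE("GPL");
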
@@ -40,9 +81,17 @@ static const char *const backlight_scale_types[] = {
 
 #if defined(CONFIG_FB) || (defined(CONFIG_FB_MODULE) && \
                           defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE))
-/* This callback gets called when something important happens inside a
- * framebuffer driver. We're looking if that important event is blanking,
- * and if it is and necessary, we're switching backlight power as well ...
+/*
+ * fb_notifier_callback
+ *
+ * This callback gets called when something important happens inside a
+ * framebuffer driver. The backlight core only cares about FB_BLANK_UNBLANK
+ * which is reported to the driver using backlight_update_status()
+ * as a state change.
+ *
+ * There may be several fbdevs connected to the backlight device,
+ * in which case each is tracked individually. A state change is only
+ * reported if there is a change in backlight state for the specified fbdev.
  */
 static int fb_notifier_callback(struct notifier_block *self,
                                unsigned long event, void *data)
@@ -58,28 +107,29 @@ static int fb_notifier_callback(struct notifier_block *self,
 
        bd = container_of(self, struct backlight_device, fb_notif);
        mutex_lock(&bd->ops_lock);
-       if (bd->ops)
-               if (!bd->ops->check_fb ||
-                   bd->ops->check_fb(bd, evdata->info)) {
-                       fb_blank = *(int *)evdata->data;
-                       if (fb_blank == FB_BLANK_UNBLANK &&
-                           !bd->fb_bl_on[node]) {
-                               bd->fb_bl_on[node] = true;
-                               if (!bd->use_count++) {
-                                       bd->props.state &= ~BL_CORE_FBBLANK;
-                                       bd->props.fb_blank = FB_BLANK_UNBLANK;
-                                       backlight_update_status(bd);
-                               }
-                       } else if (fb_blank != FB_BLANK_UNBLANK &&
-                                  bd->fb_bl_on[node]) {
-                               bd->fb_bl_on[node] = false;
-                               if (!(--bd->use_count)) {
-                                       bd->props.state |= BL_CORE_FBBLANK;
-                                       bd->props.fb_blank = fb_blank;
-                                       backlight_update_status(bd);
-                               }
-                       }
+
+       if (!bd->ops)
+               goto out;
+       if (bd->ops->check_fb && !bd->ops->check_fb(bd, evdata->info))
+               goto out;
+
+       fb_blank = *(int *)evdata->data;
+       if (fb_blank == FB_BLANK_UNBLANK && !bd->fb_bl_on[node]) {
+               bd->fb_bl_on[node] = true;
+               if (!bd->use_count++) {
+                       bd->props.state &= ~BL_CORE_FBBLANK;
+                       bd->props.fb_blank = FB_BLANK_UNBLANK;
+                       backlight_update_status(bd);
                }
+       } else if (fb_blank != FB_BLANK_UNBLANK && bd->fb_bl_on[node]) {
+               bd->fb_bl_on[node] = false;
+               if (!(--bd->use_count)) {
+                       bd->props.state |= BL_CORE_FBBLANK;
+                       bd->props.fb_blank = fb_blank;
+                       backlight_update_status(bd);
+               }
+       }
+out:
        mutex_unlock(&bd->ops_lock);
        return 0;
 }
@@ -320,9 +370,13 @@ ATTRIBUTE_GROUPS(bl_device);
  * backlight_force_update - tell the backlight subsystem that hardware state
  *   has changed
  * @bd: the backlight device to update
+ * @reason: reason for update
  *
  * Updates the internal state of the backlight in response to a hardware event,
- * and generate a uevent to notify userspace
+ * and generates an uevent to notify userspace. A backlight driver shall call
+ * backlight_force_update() when the backlight is changed using, for example,
+ * a hot-key. The updated brightness is read using get_brightness() and the
+ * brightness value is reported using an uevent.
  */
 void backlight_force_update(struct backlight_device *bd,
                            enum backlight_update_reason reason)
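
The kernel-doc above also covers the reverse direction: when a hot-key or
firmware changes the level behind the kernel's back, the driver reports it with
backlight_force_update() and the core re-reads the level through the
get_brightness() op before emitting the uevent. A minimal sketch of that path
follows; the my_bl_* names and the hardware read are hypothetical, and the stub
simply echoes the cached value.

#include <linux/backlight.h>

/* Stub; a real driver would read the current level back from hardware. */
static int my_hw_read_level(struct backlight_device *bl)
{
        return bl->props.brightness;
}

/* Wired up as .get_brightness in the driver's backlight_ops. */
static int my_bl_get_brightness(struct backlight_device *bl)
{
        return my_hw_read_level(bl);
}

/* Called from the platform's (hypothetical) hot-key event handler. */
static void my_bl_hotkey_notify(struct backlight_device *bl)
{
        /*
         * The core refreshes actual_brightness via get_brightness() and
         * emits a change uevent, as described in the kernel-doc above.
         */
        backlight_force_update(bl, BACKLIGHT_UPDATE_HOTKEY);
}
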
@@ -335,19 +389,7 @@ void backlight_force_update(struct backlight_device *bd,
 }
 EXPORT_SYMBOL(backlight_force_update);
 
-/**
- * backlight_device_register - create and register a new object of
- *   backlight_device class.
- * @name: the name of the new object(must be the same as the name of the
- *   respective framebuffer device).
- * @parent: a pointer to the parent device
- * @devdata: an optional pointer to be stored for private driver use. The
- *   methods may retrieve it by using bl_get_data(bd).
- * @ops: the backlight operations structure.
- *
- * Creates and registers new backlight device. Returns either an
- * ERR_PTR() or a pointer to the newly allocated device.
- */
+/* deprecated - use devm_backlight_device_register() */
 struct backlight_device *backlight_device_register(const char *name,
        struct device *parent, void *devdata, const struct backlight_ops *ops,
        const struct backlight_properties *props)
@@ -414,6 +456,15 @@ struct backlight_device *backlight_device_register(const char *name,
 }
 EXPORT_SYMBOL(backlight_device_register);
 
+/** backlight_device_get_by_type - find first backlight device of a type
+ * @type: the type of backlight device
+ *
+ * Look up the first backlight device of the specified type
+ *
+ * RETURNS:
+ *
+ * Pointer to backlight device if any was found. Otherwise NULL.
+ */
 struct backlight_device *backlight_device_get_by_type(enum backlight_type type)
 {
        bool found = false;
@@ -453,12 +504,7 @@ struct backlight_device *backlight_device_get_by_name(const char *name)
 }
 EXPORT_SYMBOL(backlight_device_get_by_name);
 
-/**
- * backlight_device_unregister - unregisters a backlight device object.
- * @bd: the backlight device object to be unregistered and freed.
- *
- * Unregisters a previously registered via backlight_device_register object.
- */
+/* deprecated - use devm_backlight_device_unregister() */
 void backlight_device_unregister(struct backlight_device *bd)
 {
        if (!bd)
@@ -506,10 +552,12 @@ static int devm_backlight_device_match(struct device *dev, void *res,
  * backlight_register_notifier - get notified of backlight (un)registration
  * @nb: notifier block with the notifier to call on backlight (un)registration
  *
- * @return 0 on success, otherwise a negative error code
- *
  * Register a notifier to get notified when backlight devices get registered
  * or unregistered.
+ *
+ * RETURNS:
+ *
+ * 0 on success, otherwise a negative error code
  */
 int backlight_register_notifier(struct notifier_block *nb)
 {
@@ -521,10 +569,12 @@ EXPORT_SYMBOL(backlight_register_notifier);
  * backlight_unregister_notifier - unregister a backlight notifier
  * @nb: notifier block to unregister
  *
- * @return 0 on success, otherwise a negative error code
- *
  * Register a notifier to get notified when backlight devices get registered
  * or unregistered.
+ *
+ * RETURNS:
+ *
+ * 0 on success, otherwise a negative error code
  */
 int backlight_unregister_notifier(struct notifier_block *nb)
 {
@@ -533,19 +583,21 @@ int backlight_unregister_notifier(struct notifier_block *nb)
 EXPORT_SYMBOL(backlight_unregister_notifier);
 
 /**
- * devm_backlight_device_register - resource managed backlight_device_register()
+ * devm_backlight_device_register - register a new backlight device
  * @dev: the device to register
  * @name: the name of the device
- * @parent: a pointer to the parent device
+ * @parent: a pointer to the parent device (often the same as @dev)
  * @devdata: an optional pointer to be stored for private driver use
  * @ops: the backlight operations structure
  * @props: the backlight properties
  *
- * @return a struct backlight on success, or an ERR_PTR on error
+ * Creates and registers a new backlight device. When a backlight device
+ * is registered, the configuration must be specified in the @props
+ * parameter. See the description of &backlight_properties.
+ *
+ * RETURNS:
  *
- * Managed backlight_device_register(). The backlight_device returned
- * from this function are automatically freed on driver detach.
- * See backlight_device_register() for more information.
+ * struct backlight_device on success, or an ERR_PTR on error
  */
 struct backlight_device *devm_backlight_device_register(struct device *dev,
        const char *name, struct device *parent, void *devdata,
@@ -573,13 +625,13 @@ struct backlight_device *devm_backlight_device_register(struct device *dev,
 EXPORT_SYMBOL(devm_backlight_device_register);
 
 /**
- * devm_backlight_device_unregister - resource managed backlight_device_unregister()
+ * devm_backlight_device_unregister - unregister backlight device
  * @dev: the device to unregister
  * @bd: the backlight device to unregister
  *
- * Deallocated a backlight allocated with devm_backlight_device_register().
+ * Deallocates a backlight allocated with devm_backlight_device_register().
  * Normally this function will not need to be called and the resource management
- * code will ensure that the resource is freed.
+ * code will ensure that the resources are freed.
  */
 void devm_backlight_device_unregister(struct device *dev,
                                struct backlight_device *bd)
@@ -621,22 +673,7 @@ struct backlight_device *of_find_backlight_by_node(struct device_node *node)
 EXPORT_SYMBOL(of_find_backlight_by_node);
 #endif
 
-/**
- * of_find_backlight - Get backlight device
- * @dev: Device
- *
- * This function looks for a property named 'backlight' on the DT node
- * connected to @dev and looks up the backlight device.
- *
- * Call backlight_put() to drop the reference on the backlight device.
- *
- * Returns:
- * A pointer to the backlight device if found.
- * Error pointer -EPROBE_DEFER if the DT property is set, but no backlight
- * device is found.
- * NULL if there's no backlight property.
- */
-struct backlight_device *of_find_backlight(struct device *dev)
+static struct backlight_device *of_find_backlight(struct device *dev)
 {
        struct backlight_device *bd = NULL;
        struct device_node *np;
@@ -662,20 +699,29 @@ struct backlight_device *of_find_backlight(struct device *dev)
 
        return bd;
 }
-EXPORT_SYMBOL(of_find_backlight);
 
 static void devm_backlight_release(void *data)
 {
-       backlight_put(data);
+       struct backlight_device *bd = data;
+
+       if (bd)
+               put_device(&bd->dev);
 }
 
 /**
- * devm_of_find_backlight - Resource-managed of_find_backlight()
- * @dev: Device
+ * devm_of_find_backlight - find backlight for a device
+ * @dev: the device
  *
- * Device managed version of of_find_backlight().
- * The reference on the backlight device is automatically
+ * This function looks for a property named 'backlight' on the DT node
+ * connected to @dev and looks up the backlight device. The lookup is
+ * device managed so the reference to the backlight device is automatically
  * dropped on driver detach.
+ *
+ * RETURNS:
+ *
+ * A pointer to the backlight device if found.
+ * Error pointer -EPROBE_DEFER if the DT property is set, but no backlight
+ * device is found. NULL if there's no backlight property.
  */
 struct backlight_device *devm_of_find_backlight(struct device *dev)
 {
@@ -687,7 +733,7 @@ struct backlight_device *devm_of_find_backlight(struct device *dev)
                return bd;
        ret = devm_add_action(dev, devm_backlight_release, bd);
        if (ret) {
-               backlight_put(bd);
+               put_device(&bd->dev);
                return ERR_PTR(ret);
        }
        return bd;
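
The hunks above rework the kernel-doc for the core registration API: new drivers are expected to use devm_backlight_device_register() with a filled-in struct backlight_properties and, where applicable, to look up a panel backlight with devm_of_find_backlight(). The following is an illustrative sketch only, not part of the patch; the example_* names are hypothetical and the update_status callback just logs instead of programming hardware.

/*
 * Illustrative sketch only (hypothetical example_* names); a real driver
 * would program its hardware in update_status.
 */
#include <linux/backlight.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/string.h>

static int example_bl_update_status(struct backlight_device *bd)
{
        /* Folds props.power/fb_blank/state into a single brightness value. */
        int brightness = backlight_get_brightness(bd);

        dev_dbg(&bd->dev, "setting brightness to %d\n", brightness);
        return 0;
}

static const struct backlight_ops example_bl_ops = {
        .update_status  = example_bl_update_status,
};

static int example_bl_probe(struct platform_device *pdev)
{
        struct backlight_properties props;
        struct backlight_device *bd;

        /* The configuration is passed through @props, as documented above. */
        memset(&props, 0, sizeof(props));
        props.type = BACKLIGHT_RAW;
        props.max_brightness = 255;
        props.brightness = 255;

        bd = devm_backlight_device_register(&pdev->dev, dev_name(&pdev->dev),
                                            &pdev->dev, NULL, &example_bl_ops,
                                            &props);
        if (IS_ERR(bd))
                return PTR_ERR(bd);

        backlight_update_status(bd);
        return 0;
}

static struct platform_driver example_bl_driver = {
        .probe  = example_bl_probe,
        .driver = {
                .name   = "example-bl",
        },
};
module_platform_driver(example_bl_driver);

MODULE_DESCRIPTION("Illustrative backlight registration sketch");
MODULE_LICENSE("GPL");

The generic_bl driver removed further below follows the same registration shape.
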
index d5d5fb4..515184f 100644 (file)
@@ -82,12 +82,7 @@ static int bd6107_write(struct bd6107 *bd, u8 reg, u8 data)
 static int bd6107_backlight_update_status(struct backlight_device *backlight)
 {
        struct bd6107 *bd = bl_get_data(backlight);
-       int brightness = backlight->props.brightness;
-
-       if (backlight->props.power != FB_BLANK_UNBLANK ||
-           backlight->props.fb_blank != FB_BLANK_UNBLANK ||
-           backlight->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK))
-               brightness = 0;
+       int brightness = backlight_get_brightness(backlight);
 
        if (brightness) {
                bd6107_write(bd, BD6107_PORTSEL, BD6107_PORTSEL_LEDM(2) |
index 25ef0cb..33f5d80 100644 (file)
@@ -420,13 +420,7 @@ static int corgi_bl_set_intensity(struct corgi_lcd *lcd, int intensity)
 static int corgi_bl_update_status(struct backlight_device *bd)
 {
        struct corgi_lcd *lcd = bl_get_data(bd);
-       int intensity = bd->props.brightness;
-
-       if (bd->props.power != FB_BLANK_UNBLANK)
-               intensity = 0;
-
-       if (bd->props.fb_blank != FB_BLANK_UNBLANK)
-               intensity = 0;
+       int intensity = backlight_get_brightness(bd);
 
        if (corgibl_flags & CORGIBL_SUSPENDED)
                intensity = 0;
index 4624b7b..4ad0a72 100644 (file)
@@ -59,26 +59,18 @@ struct cr_panel {
 
 static int cr_backlight_set_intensity(struct backlight_device *bd)
 {
-       int intensity = bd->props.brightness;
        u32 addr = gpio_bar + CRVML_PANEL_PORT;
        u32 cur = inl(addr);
 
-       if (bd->props.power == FB_BLANK_UNBLANK)
-               intensity = FB_BLANK_UNBLANK;
-       if (bd->props.fb_blank == FB_BLANK_UNBLANK)
-               intensity = FB_BLANK_UNBLANK;
-       if (bd->props.power == FB_BLANK_POWERDOWN)
-               intensity = FB_BLANK_POWERDOWN;
-       if (bd->props.fb_blank == FB_BLANK_POWERDOWN)
-               intensity = FB_BLANK_POWERDOWN;
-
-       if (intensity == FB_BLANK_UNBLANK) { /* FULL ON */
-               cur &= ~CRVML_BACKLIGHT_OFF;
-               outl(cur, addr);
-       } else if (intensity == FB_BLANK_POWERDOWN) { /* OFF */
+       if (backlight_get_brightness(bd) == 0) {
+               /* OFF */
                cur |= CRVML_BACKLIGHT_OFF;
                outl(cur, addr);
-       } /* anything else, don't bother */
+       } else {
+               /* FULL ON */
+               cur &= ~CRVML_BACKLIGHT_OFF;
+               outl(cur, addr);
+       }
 
        return 0;
 }
@@ -90,9 +82,9 @@ static int cr_backlight_get_intensity(struct backlight_device *bd)
        u8 intensity;
 
        if (cur & CRVML_BACKLIGHT_OFF)
-               intensity = FB_BLANK_POWERDOWN;
+               intensity = 0;
        else
-               intensity = FB_BLANK_UNBLANK;
+               intensity = 1;
 
        return intensity;
 }
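
The bd6107, corgi and cr_bllcd hunks above, like most of the driver conversions that follow, replace open-coded checks of props.power, props.fb_blank and props.state with the backlight_get_brightness() helper. Judging from the removed lines, the helper is roughly equivalent to the sketch below (the real implementation lives in include/linux/backlight.h); this is not part of the patch.

/* Rough equivalent of backlight_get_brightness(), inferred from the
 * open-coded checks deleted above. */
#include <linux/backlight.h>
#include <linux/fb.h>

static int open_coded_get_brightness(const struct backlight_device *bd)
{
        if (bd->props.power != FB_BLANK_UNBLANK ||
            bd->props.fb_blank != FB_BLANK_UNBLANK ||
            bd->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK))
                return 0;

        return bd->props.brightness;
}
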
index 62540e4..71f21bb 100644 (file)
@@ -77,18 +77,7 @@ static int da903x_backlight_set(struct backlight_device *bl, int brightness)
 
 static int da903x_backlight_update_status(struct backlight_device *bl)
 {
-       int brightness = bl->props.brightness;
-
-       if (bl->props.power != FB_BLANK_UNBLANK)
-               brightness = 0;
-
-       if (bl->props.fb_blank != FB_BLANK_UNBLANK)
-               brightness = 0;
-
-       if (bl->props.state & BL_CORE_SUSPENDED)
-               brightness = 0;
-
-       return da903x_backlight_set(bl, brightness);
+       return da903x_backlight_set(bl, backlight_get_brightness(bl));
 }
 
 static int da903x_backlight_get_brightness(struct backlight_device *bl)
index 4149e0b..2387009 100644 (file)
@@ -36,13 +36,7 @@ static int ep93xxbl_set(struct backlight_device *bl, int brightness)
 
 static int ep93xxbl_update_status(struct backlight_device *bl)
 {
-       int brightness = bl->props.brightness;
-
-       if (bl->props.power != FB_BLANK_UNBLANK ||
-           bl->props.fb_blank != FB_BLANK_UNBLANK)
-               brightness = 0;
-
-       return ep93xxbl_set(bl, brightness);
+       return ep93xxbl_set(bl, backlight_get_brightness(bl));
 }
 
 static int ep93xxbl_get_brightness(struct backlight_device *bl)
diff --git a/drivers/video/backlight/generic_bl.c b/drivers/video/backlight/generic_bl.c
deleted file mode 100644 (file)
index 8fe63db..0000000
+++ /dev/null
@@ -1,110 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- *  Generic Backlight Driver
- *
- *  Copyright (c) 2004-2008 Richard Purdie
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/mutex.h>
-#include <linux/fb.h>
-#include <linux/backlight.h>
-
-static int genericbl_intensity;
-static struct backlight_device *generic_backlight_device;
-static struct generic_bl_info *bl_machinfo;
-
-static int genericbl_send_intensity(struct backlight_device *bd)
-{
-       int intensity = bd->props.brightness;
-
-       if (bd->props.power != FB_BLANK_UNBLANK)
-               intensity = 0;
-       if (bd->props.state & BL_CORE_FBBLANK)
-               intensity = 0;
-       if (bd->props.state & BL_CORE_SUSPENDED)
-               intensity = 0;
-
-       bl_machinfo->set_bl_intensity(intensity);
-
-       genericbl_intensity = intensity;
-
-       if (bl_machinfo->kick_battery)
-               bl_machinfo->kick_battery();
-
-       return 0;
-}
-
-static int genericbl_get_intensity(struct backlight_device *bd)
-{
-       return genericbl_intensity;
-}
-
-static const struct backlight_ops genericbl_ops = {
-       .options = BL_CORE_SUSPENDRESUME,
-       .get_brightness = genericbl_get_intensity,
-       .update_status  = genericbl_send_intensity,
-};
-
-static int genericbl_probe(struct platform_device *pdev)
-{
-       struct backlight_properties props;
-       struct generic_bl_info *machinfo = dev_get_platdata(&pdev->dev);
-       const char *name = "generic-bl";
-       struct backlight_device *bd;
-
-       bl_machinfo = machinfo;
-       if (!machinfo->limit_mask)
-               machinfo->limit_mask = -1;
-
-       if (machinfo->name)
-               name = machinfo->name;
-
-       memset(&props, 0, sizeof(struct backlight_properties));
-       props.type = BACKLIGHT_RAW;
-       props.max_brightness = machinfo->max_intensity;
-       bd = devm_backlight_device_register(&pdev->dev, name, &pdev->dev,
-                                       NULL, &genericbl_ops, &props);
-       if (IS_ERR(bd))
-               return PTR_ERR(bd);
-
-       platform_set_drvdata(pdev, bd);
-
-       bd->props.power = FB_BLANK_UNBLANK;
-       bd->props.brightness = machinfo->default_intensity;
-       backlight_update_status(bd);
-
-       generic_backlight_device = bd;
-
-       dev_info(&pdev->dev, "Generic Backlight Driver Initialized.\n");
-       return 0;
-}
-
-static int genericbl_remove(struct platform_device *pdev)
-{
-       struct backlight_device *bd = platform_get_drvdata(pdev);
-
-       bd->props.power = 0;
-       bd->props.brightness = 0;
-       backlight_update_status(bd);
-
-       dev_info(&pdev->dev, "Generic Backlight Driver Unloaded\n");
-       return 0;
-}
-
-static struct platform_driver genericbl_driver = {
-       .probe          = genericbl_probe,
-       .remove         = genericbl_remove,
-       .driver         = {
-               .name   = "generic-bl",
-       },
-};
-
-module_platform_driver(genericbl_driver);
-
-MODULE_AUTHOR("Richard Purdie <rpurdie@rpsys.net>");
-MODULE_DESCRIPTION("Generic Backlight Driver");
-MODULE_LICENSE("GPL");
index 75409dd..6f78d92 100644 (file)
@@ -21,24 +21,11 @@ struct gpio_backlight {
        struct gpio_desc *gpiod;
 };
 
-static int gpio_backlight_get_next_brightness(struct backlight_device *bl)
-{
-       int brightness = bl->props.brightness;
-
-       if (bl->props.power != FB_BLANK_UNBLANK ||
-           bl->props.fb_blank != FB_BLANK_UNBLANK ||
-           bl->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK))
-               brightness = 0;
-
-       return brightness;
-}
-
 static int gpio_backlight_update_status(struct backlight_device *bl)
 {
        struct gpio_backlight *gbl = bl_get_data(bl);
-       int brightness = gpio_backlight_get_next_brightness(bl);
 
-       gpiod_set_value_cansleep(gbl->gpiod, brightness);
+       gpiod_set_value_cansleep(gbl->gpiod, backlight_get_brightness(bl));
 
        return 0;
 }
@@ -108,7 +95,7 @@ static int gpio_backlight_probe(struct platform_device *pdev)
 
        bl->props.brightness = 1;
 
-       init_brightness = gpio_backlight_get_next_brightness(bl);
+       init_brightness = backlight_get_brightness(bl);
        ret = gpiod_direction_output(gbl->gpiod, init_brightness);
        if (ret) {
                dev_err(dev, "failed to set initial brightness\n");
index 8ea42b8..9123c33 100644 (file)
@@ -33,12 +33,8 @@ static void hp680bl_send_intensity(struct backlight_device *bd)
 {
        unsigned long flags;
        u16 v;
-       int intensity = bd->props.brightness;
+       int intensity = backlight_get_brightness(bd);
 
-       if (bd->props.power != FB_BLANK_UNBLANK)
-               intensity = 0;
-       if (bd->props.fb_blank != FB_BLANK_UNBLANK)
-               intensity = 0;
        if (hp680bl_suspended)
                intensity = 0;
 
index 9c5aa3f..328aba9 100644 (file)
  *     lower frequency when the registers are read/written.
  *     The macro sets the frequency in the spi_transfer structure if
  *     the frequency exceeds the maximum value.
+ * @s: pointer to an SPI device
+ * @x: pointer to the read/write buffer pair
  */
 #define CHECK_FREQ_REG(s, x)   \
        do {                    \
 
 #define set_tx_byte(b)         (tx_invert ? ~(b) : b)
 
-/**
+/*
  * ili922x_id - id as set by manufacturer
  */
 static int ili922x_id = 1;
@@ -130,7 +132,7 @@ module_param(ili922x_id, int, 0);
 static int tx_invert;
 module_param(tx_invert, int, 0);
 
-/**
+/*
  * driver's private structure
  */
 struct ili922x {
@@ -293,6 +295,8 @@ static int ili922x_write(struct spi_device *spi, u8 reg, u16 value)
 #ifdef DEBUG
 /**
  * ili922x_reg_dump - dump all registers
+ *
+ * @spi: pointer to an SPI device
  */
 static void ili922x_reg_dump(struct spi_device *spi)
 {
index f0385f9..996f7ba 100644 (file)
@@ -54,7 +54,7 @@ static int jornada_bl_update_status(struct backlight_device *bd)
        jornada_ssp_start();
 
        /* If backlight is off then really turn it off */
-       if ((bd->props.power != FB_BLANK_UNBLANK) || (bd->props.fb_blank != FB_BLANK_UNBLANK)) {
+       if (backlight_is_blank(bd)) {
                ret = jornada_ssp_byte(BRIGHTNESSOFF);
                if (ret != TXDUMMY) {
                        dev_info(&bd->dev, "brightness off timeout\n");
index 1dfe13c..55794b2 100644 (file)
@@ -87,12 +87,8 @@ static const struct dmi_system_id kb3886bl_device_table[] __initconst = {
 
 static int kb3886bl_send_intensity(struct backlight_device *bd)
 {
-       int intensity = bd->props.brightness;
+       int intensity = backlight_get_brightness(bd);
 
-       if (bd->props.power != FB_BLANK_UNBLANK)
-               intensity = 0;
-       if (bd->props.fb_blank != FB_BLANK_UNBLANK)
-               intensity = 0;
        if (kb3886bl_flags & KB3886BL_SUSPENDED)
                intensity = 0;
 
index 78b0333..db56e46 100644 (file)
@@ -179,6 +179,7 @@ ATTRIBUTE_GROUPS(lcd_device);
  * lcd_device_register - register a new object of lcd_device class.
  * @name: the name of the new object(must be the same as the name of the
  *   respective framebuffer device).
+ * @parent: pointer to the parent's struct device.
  * @devdata: an optional pointer to be stored in the device. The
  *   methods may retrieve it by using lcd_get_data(ld).
  * @ops: the lcd operations structure.
index 3f66549..f54d256 100644 (file)
@@ -54,12 +54,7 @@ static void led_bl_power_off(struct led_bl_data *priv)
 static int led_bl_update_status(struct backlight_device *bl)
 {
        struct led_bl_data *priv = bl_get_data(bl);
-       int brightness = bl->props.brightness;
-
-       if (bl->props.power != FB_BLANK_UNBLANK ||
-           bl->props.fb_blank != FB_BLANK_UNBLANK ||
-           bl->props.state & BL_CORE_FBBLANK)
-               brightness = 0;
+       int brightness = backlight_get_brightness(bl);
 
        if (brightness > 0)
                led_bl_set_brightness(priv, brightness);
index ee09d1b..1df1b66 100644 (file)
@@ -39,14 +39,8 @@ static inline int lm3533_bl_get_ctrlbank_id(struct lm3533_bl *bl)
 static int lm3533_bl_update_status(struct backlight_device *bd)
 {
        struct lm3533_bl *bl = bl_get_data(bd);
-       int brightness = bd->props.brightness;
 
-       if (bd->props.power != FB_BLANK_UNBLANK)
-               brightness = 0;
-       if (bd->props.fb_blank != FB_BLANK_UNBLANK)
-               brightness = 0;
-
-       return lm3533_ctrlbank_set_brightness(&bl->cb, (u8)brightness);
+       return lm3533_ctrlbank_set_brightness(&bl->cb, backlight_get_brightness(bd));
 }
 
 static int lm3533_bl_get_brightness(struct backlight_device *bd)
@@ -235,7 +229,7 @@ static struct attribute *lm3533_bl_attributes[] = {
 static umode_t lm3533_bl_attr_is_visible(struct kobject *kobj,
                                             struct attribute *attr, int n)
 {
-       struct device *dev = container_of(kobj, struct device, kobj);
+       struct device *dev = kobj_to_dev(kobj);
        struct lm3533_bl *bl = dev_get_drvdata(dev);
        umode_t mode = attr->mode;
 
index ee32088..e88a2b0 100644 (file)
@@ -391,7 +391,7 @@ static int lm3630a_parse_led_sources(struct fwnode_handle *node,
                return ret;
 
        for (i = 0; i < num_sources; i++) {
-               if (sources[i] < LM3630A_SINK_0 || sources[i] > LM3630A_SINK_1)
+               if (sources[i] != LM3630A_SINK_0 && sources[i] != LM3630A_SINK_1)
                        return -EINVAL;
 
                ret |= BIT(sources[i]);
@@ -412,7 +412,7 @@ static int lm3630a_parse_bank(struct lm3630a_platform_data *pdata,
        if (ret)
                return ret;
 
-       if (bank < LM3630A_BANK_0 || bank > LM3630A_BANK_1)
+       if (bank != LM3630A_BANK_0 && bank != LM3630A_BANK_1)
                return -EINVAL;
 
        led_sources = lm3630a_parse_led_sources(node, BIT(bank));
index 8ae32e3..f949b66 100644 (file)
@@ -9,7 +9,6 @@
 #include <linux/backlight.h>
 #include <linux/delay.h>
 #include <linux/fb.h>
-#include <linux/gpio.h>
 #include <linux/lcd.h>
 #include <linux/module.h>
 #include <linux/spi/spi.h>
@@ -89,14 +88,6 @@ static const unsigned char seq_rgb_gamma[] = {
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
 };
 
-static const unsigned char seq_up_dn[] = {
-       0x36, 0x10,
-};
-
-static const unsigned char seq_sleep_in[] = {
-       0x10,
-};
-
 static const unsigned char seq_sleep_out[] = {
        0x11,
 };
index cdc02e0..297ee2e 100644 (file)
@@ -111,12 +111,8 @@ static int current_intensity;
 
 static int locomolcd_set_intensity(struct backlight_device *bd)
 {
-       int intensity = bd->props.brightness;
+       int intensity = backlight_get_brightness(bd);
 
-       if (bd->props.power != FB_BLANK_UNBLANK)
-               intensity = 0;
-       if (bd->props.fb_blank != FB_BLANK_UNBLANK)
-               intensity = 0;
        if (locomolcd_flags & LOCOMOLCD_SUSPENDED)
                intensity = 0;
 
index c6ad73a..1842ae9 100644 (file)
@@ -46,12 +46,7 @@ static int lv5207lp_write(struct lv5207lp *lv, u8 reg, u8 data)
 static int lv5207lp_backlight_update_status(struct backlight_device *backlight)
 {
        struct lv5207lp *lv = bl_get_data(backlight);
-       int brightness = backlight->props.brightness;
-
-       if (backlight->props.power != FB_BLANK_UNBLANK ||
-           backlight->props.fb_blank != FB_BLANK_UNBLANK ||
-           backlight->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK))
-               brightness = 0;
+       int brightness = backlight_get_brightness(backlight);
 
        if (brightness) {
                lv5207lp_write(lv, LV5207LP_CTRL1,
index 97cc260..e607ec6 100644 (file)
@@ -64,18 +64,7 @@ out:
 
 static int max8925_backlight_update_status(struct backlight_device *bl)
 {
-       int brightness = bl->props.brightness;
-
-       if (bl->props.power != FB_BLANK_UNBLANK)
-               brightness = 0;
-
-       if (bl->props.fb_blank != FB_BLANK_UNBLANK)
-               brightness = 0;
-
-       if (bl->props.state & BL_CORE_SUSPENDED)
-               brightness = 0;
-
-       return max8925_backlight_set(bl, brightness);
+       return max8925_backlight_set(bl, backlight_get_brightness(bl));
 }
 
 static int max8925_backlight_get_brightness(struct backlight_device *bl)
diff --git a/drivers/video/backlight/ot200_bl.c b/drivers/video/backlight/ot200_bl.c
deleted file mode 100644 (file)
index 23ee710..0000000
+++ /dev/null
@@ -1,162 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2012 Bachmann electronic GmbH
- *     Christian Gmeiner <christian.gmeiner@gmail.com>
- *
- * Backlight driver for ot200 visualisation device from
- * Bachmann electronic GmbH.
- */
-
-#include <linux/module.h>
-#include <linux/fb.h>
-#include <linux/backlight.h>
-#include <linux/gpio.h>
-#include <linux/platform_device.h>
-#include <linux/cs5535.h>
-
-static struct cs5535_mfgpt_timer *pwm_timer;
-
-/* this array defines the mapping of brightness in % to pwm frequency */
-static const u8 dim_table[101] = {0, 0, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2,
-                                 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4,
-                                 4, 5, 5, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 9, 9,
-                                 10, 10, 11, 11, 12, 12, 13, 14, 15, 15, 16,
-                                 17, 18, 19, 20, 21, 22, 23, 24, 26, 27, 28,
-                                 30, 31, 33, 35, 37, 39, 41, 43, 45, 47, 50,
-                                 53, 55, 58, 61, 65, 68, 72, 75, 79, 84, 88,
-                                 93, 97, 103, 108, 114, 120, 126, 133, 140,
-                                 147, 155, 163};
-
-struct ot200_backlight_data {
-       int current_brightness;
-};
-
-#define GPIO_DIMM      27
-#define SCALE          1
-#define CMP1MODE       0x2     /* compare on GE; output high on compare
-                                * greater than or equal */
-#define PWM_SETUP      (SCALE | CMP1MODE << 6 | MFGPT_SETUP_CNTEN)
-#define MAX_COMP2      163
-
-static int ot200_backlight_update_status(struct backlight_device *bl)
-{
-       struct ot200_backlight_data *data = bl_get_data(bl);
-       int brightness = bl->props.brightness;
-
-       if (bl->props.state & BL_CORE_FBBLANK)
-               brightness = 0;
-
-       /* enable or disable PWM timer */
-       if (brightness == 0)
-               cs5535_mfgpt_write(pwm_timer, MFGPT_REG_SETUP, 0);
-       else if (data->current_brightness == 0) {
-               cs5535_mfgpt_write(pwm_timer, MFGPT_REG_COUNTER, 0);
-               cs5535_mfgpt_write(pwm_timer, MFGPT_REG_SETUP,
-                       MFGPT_SETUP_CNTEN);
-       }
-
-       /* apply new brightness value */
-       cs5535_mfgpt_write(pwm_timer, MFGPT_REG_CMP1,
-               MAX_COMP2 - dim_table[brightness]);
-       data->current_brightness = brightness;
-
-       return 0;
-}
-
-static int ot200_backlight_get_brightness(struct backlight_device *bl)
-{
-       struct ot200_backlight_data *data = bl_get_data(bl);
-       return data->current_brightness;
-}
-
-static const struct backlight_ops ot200_backlight_ops = {
-       .update_status  = ot200_backlight_update_status,
-       .get_brightness = ot200_backlight_get_brightness,
-};
-
-static int ot200_backlight_probe(struct platform_device *pdev)
-{
-       struct backlight_device *bl;
-       struct ot200_backlight_data *data;
-       struct backlight_properties props;
-       int retval = 0;
-
-       /* request gpio */
-       if (devm_gpio_request(&pdev->dev, GPIO_DIMM,
-                               "ot200 backlight dimmer") < 0) {
-               dev_err(&pdev->dev, "failed to request GPIO %d\n", GPIO_DIMM);
-               return -ENODEV;
-       }
-
-       /* request timer */
-       pwm_timer = cs5535_mfgpt_alloc_timer(7, MFGPT_DOMAIN_ANY);
-       if (!pwm_timer) {
-               dev_err(&pdev->dev, "MFGPT 7 not available\n");
-               return -ENODEV;
-       }
-
-       data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
-       if (!data) {
-               retval = -ENOMEM;
-               goto error_devm_kzalloc;
-       }
-
-       /* setup gpio */
-       cs5535_gpio_set(GPIO_DIMM, GPIO_OUTPUT_ENABLE);
-       cs5535_gpio_set(GPIO_DIMM, GPIO_OUTPUT_AUX1);
-
-       /* setup timer */
-       cs5535_mfgpt_write(pwm_timer, MFGPT_REG_CMP1, 0);
-       cs5535_mfgpt_write(pwm_timer, MFGPT_REG_CMP2, MAX_COMP2);
-       cs5535_mfgpt_write(pwm_timer, MFGPT_REG_SETUP, PWM_SETUP);
-
-       data->current_brightness = 100;
-       props.max_brightness = 100;
-       props.brightness = 100;
-       props.type = BACKLIGHT_RAW;
-
-       bl = devm_backlight_device_register(&pdev->dev, dev_name(&pdev->dev),
-                                       &pdev->dev, data, &ot200_backlight_ops,
-                                       &props);
-       if (IS_ERR(bl)) {
-               dev_err(&pdev->dev, "failed to register backlight\n");
-               retval = PTR_ERR(bl);
-               goto error_devm_kzalloc;
-       }
-
-       platform_set_drvdata(pdev, bl);
-
-       return 0;
-
-error_devm_kzalloc:
-       cs5535_mfgpt_free_timer(pwm_timer);
-       return retval;
-}
-
-static int ot200_backlight_remove(struct platform_device *pdev)
-{
-       /* on module unload set brightness to 100% */
-       cs5535_mfgpt_write(pwm_timer, MFGPT_REG_COUNTER, 0);
-       cs5535_mfgpt_write(pwm_timer, MFGPT_REG_SETUP, MFGPT_SETUP_CNTEN);
-       cs5535_mfgpt_write(pwm_timer, MFGPT_REG_CMP1,
-               MAX_COMP2 - dim_table[100]);
-
-       cs5535_mfgpt_free_timer(pwm_timer);
-
-       return 0;
-}
-
-static struct platform_driver ot200_backlight_driver = {
-       .driver         = {
-               .name   = "ot200-backlight",
-       },
-       .probe          = ot200_backlight_probe,
-       .remove         = ot200_backlight_remove,
-};
-
-module_platform_driver(ot200_backlight_driver);
-
-MODULE_DESCRIPTION("backlight driver for ot200 visualisation device");
-MODULE_AUTHOR("Christian Gmeiner <christian.gmeiner@gmail.com>");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:ot200-backlight");
index 82b8d75..dfc7608 100644 (file)
@@ -108,14 +108,9 @@ static int compute_duty_cycle(struct pwm_bl_data *pb, int brightness)
 static int pwm_backlight_update_status(struct backlight_device *bl)
 {
        struct pwm_bl_data *pb = bl_get_data(bl);
-       int brightness = bl->props.brightness;
+       int brightness = backlight_get_brightness(bl);
        struct pwm_state state;
 
-       if (bl->props.power != FB_BLANK_UNBLANK ||
-           bl->props.fb_blank != FB_BLANK_UNBLANK ||
-           bl->props.state & BL_CORE_FBBLANK)
-               brightness = 0;
-
        if (pb->notify)
                brightness = pb->notify(pb->dev, brightness);
 
@@ -606,7 +601,8 @@ static int pwm_backlight_probe(struct platform_device *pdev)
                pb->scale = data->max_brightness;
        }
 
-       pb->lth_brightness = data->lth_brightness * (state.period / pb->scale);
+       pb->lth_brightness = data->lth_brightness * (div_u64(state.period,
+                               pb->scale));
 
        props.type = BACKLIGHT_RAW;
        props.max_brightness = data->max_brightness;
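
The pwm_bl hunk above switches the lth_brightness computation to div_u64() because struct pwm_state's period field is a 64-bit quantity (the ssd1307fb hunk further below fixes the matching %llu format specifier). A small sketch of the pattern, with a hypothetical helper name:

/* Illustrative only: divide a u64 PWM period safely on 32-bit builds,
 * where a plain '/' on 64-bit operands is not available. */
#include <linux/math64.h>
#include <linux/pwm.h>

static u64 example_period_per_step(const struct pwm_state *state,
                                   unsigned int scale)
{
        /* scale is assumed non-zero, as pb->scale is in the driver above. */
        return div_u64(state->period, scale);
}
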
index 4c8c34b..3bc7800 100644 (file)
@@ -433,14 +433,9 @@ static int wled5_ovp_delay(struct wled *wled)
 static int wled_update_status(struct backlight_device *bl)
 {
        struct wled *wled = bl_get_data(bl);
-       u16 brightness = bl->props.brightness;
+       u16 brightness = backlight_get_brightness(bl);
        int rc = 0;
 
-       if (bl->props.power != FB_BLANK_UNBLANK ||
-           bl->props.fb_blank != FB_BLANK_UNBLANK ||
-           bl->props.state & BL_CORE_FBBLANK)
-               brightness = 0;
-
        mutex_lock(&wled->lock);
        if (brightness) {
                rc = wled->wled_set_brightness(wled, brightness);
@@ -1287,14 +1282,6 @@ static const struct wled_var_cfg wled4_string_i_limit_cfg = {
        .size = ARRAY_SIZE(wled4_string_i_limit_values),
 };
 
-static const struct wled_var_cfg wled3_string_cfg = {
-       .size = 8,
-};
-
-static const struct wled_var_cfg wled4_string_cfg = {
-       .size = 16,
-};
-
 static const struct wled_var_cfg wled5_mod_sel_cfg = {
        .size = 2,
 };
index 2355f00..0ce1815 100644 (file)
@@ -8,15 +8,13 @@
 
 #include <linux/backlight.h>
 #include <linux/err.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/of.h>
-#include <linux/of_gpio.h>
 #include <linux/platform_device.h>
 #include <linux/regmap.h>
-#include <linux/platform_data/sky81452-backlight.h>
 #include <linux/slab.h>
 
 /* registers */
 #define SKY81452_DEFAULT_NAME "lcd-backlight"
 #define SKY81452_MAX_BRIGHTNESS        (SKY81452_CS + 1)
 
+/**
+ * struct sky81452_bl_platform_data - backlight platform data
+ * @name:      backlight driver name.
+ *             If it is not defined, the default name is lcd-backlight.
+ * @gpiod_enable: GPIO descriptor which controls the EN pin
+ * @enable:    Enable mask for current sink channels 1, 2, 3, 4, 5 and 6.
+ * @ignore_pwm:        true if DPWMI should be ignored.
+ * @dpwm_mode: true for DPWM dimming mode, otherwise analog dimming mode.
+ * @phase_shift: true for phase shift mode.
+ * @short_detection_threshold: short detection threshold; one of 4, 5, 6 or 7V.
+ * @boost_current_limit:       boost current limit; either 2300 or 2750mA.
+ */
+struct sky81452_bl_platform_data {
+       const char *name;
+       struct gpio_desc *gpiod_enable;
+       unsigned int enable;
+       bool ignore_pwm;
+       bool dpwm_mode;
+       bool phase_shift;
+       unsigned int short_detection_threshold;
+       unsigned int boost_current_limit;
+};
+
 #define CTZ(b) __builtin_ctz(b)
 
 static int sky81452_bl_update_status(struct backlight_device *bd)
@@ -182,7 +203,7 @@ static struct sky81452_bl_platform_data *sky81452_bl_parse_dt(
        pdata->ignore_pwm = of_property_read_bool(np, "skyworks,ignore-pwm");
        pdata->dpwm_mode = of_property_read_bool(np, "skyworks,dpwm-mode");
        pdata->phase_shift = of_property_read_bool(np, "skyworks,phase-shift");
-       pdata->gpio_enable = of_get_gpio(np, 0);
+       pdata->gpiod_enable = devm_gpiod_get_optional(dev, NULL, GPIOD_OUT_HIGH);
 
        ret = of_property_count_u32_elems(np, "led-sources");
        if (ret < 0) {
@@ -252,26 +273,15 @@ static int sky81452_bl_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
        struct regmap *regmap = dev_get_drvdata(dev->parent);
-       struct sky81452_bl_platform_data *pdata = dev_get_platdata(dev);
+       struct sky81452_bl_platform_data *pdata;
        struct backlight_device *bd;
        struct backlight_properties props;
        const char *name;
        int ret;
 
-       if (!pdata) {
-               pdata = sky81452_bl_parse_dt(dev);
-               if (IS_ERR(pdata))
-                       return PTR_ERR(pdata);
-       }
-
-       if (gpio_is_valid(pdata->gpio_enable)) {
-               ret = devm_gpio_request_one(dev, pdata->gpio_enable,
-                                       GPIOF_OUT_INIT_HIGH, "sky81452-en");
-               if (ret < 0) {
-                       dev_err(dev, "failed to request GPIO. err=%d\n", ret);
-                       return ret;
-               }
-       }
+       pdata = sky81452_bl_parse_dt(dev);
+       if (IS_ERR(pdata))
+               return PTR_ERR(pdata);
 
        ret = sky81452_bl_init_device(regmap, pdata);
        if (ret < 0) {
@@ -312,8 +322,8 @@ static int sky81452_bl_remove(struct platform_device *pdev)
        bd->props.brightness = 0;
        backlight_update_status(bd);
 
-       if (gpio_is_valid(pdata->gpio_enable))
-               gpio_set_value_cansleep(pdata->gpio_enable, 0);
+       if (pdata->gpiod_enable)
+               gpiod_set_value_cansleep(pdata->gpiod_enable, 0);
 
        return 0;
 }
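
The sky81452 hunks above drop the legacy integer-GPIO calls (of_get_gpio(), devm_gpio_request_one(), gpio_set_value_cansleep()) in favour of GPIO descriptors. The pattern in isolation, as a sketch that is not part of the patch; the "enable" connection id and helper name are hypothetical, whereas the driver itself passes a NULL con_id and picks up the node's unnamed GPIO property.

/* Illustrative sketch of the gpiod consumer pattern used above. */
#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int example_set_enable_pin(struct device *dev, bool on)
{
        struct gpio_desc *gpiod;

        /* Optional lookup: returns NULL (not an error) if nothing is wired. */
        gpiod = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_HIGH);
        if (IS_ERR(gpiod))
                return PTR_ERR(gpiod);

        if (gpiod)
                gpiod_set_value_cansleep(gpiod, on);

        return 0;
}
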
index 762e3fe..8457166 100644 (file)
@@ -77,15 +77,7 @@ static int tps65217_bl_update_status(struct backlight_device *bl)
 {
        struct tps65217_bl *tps65217_bl = bl_get_data(bl);
        int rc;
-       int brightness = bl->props.brightness;
-
-       if (bl->props.state & BL_CORE_SUSPENDED)
-               brightness = 0;
-
-       if ((bl->props.power != FB_BLANK_UNBLANK) ||
-               (bl->props.fb_blank != FB_BLANK_UNBLANK))
-               /* framebuffer in low power mode or blanking active */
-               brightness = 0;
+       int brightness = backlight_get_brightness(bl);
 
        if (brightness > 0) {
                rc = tps65217_reg_write(tps65217_bl->tps,
index e55977d..c5aaee2 100644 (file)
@@ -91,18 +91,7 @@ err:
 
 static int wm831x_backlight_update_status(struct backlight_device *bl)
 {
-       int brightness = bl->props.brightness;
-
-       if (bl->props.power != FB_BLANK_UNBLANK)
-               brightness = 0;
-
-       if (bl->props.fb_blank != FB_BLANK_UNBLANK)
-               brightness = 0;
-
-       if (bl->props.state & BL_CORE_SUSPENDED)
-               brightness = 0;
-
-       return wm831x_backlight_set(bl, brightness);
+       return wm831x_backlight_set(bl, backlight_get_brightness(bl));
 }
 
 static int wm831x_backlight_get_brightness(struct backlight_device *bl)
index 30e73ec..da7c88f 100644 (file)
@@ -957,7 +957,6 @@ static int fb_check_caps(struct fb_info *info, struct fb_var_screeninfo *var,
 int
 fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
 {
-       int flags = info->flags;
        int ret = 0;
        u32 activate;
        struct fb_var_screeninfo old_var;
@@ -1052,9 +1051,6 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
        event.data = &mode;
        fb_notifier_call_chain(FB_EVENT_MODE_CHANGE, &event);
 
-       if (flags & FBINFO_MISC_USEREVENT)
-               fbcon_update_vcs(info, activate & FB_ACTIVATE_ALL);
-
        return 0;
 }
 EXPORT_SYMBOL(fb_set_var);
@@ -1105,9 +1101,9 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
                        return -EFAULT;
                console_lock();
                lock_fb_info(info);
-               info->flags |= FBINFO_MISC_USEREVENT;
                ret = fb_set_var(info, &var);
-               info->flags &= ~FBINFO_MISC_USEREVENT;
+               if (!ret)
+                       fbcon_update_vcs(info, var.activate & FB_ACTIVATE_ALL);
                unlock_fb_info(info);
                console_unlock();
                if (!ret && copy_to_user(argp, &var, sizeof(var)))
index d54c88f..65dae05 100644 (file)
@@ -91,9 +91,9 @@ static int activate(struct fb_info *fb_info, struct fb_var_screeninfo *var)
 
        var->activate |= FB_ACTIVATE_FORCE;
        console_lock();
-       fb_info->flags |= FBINFO_MISC_USEREVENT;
        err = fb_set_var(fb_info, var);
-       fb_info->flags &= ~FBINFO_MISC_USEREVENT;
+       if (!err)
+               fbcon_update_vcs(fb_info, var->activate & FB_ACTIVATE_ALL);
        console_unlock();
        if (err)
                return err;
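
Both hunks above are fallout from dropping the FBINFO_MISC_USEREVENT flag: rather than setting the flag and letting fb_set_var() notify fbcon internally, callers now invoke fbcon_update_vcs() themselves after a successful mode set. A hedged sketch of the resulting calling convention, with a hypothetical helper name:

/* Illustrative sketch of the post-FBINFO_MISC_USEREVENT convention,
 * mirroring the FBIOPUT_VSCREENINFO path above; not part of the patch. */
#include <linux/console.h>
#include <linux/fb.h>
#include <linux/fbcon.h>

static int example_set_mode(struct fb_info *info, struct fb_var_screeninfo *var)
{
        int ret;

        console_lock();
        lock_fb_info(info);
        ret = fb_set_var(info, var);
        if (!ret)
                /* Propagate the mode change to the framebuffer consoles. */
                fbcon_update_vcs(info, var->activate & FB_ACTIVATE_ALL);
        unlock_fb_info(info);
        console_unlock();

        return ret;
}
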
index 65491ae..e57c008 100644 (file)
@@ -453,7 +453,7 @@ static int efifb_probe(struct platform_device *dev)
        info->apertures->ranges[0].base = efifb_fix.smem_start;
        info->apertures->ranges[0].size = size_remap;
 
-       if (efi_enabled(EFI_BOOT) &&
+       if (efi_enabled(EFI_MEMMAP) &&
            !efi_mem_desc_lookup(efifb_fix.smem_start, &md)) {
                if ((efifb_fix.smem_start + efifb_fix.smem_len) >
                    (md.phys_addr + (md.num_pages << EFI_PAGE_SHIFT))) {
index 9df78fb..203c254 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/freezer.h>
 #include <linux/uaccess.h>
 #include <linux/fb.h>
+#include <linux/fbcon.h>
 #include <linux/init.h>
 
 #include <asm/cell-regs.h>
@@ -824,12 +825,12 @@ static int ps3fb_ioctl(struct fb_info *info, unsigned int cmd,
                                var = info->var;
                                fb_videomode_to_var(&var, vmode);
                                console_lock();
-                               info->flags |= FBINFO_MISC_USEREVENT;
                                /* Force, in case only special bits changed */
                                var.activate |= FB_ACTIVATE_FORCE;
                                par->new_mode_id = val;
                                retval = fb_set_var(info, &var);
-                               info->flags &= ~FBINFO_MISC_USEREVENT;
+                               if (!retval)
+                                       fbcon_update_vcs(info, var.activate & FB_ACTIVATE_ALL);
                                console_unlock();
                        }
                        break;
index 8e06ba9..09425ec 100644 (file)
@@ -312,7 +312,7 @@ static int ssd1307fb_init(struct ssd1307fb_par *par)
                /* Enable the PWM */
                pwm_enable(par->pwm);
 
-               dev_dbg(&par->client->dev, "Using PWM%d with a %dns period.\n",
+               dev_dbg(&par->client->dev, "Using PWM%d with a %lluns period.\n",
                        par->pwm->pwm, pwm_get_period(par->pwm));
        }
 
index 8be02f3..31cc97f 100644 (file)
@@ -398,12 +398,9 @@ static inline s64 towards_target(struct virtio_balloon *vb)
        s64 target;
        u32 num_pages;
 
-       virtio_cread(vb->vdev, struct virtio_balloon_config, num_pages,
-                    &num_pages);
-
        /* Legacy balloon config space is LE, unlike all other devices. */
-       if (!virtio_has_feature(vb->vdev, VIRTIO_F_VERSION_1))
-               num_pages = le32_to_cpu((__force __le32)num_pages);
+       virtio_cread_le(vb->vdev, struct virtio_balloon_config, num_pages,
+                       &num_pages);
 
        target = num_pages;
        return target - vb->num_pages;
@@ -462,11 +459,8 @@ static void update_balloon_size(struct virtio_balloon *vb)
        u32 actual = vb->num_pages;
 
        /* Legacy balloon config space is LE, unlike all other devices. */
-       if (!virtio_has_feature(vb->vdev, VIRTIO_F_VERSION_1))
-               actual = (__force u32)cpu_to_le32(actual);
-
-       virtio_cwrite(vb->vdev, struct virtio_balloon_config, actual,
-                     &actual);
+       virtio_cwrite_le(vb->vdev, struct virtio_balloon_config, actual,
+                        &actual);
 }
 
 static void update_balloon_stats_func(struct work_struct *work)
@@ -579,12 +573,10 @@ static u32 virtio_balloon_cmd_id_received(struct virtio_balloon *vb)
 {
        if (test_and_clear_bit(VIRTIO_BALLOON_CONFIG_READ_CMD_ID,
                               &vb->config_read_bitmap)) {
-               virtio_cread(vb->vdev, struct virtio_balloon_config,
-                            free_page_hint_cmd_id,
-                            &vb->cmd_id_received_cache);
                /* Legacy balloon config space is LE, unlike all other devices. */
-               if (!virtio_has_feature(vb->vdev, VIRTIO_F_VERSION_1))
-                       vb->cmd_id_received_cache = le32_to_cpu((__force __le32)vb->cmd_id_received_cache);
+               virtio_cread_le(vb->vdev, struct virtio_balloon_config,
+                               free_page_hint_cmd_id,
+                               &vb->cmd_id_received_cache);
        }
 
        return vb->cmd_id_received_cache;
@@ -600,7 +592,7 @@ static int send_cmd_id_start(struct virtio_balloon *vb)
        while (virtqueue_get_buf(vq, &unused))
                ;
 
-       vb->cmd_id_active = virtio32_to_cpu(vb->vdev,
+       vb->cmd_id_active = cpu_to_virtio32(vb->vdev,
                                        virtio_balloon_cmd_id_received(vb));
        sg_init_one(&sg, &vb->cmd_id_active, sizeof(vb->cmd_id_active));
        err = virtqueue_add_outbuf(vq, &sg, 1, &vb->cmd_id_active, GFP_KERNEL);
@@ -987,8 +979,8 @@ static int virtballoon_probe(struct virtio_device *vdev)
                if (!want_init_on_free())
                        memset(&poison_val, PAGE_POISON, sizeof(poison_val));
 
-               virtio_cwrite(vb->vdev, struct virtio_balloon_config,
-                             poison_val, &poison_val);
+               virtio_cwrite_le(vb->vdev, struct virtio_balloon_config,
+                                poison_val, &poison_val);
        }
 
        vb->pr_dev_info.report = virtballoon_free_page_report;
@@ -1129,7 +1121,7 @@ static int virtballoon_validate(struct virtio_device *vdev)
        else if (!virtio_has_feature(vdev, VIRTIO_BALLOON_F_PAGE_POISON))
                __virtio_clear_bit(vdev, VIRTIO_BALLOON_F_REPORTING);
 
-       __virtio_clear_bit(vdev, VIRTIO_F_IOMMU_PLATFORM);
+       __virtio_clear_bit(vdev, VIRTIO_F_ACCESS_PLATFORM);
        return 0;
 }
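
The balloon config space is little-endian even on legacy (pre-1.0) devices, so the hand-rolled le32 conversions above are replaced with the virtio_cread_le()/virtio_cwrite_le() accessors; the virtio_input and virtio_mem hunks below make the same switch. A minimal sketch of reading one field with the new accessor, using a hypothetical helper name:

/* Illustrative only: the _le accessor performs the LE conversion itself,
 * so no explicit le32_to_cpu() is needed. */
#include <linux/virtio.h>
#include <linux/virtio_balloon.h>
#include <linux/virtio_config.h>

static u32 example_read_num_pages(struct virtio_device *vdev)
{
        u32 num_pages;

        virtio_cread_le(vdev, struct virtio_balloon_config, num_pages,
                        &num_pages);
        return num_pages;
}
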
 
index efaf65b..877b2ea 100644 (file)
@@ -113,9 +113,9 @@ static u8 virtinput_cfg_select(struct virtio_input *vi,
 {
        u8 size;
 
-       virtio_cwrite(vi->vdev, struct virtio_input_config, select, &select);
-       virtio_cwrite(vi->vdev, struct virtio_input_config, subsel, &subsel);
-       virtio_cread(vi->vdev, struct virtio_input_config, size, &size);
+       virtio_cwrite_le(vi->vdev, struct virtio_input_config, select, &select);
+       virtio_cwrite_le(vi->vdev, struct virtio_input_config, subsel, &subsel);
+       virtio_cread_le(vi->vdev, struct virtio_input_config, size, &size);
        return size;
 }
 
@@ -158,11 +158,11 @@ static void virtinput_cfg_abs(struct virtio_input *vi, int abs)
        u32 mi, ma, re, fu, fl;
 
        virtinput_cfg_select(vi, VIRTIO_INPUT_CFG_ABS_INFO, abs);
-       virtio_cread(vi->vdev, struct virtio_input_config, u.abs.min, &mi);
-       virtio_cread(vi->vdev, struct virtio_input_config, u.abs.max, &ma);
-       virtio_cread(vi->vdev, struct virtio_input_config, u.abs.res, &re);
-       virtio_cread(vi->vdev, struct virtio_input_config, u.abs.fuzz, &fu);
-       virtio_cread(vi->vdev, struct virtio_input_config, u.abs.flat, &fl);
+       virtio_cread_le(vi->vdev, struct virtio_input_config, u.abs.min, &mi);
+       virtio_cread_le(vi->vdev, struct virtio_input_config, u.abs.max, &ma);
+       virtio_cread_le(vi->vdev, struct virtio_input_config, u.abs.res, &re);
+       virtio_cread_le(vi->vdev, struct virtio_input_config, u.abs.fuzz, &fu);
+       virtio_cread_le(vi->vdev, struct virtio_input_config, u.abs.flat, &fl);
        input_set_abs_params(vi->idev, abs, mi, ma, fu, fl);
        input_abs_set_res(vi->idev, abs, re);
 }
@@ -244,14 +244,14 @@ static int virtinput_probe(struct virtio_device *vdev)
 
        size = virtinput_cfg_select(vi, VIRTIO_INPUT_CFG_ID_DEVIDS, 0);
        if (size >= sizeof(struct virtio_input_devids)) {
-               virtio_cread(vi->vdev, struct virtio_input_config,
-                            u.ids.bustype, &vi->idev->id.bustype);
-               virtio_cread(vi->vdev, struct virtio_input_config,
-                            u.ids.vendor, &vi->idev->id.vendor);
-               virtio_cread(vi->vdev, struct virtio_input_config,
-                            u.ids.product, &vi->idev->id.product);
-               virtio_cread(vi->vdev, struct virtio_input_config,
-                            u.ids.version, &vi->idev->id.version);
+               virtio_cread_le(vi->vdev, struct virtio_input_config,
+                               u.ids.bustype, &vi->idev->id.bustype);
+               virtio_cread_le(vi->vdev, struct virtio_input_config,
+                               u.ids.vendor, &vi->idev->id.vendor);
+               virtio_cread_le(vi->vdev, struct virtio_input_config,
+                               u.ids.product, &vi->idev->id.product);
+               virtio_cread_le(vi->vdev, struct virtio_input_config,
+                               u.ids.version, &vi->idev->id.version);
        } else {
                vi->idev->id.bustype = BUS_VIRTUAL;
        }
index f26f5f6..c08512f 100644 (file)
@@ -1530,21 +1530,21 @@ static void virtio_mem_refresh_config(struct virtio_mem *vm)
        uint64_t new_plugged_size, usable_region_size, end_addr;
 
        /* the plugged_size is just a reflection of what _we_ did previously */
-       virtio_cread(vm->vdev, struct virtio_mem_config, plugged_size,
-                    &new_plugged_size);
+       virtio_cread_le(vm->vdev, struct virtio_mem_config, plugged_size,
+                       &new_plugged_size);
        if (WARN_ON_ONCE(new_plugged_size != vm->plugged_size))
                vm->plugged_size = new_plugged_size;
 
        /* calculate the last usable memory block id */
-       virtio_cread(vm->vdev, struct virtio_mem_config,
-                    usable_region_size, &usable_region_size);
+       virtio_cread_le(vm->vdev, struct virtio_mem_config,
+                       usable_region_size, &usable_region_size);
        end_addr = vm->addr + usable_region_size;
        end_addr = min(end_addr, phys_limit);
        vm->last_usable_mb_id = virtio_mem_phys_to_mb_id(end_addr) - 1;
 
        /* see if there is a request to change the size */
-       virtio_cread(vm->vdev, struct virtio_mem_config, requested_size,
-                    &vm->requested_size);
+       virtio_cread_le(vm->vdev, struct virtio_mem_config, requested_size,
+                       &vm->requested_size);
 
        dev_info(&vm->vdev->dev, "plugged size: 0x%llx", vm->plugged_size);
        dev_info(&vm->vdev->dev, "requested size: 0x%llx", vm->requested_size);
@@ -1677,16 +1677,16 @@ static int virtio_mem_init(struct virtio_mem *vm)
        }
 
        /* Fetch all properties that can't change. */
-       virtio_cread(vm->vdev, struct virtio_mem_config, plugged_size,
-                    &vm->plugged_size);
-       virtio_cread(vm->vdev, struct virtio_mem_config, block_size,
-                    &vm->device_block_size);
-       virtio_cread(vm->vdev, struct virtio_mem_config, node_id,
-                    &node_id);
+       virtio_cread_le(vm->vdev, struct virtio_mem_config, plugged_size,
+                       &vm->plugged_size);
+       virtio_cread_le(vm->vdev, struct virtio_mem_config, block_size,
+                       &vm->device_block_size);
+       virtio_cread_le(vm->vdev, struct virtio_mem_config, node_id,
+                       &node_id);
        vm->nid = virtio_mem_translate_node_id(vm, node_id);
-       virtio_cread(vm->vdev, struct virtio_mem_config, addr, &vm->addr);
-       virtio_cread(vm->vdev, struct virtio_mem_config, region_size,
-                    &vm->region_size);
+       virtio_cread_le(vm->vdev, struct virtio_mem_config, addr, &vm->addr);
+       virtio_cread_le(vm->vdev, struct virtio_mem_config, region_size,
+                       &vm->region_size);
 
        /*
         * We always hotplug memory in memory block granularity. This way,
index db93ced..3e14e70 100644 (file)
  * method, i.e. 32-bit accesses for 32-bit fields, 16-bit accesses
  * for 16-bit fields and 8-bit accesses for 8-bit fields.
  */
-static inline u8 vp_ioread8(u8 __iomem *addr)
+static inline u8 vp_ioread8(const u8 __iomem *addr)
 {
        return ioread8(addr);
 }
-static inline u16 vp_ioread16 (__le16 __iomem *addr)
+static inline u16 vp_ioread16 (const __le16 __iomem *addr)
 {
        return ioread16(addr);
 }
 
-static inline u32 vp_ioread32(__le32 __iomem *addr)
+static inline u32 vp_ioread32(const __le32 __iomem *addr)
 {
        return ioread32(addr);
 }
@@ -481,6 +481,7 @@ static const struct virtio_config_ops virtio_pci_config_ops = {
  * @dev: the pci device
  * @cfg_type: the VIRTIO_PCI_CAP_* value we seek
  * @ioresource_types: IORESOURCE_MEM and/or IORESOURCE_IO.
+ * @bars: the bitmask of BARs
  *
  * Returns offset of the capability, or 0.
  */
index a2de775..becc776 100644 (file)
@@ -240,7 +240,7 @@ static inline bool virtqueue_use_indirect(struct virtqueue *_vq,
 
 static bool vring_use_dma_api(struct virtio_device *vdev)
 {
-       if (!virtio_has_iommu_quirk(vdev))
+       if (!virtio_has_dma_quirk(vdev))
                return true;
 
        /* Otherwise, we are left to guess. */
@@ -1960,6 +1960,9 @@ bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
 {
        struct vring_virtqueue *vq = to_vvq(_vq);
 
+       if (unlikely(vq->broken))
+               return false;
+
        virtio_mb(vq->weak_barriers);
        return vq->packed_ring ? virtqueue_poll_packed(_vq, last_used_idx) :
                                 virtqueue_poll_split(_vq, last_used_idx);
@@ -2225,7 +2228,7 @@ void vring_transport_features(struct virtio_device *vdev)
                        break;
                case VIRTIO_F_VERSION_1:
                        break;
-               case VIRTIO_F_IOMMU_PLATFORM:
+               case VIRTIO_F_ACCESS_PLATFORM:
                        break;
                case VIRTIO_F_RING_PACKED:
                        break;
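
The virtqueue_poll() change above makes the function return false for a broken device before it touches the ring. Callers pair it with virtqueue_enable_cb_prepare(), roughly as in this sketch with a hypothetical helper name:

/* Illustrative only: the opaque token from _prepare() is handed back to poll(). */
#include <linux/virtio.h>

static bool example_poll_once(struct virtqueue *vq)
{
        unsigned int opaque = virtqueue_enable_cb_prepare(vq);

        /* Now also returns false if the device is marked broken. */
        return virtqueue_poll(vq, opaque);
}
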
index c30eb55..4a9ddb4 100644 (file)
@@ -57,9 +57,8 @@ static void virtio_vdpa_get(struct virtio_device *vdev, unsigned offset,
                            void *buf, unsigned len)
 {
        struct vdpa_device *vdpa = vd_get_vdpa(vdev);
-       const struct vdpa_config_ops *ops = vdpa->config;
 
-       ops->get_config(vdpa, offset, buf, len);
+       vdpa_get_config(vdpa, offset, buf, len);
 }
 
 static void virtio_vdpa_set(struct virtio_device *vdev, unsigned offset,
@@ -101,9 +100,8 @@ static void virtio_vdpa_set_status(struct virtio_device *vdev, u8 status)
 static void virtio_vdpa_reset(struct virtio_device *vdev)
 {
        struct vdpa_device *vdpa = vd_get_vdpa(vdev);
-       const struct vdpa_config_ops *ops = vdpa->config;
 
-       return ops->set_status(vdpa, 0);
+       vdpa_reset(vdpa);
 }
 
 static bool virtio_vdpa_notify(struct virtqueue *vq)
@@ -294,12 +292,11 @@ static u64 virtio_vdpa_get_features(struct virtio_device *vdev)
 static int virtio_vdpa_finalize_features(struct virtio_device *vdev)
 {
        struct vdpa_device *vdpa = vd_get_vdpa(vdev);
-       const struct vdpa_config_ops *ops = vdpa->config;
 
        /* Give virtio_ring a chance to accept features. */
        vring_transport_features(vdev);
 
-       return ops->set_features(vdpa, vdev->features);
+       return vdpa_set_features(vdpa, vdev->features);
 }
 
 static const char *virtio_vdpa_bus_name(struct virtio_device *vdev)
index 4f4687c..ab7aad5 100644 (file)
@@ -1027,7 +1027,7 @@ config ADVANTECH_WDT
          If you are configuring a Linux kernel for the Advantech single-board
          computer, say `Y' here to support its built-in watchdog timer
          feature. More information can be found at
-         <http://www.advantech.com.tw/products/>
+         <https://www.advantech.com.tw/products/>
 
 config ALIM1535_WDT
        tristate "ALi M1535 PMU Watchdog Timer"
index 0e4c18a..554fe85 100644 (file)
@@ -177,7 +177,7 @@ static long advwdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                if (advwdt_set_heartbeat(new_timeout))
                        return -EINVAL;
                advwdt_ping();
-               /* fall through */
+               fallthrough;
        case WDIOC_GETTIMEOUT:
                return put_user(timeout, p);
        default:
index 42338c7..bfb9a91 100644 (file)
@@ -220,7 +220,7 @@ static long ali_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                        return -EINVAL;
                ali_keepalive();
        }
-               /* fall through */
+               fallthrough;
        case WDIOC_GETTIMEOUT:
                return put_user(timeout, p);
        default:
index 5af0358..4ff7f5a 100644 (file)
@@ -279,7 +279,7 @@ static long fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                timeout = new_timeout;
                wdt_keepalive();
        }
-               /* Fall through */
+               fallthrough;
        case WDIOC_GETTIMEOUT:
                return put_user(timeout, p);
        default:
index c087027..ff37dc9 100644 (file)
@@ -235,8 +235,7 @@ static long ar7_wdt_ioctl(struct file *file,
                ar7_wdt_update_margin(new_margin);
                ar7_wdt_kick(1);
                spin_unlock(&wdt_lock);
-               /* Fall through */
-
+               fallthrough;
        case WDIOC_GETTIMEOUT:
                if (put_user(margin, (int *)arg))
                        return -EFAULT;
index d6dff97..0f18f06 100644 (file)
@@ -215,8 +215,8 @@ static long ath79_wdt_ioctl(struct file *file, unsigned int cmd,
                err = ath79_wdt_set_timeout(t);
                if (err)
                        break;
+               fallthrough;
 
-               /* fallthrough */
        case WDIOC_GETTIMEOUT:
                err = put_user(timeout, p);
                break;
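
The watchdog ioctl hunks above all replace "/* fall through */" comments with the fallthrough pseudo-keyword, letting the compiler check the intent. The pattern in a WDIOC switch tail, as a sketch with hypothetical names:

/* Illustrative only; example_timeout and the helper name are hypothetical. */
#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/watchdog.h>

static long example_ioctl_tail(unsigned int cmd, int __user *p,
                               int new_timeout, int *example_timeout)
{
        switch (cmd) {
        case WDIOC_SETTIMEOUT:
                *example_timeout = new_timeout;
                fallthrough;    /* report the new timeout as well */
        case WDIOC_GETTIMEOUT:
                return put_user(*example_timeout, p);
        default:
                return -ENOTTY;
        }
}
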
index eb850a8..8237c4e 100644 (file)
@@ -279,7 +279,7 @@ static int bcm_kona_wdt_probe(struct platform_device *pdev)
 
        wdt->base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(wdt->base))
-               return -ENODEV;
+               return PTR_ERR(wdt->base);
 
        wdt->resolution = SECWDOG_DEFAULT_RESOLUTION;
        ret = bcm_kona_wdt_set_resolution_reg(wdt);
index 9d09bbf..7817fb9 100644 (file)
@@ -39,6 +39,11 @@ static bool booke_wdt_enabled;
 module_param(booke_wdt_enabled, bool, 0);
 static int  booke_wdt_period = CONFIG_BOOKE_WDT_DEFAULT_TIMEOUT;
 module_param(booke_wdt_period, int, 0);
+static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, 0);
+MODULE_PARM_DESC(nowayout,
+               "Watchdog cannot be stopped once started (default="
+                               __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
 
 #ifdef CONFIG_PPC_FSL_BOOK3E
 
@@ -215,7 +220,6 @@ static void __exit booke_wdt_exit(void)
 static int __init booke_wdt_init(void)
 {
        int ret = 0;
-       bool nowayout = WATCHDOG_NOWAYOUT;
 
        pr_info("powerpc book-e watchdog driver loaded\n");
        booke_wdt_info.firmware_version = cur_cpu_spec->pvr_value;
index fba21de..32d0e17 100644 (file)
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * Copyright 2010-2011 Picochip Ltd., Jamie Iles
- * http://www.picochip.com
+ * https://www.picochip.com
  *
  * This file implements a driver for the Synopsys DesignWare watchdog device
  * in the many subsystems. The watchdog has 16 different timeout periods
@@ -13,6 +13,8 @@
  */
 
 #include <linux/bitops.h>
+#include <linux/limits.h>
+#include <linux/kernel.h>
 #include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/err.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
+#include <linux/interrupt.h>
 #include <linux/of.h>
 #include <linux/pm.h>
 #include <linux/platform_device.h>
 #include <linux/reset.h>
 #include <linux/watchdog.h>
+#include <linux/debugfs.h>
 
 #define WDOG_CONTROL_REG_OFFSET                    0x00
 #define WDOG_CONTROL_REG_WDT_EN_MASK       0x01
 #define WDOG_CURRENT_COUNT_REG_OFFSET      0x08
 #define WDOG_COUNTER_RESTART_REG_OFFSET     0x0c
 #define WDOG_COUNTER_RESTART_KICK_VALUE            0x76
-
-/* The maximum TOP (timeout period) value that can be set in the watchdog. */
-#define DW_WDT_MAX_TOP         15
+#define WDOG_INTERRUPT_STATUS_REG_OFFSET    0x10
+#define WDOG_INTERRUPT_CLEAR_REG_OFFSET     0x14
+#define WDOG_COMP_PARAMS_5_REG_OFFSET       0xe4
+#define WDOG_COMP_PARAMS_4_REG_OFFSET       0xe8
+#define WDOG_COMP_PARAMS_3_REG_OFFSET       0xec
+#define WDOG_COMP_PARAMS_2_REG_OFFSET       0xf0
+#define WDOG_COMP_PARAMS_1_REG_OFFSET       0xf4
+#define WDOG_COMP_PARAMS_1_USE_FIX_TOP      BIT(6)
+#define WDOG_COMP_VERSION_REG_OFFSET        0xf8
+#define WDOG_COMP_TYPE_REG_OFFSET           0xfc
+
+/* There are sixteen TOPs (timeout periods) that can be set in the watchdog. */
+#define DW_WDT_NUM_TOPS                16
+#define DW_WDT_FIX_TOP(_idx)   (1U << (16 + _idx))
 
 #define DW_WDT_DEFAULT_SECONDS 30
 
+static const u32 dw_wdt_fix_tops[DW_WDT_NUM_TOPS] = {
+       DW_WDT_FIX_TOP(0), DW_WDT_FIX_TOP(1), DW_WDT_FIX_TOP(2),
+       DW_WDT_FIX_TOP(3), DW_WDT_FIX_TOP(4), DW_WDT_FIX_TOP(5),
+       DW_WDT_FIX_TOP(6), DW_WDT_FIX_TOP(7), DW_WDT_FIX_TOP(8),
+       DW_WDT_FIX_TOP(9), DW_WDT_FIX_TOP(10), DW_WDT_FIX_TOP(11),
+       DW_WDT_FIX_TOP(12), DW_WDT_FIX_TOP(13), DW_WDT_FIX_TOP(14),
+       DW_WDT_FIX_TOP(15)
+};
+
 static bool nowayout = WATCHDOG_NOWAYOUT;
 module_param(nowayout, bool, 0);
 MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
                 "(default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
 
+enum dw_wdt_rmod {
+       DW_WDT_RMOD_RESET = 1,
+       DW_WDT_RMOD_IRQ = 2
+};
+
+struct dw_wdt_timeout {
+       u32 top_val;
+       unsigned int sec;
+       unsigned int msec;
+};
+
 struct dw_wdt {
        void __iomem            *regs;
        struct clk              *clk;
+       struct clk              *pclk;
        unsigned long           rate;
+       enum dw_wdt_rmod        rmod;
+       struct dw_wdt_timeout   timeouts[DW_WDT_NUM_TOPS];
        struct watchdog_device  wdd;
        struct reset_control    *rst;
        /* Save/restore */
        u32                     control;
        u32                     timeout;
+
+#ifdef CONFIG_DEBUG_FS
+       struct dentry           *dbgfs_dir;
+#endif
 };
 
 #define to_dw_wdt(wdd) container_of(wdd, struct dw_wdt, wdd)
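
For readers skimming the hunk above: DW_WDT_FIX_TOP() stores the counter reload value in clock cycles (2^(16 + idx)), and the driver later divides that by the timer clock rate to obtain seconds. A minimal stand-alone sketch of the conversion, assuming an illustrative 32768 Hz timer clock (the real rate is taken from the clock framework at probe time):

    #include <stdio.h>

    /* Same encoding as DW_WDT_FIX_TOP(): cycles = 2^(16 + idx). */
    static unsigned int fix_top_to_sec(unsigned int idx, unsigned long rate)
    {
            return (1U << (16 + idx)) / rate;
    }

    int main(void)
    {
            unsigned long rate = 32768;  /* assumed example rate, in Hz */
            unsigned int idx;

            for (idx = 0; idx < 16; idx++)
                    printf("TOP %2u -> %u s\n", idx, fix_top_to_sec(idx, rate));
            return 0;  /* prints TOP 0 -> 2 s ... TOP 15 -> 65536 s */
    }
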
@@ -64,20 +106,84 @@ static inline int dw_wdt_is_enabled(struct dw_wdt *dw_wdt)
                WDOG_CONTROL_REG_WDT_EN_MASK;
 }
 
-static inline int dw_wdt_top_in_seconds(struct dw_wdt *dw_wdt, unsigned top)
+static void dw_wdt_update_mode(struct dw_wdt *dw_wdt, enum dw_wdt_rmod rmod)
 {
+       u32 val;
+
+       val = readl(dw_wdt->regs + WDOG_CONTROL_REG_OFFSET);
+       if (rmod == DW_WDT_RMOD_IRQ)
+               val |= WDOG_CONTROL_REG_RESP_MODE_MASK;
+       else
+               val &= ~WDOG_CONTROL_REG_RESP_MODE_MASK;
+       writel(val, dw_wdt->regs + WDOG_CONTROL_REG_OFFSET);
+
+       dw_wdt->rmod = rmod;
+}
+
+static unsigned int dw_wdt_find_best_top(struct dw_wdt *dw_wdt,
+                                        unsigned int timeout, u32 *top_val)
+{
+       int idx;
+
        /*
-        * There are 16 possible timeout values in 0..15 where the number of
-        * cycles is 2 ^ (16 + i) and the watchdog counts down.
+        * Find a TOP with a timeout greater than or equal to the requested
+        * value. Note we'll select the TOP with the maximum timeout if the
+        * requested timeout can't be reached.
         */
-       return (1U << (16 + top)) / dw_wdt->rate;
+       for (idx = 0; idx < DW_WDT_NUM_TOPS; ++idx) {
+               if (dw_wdt->timeouts[idx].sec >= timeout)
+                       break;
+       }
+
+       if (idx == DW_WDT_NUM_TOPS)
+               --idx;
+
+       *top_val = dw_wdt->timeouts[idx].top_val;
+
+       return dw_wdt->timeouts[idx].sec;
+}
+
+static unsigned int dw_wdt_get_min_timeout(struct dw_wdt *dw_wdt)
+{
+       int idx;
+
+       /*
+        * We'll find a timeout greater than or equal to one second anyway,
+        * because the driver probe would have failed if there was none.
+        */
+       for (idx = 0; idx < DW_WDT_NUM_TOPS; ++idx) {
+               if (dw_wdt->timeouts[idx].sec)
+                       break;
+       }
+
+       return dw_wdt->timeouts[idx].sec;
 }
 
-static int dw_wdt_get_top(struct dw_wdt *dw_wdt)
+static unsigned int dw_wdt_get_max_timeout_ms(struct dw_wdt *dw_wdt)
 {
-       int top = readl(dw_wdt->regs + WDOG_TIMEOUT_RANGE_REG_OFFSET) & 0xF;
+       struct dw_wdt_timeout *timeout = &dw_wdt->timeouts[DW_WDT_NUM_TOPS - 1];
+       u64 msec;
+
+       msec = (u64)timeout->sec * MSEC_PER_SEC + timeout->msec;
 
-       return dw_wdt_top_in_seconds(dw_wdt, top);
+       return msec < UINT_MAX ? msec : UINT_MAX;
+}
+
+static unsigned int dw_wdt_get_timeout(struct dw_wdt *dw_wdt)
+{
+       int top_val = readl(dw_wdt->regs + WDOG_TIMEOUT_RANGE_REG_OFFSET) & 0xF;
+       int idx;
+
+       for (idx = 0; idx < DW_WDT_NUM_TOPS; ++idx) {
+               if (dw_wdt->timeouts[idx].top_val == top_val)
+                       break;
+       }
+
+       /*
+        * In IRQ mode, due to the two-stage counter, the actual timeout is
+        * twice the TOP setting.
+        */
+       return dw_wdt->timeouts[idx].sec * dw_wdt->rmod;
 }
 
 static int dw_wdt_ping(struct watchdog_device *wdd)
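
To make the rmod scaling above concrete: in reset mode (rmod = 1) one TOP period is the full timeout, while in pre-timeout/IRQ mode (rmod = 2) the counter runs twice - once to raise the interrupt and once more to trigger the reset - so the effective timeout doubles, and dw_wdt_set_timeout() conversely searches for a TOP close to half of the request. A hedged kernel-style sketch of that arithmetic (helper names are illustrative, not part of the driver):

    /* Effective timeout reported for a given TOP period. */
    static unsigned int eff_timeout_sec(unsigned int top_sec, unsigned int rmod)
    {
            return top_sec * rmod;              /* 16 s TOP, IRQ mode -> 32 s */
    }

    /* Target used when searching the TOP table for a user request. */
    static unsigned int top_search_target(unsigned int req_sec, unsigned int rmod)
    {
            return DIV_ROUND_UP(req_sec, rmod); /* 30 s request, rmod 2 -> 15 s */
    }
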
@@ -93,17 +199,23 @@ static int dw_wdt_ping(struct watchdog_device *wdd)
 static int dw_wdt_set_timeout(struct watchdog_device *wdd, unsigned int top_s)
 {
        struct dw_wdt *dw_wdt = to_dw_wdt(wdd);
-       int i, top_val = DW_WDT_MAX_TOP;
+       unsigned int timeout;
+       u32 top_val;
 
        /*
-        * Iterate over the timeout values until we find the closest match. We
-        * always look for >=.
+        * Note IRQ mode being enabled means having a non-zero pre-timeout
+        * setup. In this case we try to find a TOP as close to half of the
+        * requested timeout as possible, since the DW Watchdog IRQ mode works
+        * in two stages - the first timeout raises the pre-timeout interrupt,
+        * the second performs the system reset. So the effective
+        * watchdog-caused reset happens after two watchdog TOPs have elapsed.
         */
-       for (i = 0; i <= DW_WDT_MAX_TOP; ++i)
-               if (dw_wdt_top_in_seconds(dw_wdt, i) >= top_s) {
-                       top_val = i;
-                       break;
-               }
+       timeout = dw_wdt_find_best_top(dw_wdt, DIV_ROUND_UP(top_s, dw_wdt->rmod),
+                                      &top_val);
+       if (dw_wdt->rmod == DW_WDT_RMOD_IRQ)
+               wdd->pretimeout = timeout;
+       else
+               wdd->pretimeout = 0;
 
        /*
         * Set the new value in the watchdog.  Some versions of dw_wdt
@@ -114,25 +226,47 @@ static int dw_wdt_set_timeout(struct watchdog_device *wdd, unsigned int top_s)
        writel(top_val | top_val << WDOG_TIMEOUT_RANGE_TOPINIT_SHIFT,
               dw_wdt->regs + WDOG_TIMEOUT_RANGE_REG_OFFSET);
 
+       /* Kick new TOP value into the watchdog counter if activated. */
+       if (watchdog_active(wdd))
+               dw_wdt_ping(wdd);
+
        /*
         * In case users set bigger timeout value than HW can support,
         * kernel(watchdog_dev.c) helps to feed watchdog before
         * wdd->max_hw_heartbeat_ms
         */
        if (top_s * 1000 <= wdd->max_hw_heartbeat_ms)
-               wdd->timeout = dw_wdt_top_in_seconds(dw_wdt, top_val);
+               wdd->timeout = timeout * dw_wdt->rmod;
        else
                wdd->timeout = top_s;
 
        return 0;
 }
 
+static int dw_wdt_set_pretimeout(struct watchdog_device *wdd, unsigned int req)
+{
+       struct dw_wdt *dw_wdt = to_dw_wdt(wdd);
+
+       /*
+        * We ignore the actual value of the timeout passed from user space,
+        * using it only as a flag indicating whether the pretimeout
+        * functionality is intended to be activated.
+        */
+       dw_wdt_update_mode(dw_wdt, req ? DW_WDT_RMOD_IRQ : DW_WDT_RMOD_RESET);
+       dw_wdt_set_timeout(wdd, wdd->timeout);
+
+       return 0;
+}
+
 static void dw_wdt_arm_system_reset(struct dw_wdt *dw_wdt)
 {
        u32 val = readl(dw_wdt->regs + WDOG_CONTROL_REG_OFFSET);
 
-       /* Disable interrupt mode; always perform system reset. */
-       val &= ~WDOG_CONTROL_REG_RESP_MODE_MASK;
+       /* Disable/enable interrupt mode depending on the RMOD flag. */
+       if (dw_wdt->rmod == DW_WDT_RMOD_IRQ)
+               val |= WDOG_CONTROL_REG_RESP_MODE_MASK;
+       else
+               val &= ~WDOG_CONTROL_REG_RESP_MODE_MASK;
        /* Enable watchdog. */
        val |= WDOG_CONTROL_REG_WDT_EN_MASK;
        writel(val, dw_wdt->regs + WDOG_CONTROL_REG_OFFSET);
@@ -170,6 +304,7 @@ static int dw_wdt_restart(struct watchdog_device *wdd,
        struct dw_wdt *dw_wdt = to_dw_wdt(wdd);
 
        writel(0, dw_wdt->regs + WDOG_TIMEOUT_RANGE_REG_OFFSET);
+       dw_wdt_update_mode(dw_wdt, DW_WDT_RMOD_RESET);
        if (dw_wdt_is_enabled(dw_wdt))
                writel(WDOG_COUNTER_RESTART_KICK_VALUE,
                       dw_wdt->regs + WDOG_COUNTER_RESTART_REG_OFFSET);
@@ -185,9 +320,19 @@ static int dw_wdt_restart(struct watchdog_device *wdd,
 static unsigned int dw_wdt_get_timeleft(struct watchdog_device *wdd)
 {
        struct dw_wdt *dw_wdt = to_dw_wdt(wdd);
+       unsigned int sec;
+       u32 val;
+
+       val = readl(dw_wdt->regs + WDOG_CURRENT_COUNT_REG_OFFSET);
+       sec = val / dw_wdt->rate;
+
+       if (dw_wdt->rmod == DW_WDT_RMOD_IRQ) {
+               val = readl(dw_wdt->regs + WDOG_INTERRUPT_STATUS_REG_OFFSET);
+               if (!val)
+                       sec += wdd->pretimeout;
+       }
 
-       return readl(dw_wdt->regs + WDOG_CURRENT_COUNT_REG_OFFSET) /
-               dw_wdt->rate;
+       return sec;
 }
 
 static const struct watchdog_info dw_wdt_ident = {
@@ -196,16 +341,41 @@ static const struct watchdog_info dw_wdt_ident = {
        .identity       = "Synopsys DesignWare Watchdog",
 };
 
+static const struct watchdog_info dw_wdt_pt_ident = {
+       .options        = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT |
+                         WDIOF_PRETIMEOUT | WDIOF_MAGICCLOSE,
+       .identity       = "Synopsys DesignWare Watchdog",
+};
+
 static const struct watchdog_ops dw_wdt_ops = {
        .owner          = THIS_MODULE,
        .start          = dw_wdt_start,
        .stop           = dw_wdt_stop,
        .ping           = dw_wdt_ping,
        .set_timeout    = dw_wdt_set_timeout,
+       .set_pretimeout = dw_wdt_set_pretimeout,
        .get_timeleft   = dw_wdt_get_timeleft,
        .restart        = dw_wdt_restart,
 };
 
+static irqreturn_t dw_wdt_irq(int irq, void *devid)
+{
+       struct dw_wdt *dw_wdt = devid;
+       u32 val;
+
+       /*
+        * We don't clear the IRQ status. It's supposed to be done by the
+        * following ping operations.
+        */
+       val = readl(dw_wdt->regs + WDOG_INTERRUPT_STATUS_REG_OFFSET);
+       if (!val)
+               return IRQ_NONE;
+
+       watchdog_notify_pretimeout(&dw_wdt->wdd);
+
+       return IRQ_HANDLED;
+}
+
 #ifdef CONFIG_PM_SLEEP
 static int dw_wdt_suspend(struct device *dev)
 {
@@ -214,6 +384,7 @@ static int dw_wdt_suspend(struct device *dev)
        dw_wdt->control = readl(dw_wdt->regs + WDOG_CONTROL_REG_OFFSET);
        dw_wdt->timeout = readl(dw_wdt->regs + WDOG_TIMEOUT_RANGE_REG_OFFSET);
 
+       clk_disable_unprepare(dw_wdt->pclk);
        clk_disable_unprepare(dw_wdt->clk);
 
        return 0;
@@ -227,6 +398,12 @@ static int dw_wdt_resume(struct device *dev)
        if (err)
                return err;
 
+       err = clk_prepare_enable(dw_wdt->pclk);
+       if (err) {
+               clk_disable_unprepare(dw_wdt->clk);
+               return err;
+       }
+
        writel(dw_wdt->timeout, dw_wdt->regs + WDOG_TIMEOUT_RANGE_REG_OFFSET);
        writel(dw_wdt->control, dw_wdt->regs + WDOG_CONTROL_REG_OFFSET);
 
@@ -238,6 +415,139 @@ static int dw_wdt_resume(struct device *dev)
 
 static SIMPLE_DEV_PM_OPS(dw_wdt_pm_ops, dw_wdt_suspend, dw_wdt_resume);
 
+/*
+ * If the DW WDT IP core is synthesized with the fixed TOP feature disabled,
+ * the TOPs array can be arbitrarily ordered and hold nearly any sixteen
+ * unsigned values, depending on the system engineer's imagination. The next
+ * method processes the passed TOPs array to pre-calculate the effective
+ * timeouts and to sort the TOP entries in ascending order of timeout.
+ */
+
+static void dw_wdt_handle_tops(struct dw_wdt *dw_wdt, const u32 *tops)
+{
+       struct dw_wdt_timeout tout, *dst;
+       int val, tidx;
+       u64 msec;
+
+       /*
+        * We walk over the passed TOPs array and calculate corresponding
+        * timeouts in seconds and milliseconds. The milliseconds granularity
+        * is needed to distinguish TOPs with very close timeouts and, later,
+        * to set the watchdog max heartbeat.
+        */
+       for (val = 0; val < DW_WDT_NUM_TOPS; ++val) {
+               tout.top_val = val;
+               tout.sec = tops[val] / dw_wdt->rate;
+               msec = (u64)tops[val] * MSEC_PER_SEC;
+               do_div(msec, dw_wdt->rate);
+               tout.msec = msec - ((u64)tout.sec * MSEC_PER_SEC);
+
+               /*
+                * Find a suitable place for the current TOP in the timeouts
+                * array so that the list remains in ascending order.
+                */
+               for (tidx = 0; tidx < val; ++tidx) {
+                       dst = &dw_wdt->timeouts[tidx];
+                       if (tout.sec > dst->sec || (tout.sec == dst->sec &&
+                           tout.msec >= dst->msec))
+                               continue;
+                       else
+                               swap(*dst, tout);
+               }
+
+               dw_wdt->timeouts[val] = tout;
+       }
+}
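
As a small illustration of the table built above (numbers are hypothetical, not from the patch): with a 1 MHz timer clock a custom TOP of 1500000 cycles splits into sec = 1 and msec = 500, and the insertion step keeps the sixteen entries ordered by (sec, msec) so the min/max timeout lookups can simply take the first and last entries. The split itself mirrors the code just shown:

    /* Sketch of the cycles -> (sec, msec) split at an assumed clock rate. */
    static void cycles_to_timeout(u32 cycles, unsigned long rate,
                                  unsigned int *sec, unsigned int *msec)
    {
            u64 ms = (u64)cycles * MSEC_PER_SEC;

            *sec = cycles / rate;
            do_div(ms, rate);                       /* total milliseconds */
            *msec = ms - (u64)*sec * MSEC_PER_SEC;  /* fractional ms part */
    }
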
+
+static int dw_wdt_init_timeouts(struct dw_wdt *dw_wdt, struct device *dev)
+{
+       u32 data, of_tops[DW_WDT_NUM_TOPS];
+       const u32 *tops;
+       int ret;
+
+       /*
+        * Retrieve custom or fixed counter values depending on the
+        * WDT_USE_FIX_TOP flag found in the component specific parameters
+        * #1 register.
+        */
+       data = readl(dw_wdt->regs + WDOG_COMP_PARAMS_1_REG_OFFSET);
+       if (data & WDOG_COMP_PARAMS_1_USE_FIX_TOP) {
+               tops = dw_wdt_fix_tops;
+       } else {
+               ret = of_property_read_variable_u32_array(dev_of_node(dev),
+                       "snps,watchdog-tops", of_tops, DW_WDT_NUM_TOPS,
+                       DW_WDT_NUM_TOPS);
+               if (ret < 0) {
+                       dev_warn(dev, "No valid TOPs array specified\n");
+                       tops = dw_wdt_fix_tops;
+               } else {
+                       tops = of_tops;
+               }
+       }
+
+       /* Convert the specified TOPs into an array of watchdog timeouts. */
+       dw_wdt_handle_tops(dw_wdt, tops);
+       if (!dw_wdt->timeouts[DW_WDT_NUM_TOPS - 1].sec) {
+               dev_err(dev, "No valid TOP detected\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+#define DW_WDT_DBGFS_REG(_name, _off) \
+{                                    \
+       .name = _name,                \
+       .offset = _off                \
+}
+
+static const struct debugfs_reg32 dw_wdt_dbgfs_regs[] = {
+       DW_WDT_DBGFS_REG("cr", WDOG_CONTROL_REG_OFFSET),
+       DW_WDT_DBGFS_REG("torr", WDOG_TIMEOUT_RANGE_REG_OFFSET),
+       DW_WDT_DBGFS_REG("ccvr", WDOG_CURRENT_COUNT_REG_OFFSET),
+       DW_WDT_DBGFS_REG("crr", WDOG_COUNTER_RESTART_REG_OFFSET),
+       DW_WDT_DBGFS_REG("stat", WDOG_INTERRUPT_STATUS_REG_OFFSET),
+       DW_WDT_DBGFS_REG("param5", WDOG_COMP_PARAMS_5_REG_OFFSET),
+       DW_WDT_DBGFS_REG("param4", WDOG_COMP_PARAMS_4_REG_OFFSET),
+       DW_WDT_DBGFS_REG("param3", WDOG_COMP_PARAMS_3_REG_OFFSET),
+       DW_WDT_DBGFS_REG("param2", WDOG_COMP_PARAMS_2_REG_OFFSET),
+       DW_WDT_DBGFS_REG("param1", WDOG_COMP_PARAMS_1_REG_OFFSET),
+       DW_WDT_DBGFS_REG("version", WDOG_COMP_VERSION_REG_OFFSET),
+       DW_WDT_DBGFS_REG("type", WDOG_COMP_TYPE_REG_OFFSET)
+};
+
+static void dw_wdt_dbgfs_init(struct dw_wdt *dw_wdt)
+{
+       struct device *dev = dw_wdt->wdd.parent;
+       struct debugfs_regset32 *regset;
+
+       regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
+       if (!regset)
+               return;
+
+       regset->regs = dw_wdt_dbgfs_regs;
+       regset->nregs = ARRAY_SIZE(dw_wdt_dbgfs_regs);
+       regset->base = dw_wdt->regs;
+
+       dw_wdt->dbgfs_dir = debugfs_create_dir(dev_name(dev), NULL);
+
+       debugfs_create_regset32("registers", 0444, dw_wdt->dbgfs_dir, regset);
+}
+
+static void dw_wdt_dbgfs_clear(struct dw_wdt *dw_wdt)
+{
+       debugfs_remove_recursive(dw_wdt->dbgfs_dir);
+}
+
+#else /* !CONFIG_DEBUG_FS */
+
+static void dw_wdt_dbgfs_init(struct dw_wdt *dw_wdt) {}
+static void dw_wdt_dbgfs_clear(struct dw_wdt *dw_wdt) {}
+
+#endif /* !CONFIG_DEBUG_FS */
+
 static int dw_wdt_drv_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
@@ -253,9 +563,18 @@ static int dw_wdt_drv_probe(struct platform_device *pdev)
        if (IS_ERR(dw_wdt->regs))
                return PTR_ERR(dw_wdt->regs);
 
-       dw_wdt->clk = devm_clk_get(dev, NULL);
-       if (IS_ERR(dw_wdt->clk))
-               return PTR_ERR(dw_wdt->clk);
+       /*
+        * Try to request the watchdog dedicated timer clock source. It must
+        * be supplied if asynchronous mode is enabled. Otherwise fall back
+        * to the common timer/bus clock configuration, in which the very
+        * first clock found supplies both the timer and APB signals.
+        */
+       dw_wdt->clk = devm_clk_get(dev, "tclk");
+       if (IS_ERR(dw_wdt->clk)) {
+               dw_wdt->clk = devm_clk_get(dev, NULL);
+               if (IS_ERR(dw_wdt->clk))
+                       return PTR_ERR(dw_wdt->clk);
+       }
 
        ret = clk_prepare_enable(dw_wdt->clk);
        if (ret)
@@ -267,20 +586,64 @@ static int dw_wdt_drv_probe(struct platform_device *pdev)
                goto out_disable_clk;
        }
 
+       /*
+        * Request APB clock if device is configured with async clocks mode.
+        * In this case both tclk and pclk clocks are supposed to be specified.
+        * Alas we can't know for sure whether async mode was really activated,
+        * so the pclk phandle reference is left optional. If it couldn't be
+        * found we consider the device configured in synchronous clocks mode.
+        */
+       dw_wdt->pclk = devm_clk_get_optional(dev, "pclk");
+       if (IS_ERR(dw_wdt->pclk)) {
+               ret = PTR_ERR(dw_wdt->pclk);
+               goto out_disable_clk;
+       }
+
+       ret = clk_prepare_enable(dw_wdt->pclk);
+       if (ret)
+               goto out_disable_clk;
+
        dw_wdt->rst = devm_reset_control_get_optional_shared(&pdev->dev, NULL);
        if (IS_ERR(dw_wdt->rst)) {
                ret = PTR_ERR(dw_wdt->rst);
-               goto out_disable_clk;
+               goto out_disable_pclk;
+       }
+
+       /* Enable normal reset without pre-timeout by default. */
+       dw_wdt_update_mode(dw_wdt, DW_WDT_RMOD_RESET);
+
+       /*
+        * Pre-timeout IRQ is optional, since some hardware may lack support
+        * for it. Note we must request a rising-edge IRQ, since the line is
+        * left pending either until the next watchdog kick event or until the
+        * system reset.
+        */
+       ret = platform_get_irq_optional(pdev, 0);
+       if (ret > 0) {
+               ret = devm_request_irq(dev, ret, dw_wdt_irq,
+                                      IRQF_SHARED | IRQF_TRIGGER_RISING,
+                                      pdev->name, dw_wdt);
+               if (ret)
+                       goto out_disable_pclk;
+
+               dw_wdt->wdd.info = &dw_wdt_pt_ident;
+       } else {
+               if (ret == -EPROBE_DEFER)
+                       goto out_disable_pclk;
+
+               dw_wdt->wdd.info = &dw_wdt_ident;
        }
 
        reset_control_deassert(dw_wdt->rst);
 
+       ret = dw_wdt_init_timeouts(dw_wdt, dev);
+       if (ret)
+               goto out_disable_clk;
+
        wdd = &dw_wdt->wdd;
-       wdd->info = &dw_wdt_ident;
        wdd->ops = &dw_wdt_ops;
-       wdd->min_timeout = 1;
-       wdd->max_hw_heartbeat_ms =
-               dw_wdt_top_in_seconds(dw_wdt, DW_WDT_MAX_TOP) * 1000;
+       wdd->min_timeout = dw_wdt_get_min_timeout(dw_wdt);
+       wdd->max_hw_heartbeat_ms = dw_wdt_get_max_timeout_ms(dw_wdt);
        wdd->parent = dev;
 
        watchdog_set_drvdata(wdd, dw_wdt);
@@ -293,7 +656,7 @@ static int dw_wdt_drv_probe(struct platform_device *pdev)
         * devicetree.
         */
        if (dw_wdt_is_enabled(dw_wdt)) {
-               wdd->timeout = dw_wdt_get_top(dw_wdt);
+               wdd->timeout = dw_wdt_get_timeout(dw_wdt);
                set_bit(WDOG_HW_RUNNING, &wdd->status);
        } else {
                wdd->timeout = DW_WDT_DEFAULT_SECONDS;
@@ -306,10 +669,15 @@ static int dw_wdt_drv_probe(struct platform_device *pdev)
 
        ret = watchdog_register_device(wdd);
        if (ret)
-               goto out_disable_clk;
+               goto out_disable_pclk;
+
+       dw_wdt_dbgfs_init(dw_wdt);
 
        return 0;
 
+out_disable_pclk:
+       clk_disable_unprepare(dw_wdt->pclk);
+
 out_disable_clk:
        clk_disable_unprepare(dw_wdt->clk);
        return ret;
@@ -319,8 +687,11 @@ static int dw_wdt_drv_remove(struct platform_device *pdev)
 {
        struct dw_wdt *dw_wdt = platform_get_drvdata(pdev);
 
+       dw_wdt_dbgfs_clear(dw_wdt);
+
        watchdog_unregister_device(&dw_wdt->wdd);
        reset_control_assert(dw_wdt->rst);
+       clk_disable_unprepare(dw_wdt->pclk);
        clk_disable_unprepare(dw_wdt->clk);
 
        return 0;
index f5ffa7b..2418ebb 100644 (file)
@@ -286,7 +286,7 @@ static long eurwdt_ioctl(struct file *file,
                eurwdt_timeout = time;
                eurwdt_set_timeout(time);
                spin_unlock(&eurwdt_lock);
-               /* fall through */
+               fallthrough;
 
        case WDIOC_GETTIMEOUT:
                return put_user(eurwdt_timeout, p);
index a3c44d7..f60beec 100644 (file)
@@ -306,27 +306,6 @@ exit_unlock:
        return err;
 }
 
-static int f71862fg_pin_configure(unsigned short ioaddr)
-{
-       /* When ioaddr is non-zero the calling function has to take care of
-          mutex handling and superio preparation! */
-
-       if (f71862fg_pin == 63) {
-               if (ioaddr) {
-                       /* SPI must be disabled first to use this pin! */
-                       superio_clear_bit(ioaddr, SIO_REG_ROM_ADDR_SEL, 6);
-                       superio_set_bit(ioaddr, SIO_REG_MFUNCT3, 4);
-               }
-       } else if (f71862fg_pin == 56) {
-               if (ioaddr)
-                       superio_set_bit(ioaddr, SIO_REG_MFUNCT1, 1);
-       } else {
-               pr_err("Invalid argument f71862fg_pin=%d\n", f71862fg_pin);
-               return -EINVAL;
-       }
-       return 0;
-}
-
 static int watchdog_start(void)
 {
        int err;
@@ -352,9 +331,13 @@ static int watchdog_start(void)
                break;
 
        case f71862fg:
-               err = f71862fg_pin_configure(watchdog.sioaddr);
-               if (err)
-                       goto exit_superio;
+               if (f71862fg_pin == 63) {
+                       /* SPI must be disabled first to use this pin! */
+                       superio_clear_bit(watchdog.sioaddr, SIO_REG_ROM_ADDR_SEL, 6);
+                       superio_set_bit(watchdog.sioaddr, SIO_REG_MFUNCT3, 4);
+               } else if (f71862fg_pin == 56) {
+                       superio_set_bit(watchdog.sioaddr, SIO_REG_MFUNCT1, 1);
+               }
                break;
 
        case f71868:
@@ -629,7 +612,7 @@ static long watchdog_ioctl(struct file *file, unsigned int cmd,
 
                if (new_options & WDIOS_ENABLECARD)
                        return watchdog_start();
-               /* fall through */
+               fallthrough;
 
        case WDIOC_KEEPALIVE:
                watchdog_keepalive();
@@ -643,7 +626,7 @@ static long watchdog_ioctl(struct file *file, unsigned int cmd,
                        return -EINVAL;
 
                watchdog_keepalive();
-               /* fall through */
+               fallthrough;
 
        case WDIOC_GETTIMEOUT:
                return put_user(watchdog.timeout, uarg.i);
@@ -690,9 +673,9 @@ static int __init watchdog_init(int sioaddr)
         * into the module have been registered yet.
         */
        watchdog.sioaddr = sioaddr;
-       watchdog.ident.options = WDIOC_SETTIMEOUT
-                               | WDIOF_MAGICCLOSE
-                               | WDIOF_KEEPALIVEPING;
+       watchdog.ident.options = WDIOF_MAGICCLOSE
+                               | WDIOF_KEEPALIVEPING
+                               | WDIOF_CARDRESET;
 
        snprintf(watchdog.ident.identity,
                sizeof(watchdog.ident.identity), "%s watchdog",
@@ -706,6 +689,13 @@ static int __init watchdog_init(int sioaddr)
        wdt_conf = superio_inb(sioaddr, F71808FG_REG_WDT_CONF);
        watchdog.caused_reboot = wdt_conf & BIT(F71808FG_FLAG_WDTMOUT_STS);
 
+       /*
+        * We don't want WDTMOUT_STS to stick around till regular reboot.
+        * Write 1 to the bit to clear it to zero.
+        */
+       superio_outb(sioaddr, F71808FG_REG_WDT_CONF,
+                    wdt_conf | BIT(F71808FG_FLAG_WDTMOUT_STS));
+
        superio_exit(sioaddr);
 
        err = watchdog_set_timeout(timeout);
@@ -803,7 +793,6 @@ static int __init f71808e_find(int sioaddr)
                break;
        case SIO_F71862_ID:
                watchdog.type = f71862fg;
-               err = f71862fg_pin_configure(0); /* validate module parameter */
                break;
        case SIO_F71868_ID:
                watchdog.type = f71868;
@@ -852,6 +841,11 @@ static int __init f71808e_init(void)
        int err = -ENODEV;
        int i;
 
+       if (f71862fg_pin != 63 && f71862fg_pin != 56) {
+               pr_err("Invalid argument f71862fg_pin=%d\n", f71862fg_pin);
+               return -EINVAL;
+       }
+
        for (i = 0; i < ARRAY_SIZE(addrs); i++) {
                err = f71808e_find(addrs[i]);
                if (err == 0)
index f6541d1..df5406a 100644 (file)
@@ -201,7 +201,7 @@ static long gef_wdt_ioctl(struct file *file, unsigned int cmd,
                if (get_user(timeout, (int __user *)argp))
                        return -EFAULT;
                gef_wdt_set_timeout(timeout);
-               /* Fall through */
+               fallthrough;
 
        case WDIOC_GETTIMEOUT:
                if (put_user(gef_wdt_timeout, (int __user *)argp))
index 9914a42..8341892 100644 (file)
@@ -185,7 +185,7 @@ static long geodewdt_ioctl(struct file *file, unsigned int cmd,
 
                if (geodewdt_set_heartbeat(interval))
                        return -EINVAL;
-       /* Fall through */
+               fallthrough;
        case WDIOC_GETTIMEOUT:
                return put_user(timeout, p);
 
index 2b65ea9..a0ddedc 100644 (file)
@@ -214,7 +214,7 @@ static long ibwdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                if (ibwdt_set_heartbeat(new_margin))
                        return -EINVAL;
                ibwdt_ping();
-               /* fall through */
+               fallthrough;
 
        case WDIOC_GETTIMEOUT:
                return put_user(timeout, p);
index 2fed40d..9b89d2f 100644 (file)
@@ -303,7 +303,7 @@ static long it8712f_wdt_ioctl(struct file *file, unsigned int cmd,
 
                superio_exit();
                it8712f_wdt_ping();
-               /* Fall through */
+               fallthrough;
        case WDIOC_GETTIMEOUT:
                if (put_user(margin, p))
                        return -EFAULT;
index 0988661..aae29dc 100644 (file)
@@ -136,7 +136,7 @@ static long ixp4xx_wdt_ioctl(struct file *file, unsigned int cmd,
 
                heartbeat = time;
                wdt_enable();
-               /* Fall through */
+               fallthrough;
 
        case WDIOC_GETTIMEOUT:
                ret = put_user(heartbeat, (int *)arg);
index 60ed625..f388a76 100644 (file)
@@ -155,7 +155,7 @@ static long m54xx_wdt_ioctl(struct file *file, unsigned int cmd,
 
                heartbeat = time;
                wdt_enable();
-               /* Fall through */
+               fallthrough;
 
        case WDIOC_GETTIMEOUT:
                ret = put_user(heartbeat, (int *)arg);
index 80ff946..743377c 100644 (file)
@@ -171,7 +171,7 @@ static inline void zf_set_timer(unsigned short new, unsigned char n)
        switch (n) {
        case WD1:
                zf_writew(COUNTER_1, new);
-               /* fall through */
+               fallthrough;
        case WD2:
                zf_writeb(COUNTER_2, new > 0xff ? 0xff : new);
        default:
index 03b9ac4..5419336 100644 (file)
@@ -21,6 +21,7 @@
 #define MLXREG_WDT_CLOCK_SCALE         1000
 #define MLXREG_WDT_MAX_TIMEOUT_TYPE1   32
 #define MLXREG_WDT_MAX_TIMEOUT_TYPE2   255
+#define MLXREG_WDT_MAX_TIMEOUT_TYPE3   65535
 #define MLXREG_WDT_MIN_TIMEOUT         1
 #define MLXREG_WDT_OPTIONS_BASE (WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE | \
                                 WDIOF_SETTIMEOUT)
@@ -49,6 +50,7 @@ struct mlxreg_wdt {
        int tleft_idx;
        int ping_idx;
        int reset_idx;
+       int regmap_val_sz;
        enum mlxreg_wdt_type wdt_type;
 };
 
@@ -111,7 +113,8 @@ static int mlxreg_wdt_set_timeout(struct watchdog_device *wdd,
        u32 regval, set_time, hw_timeout;
        int rc;
 
-       if (wdt->wdt_type == MLX_WDT_TYPE1) {
+       switch (wdt->wdt_type) {
+       case MLX_WDT_TYPE1:
                rc = regmap_read(wdt->regmap, reg_data->reg, &regval);
                if (rc)
                        return rc;
@@ -120,14 +123,32 @@ static int mlxreg_wdt_set_timeout(struct watchdog_device *wdd,
                regval = (regval & reg_data->mask) | hw_timeout;
                /* Round down to the actual closest number of sec. */
                set_time = BIT(hw_timeout) / MLXREG_WDT_CLOCK_SCALE;
-       } else {
+               rc = regmap_write(wdt->regmap, reg_data->reg, regval);
+               break;
+       case MLX_WDT_TYPE2:
+               set_time = timeout;
+               rc = regmap_write(wdt->regmap, reg_data->reg, timeout);
+               break;
+       case MLX_WDT_TYPE3:
+               /* WD_TYPE3 has a 2-byte set-time register */
                set_time = timeout;
-               regval = timeout;
+               if (wdt->regmap_val_sz == 1) {
+                       regval = timeout & 0xff;
+                       rc = regmap_write(wdt->regmap, reg_data->reg, regval);
+                       if (!rc) {
+                               regval = (timeout & 0xff00) >> 8;
+                               rc = regmap_write(wdt->regmap,
+                                               reg_data->reg + 1, regval);
+                       }
+               } else {
+                       rc = regmap_write(wdt->regmap, reg_data->reg, timeout);
+               }
+               break;
+       default:
+               return -EINVAL;
        }
 
        wdd->timeout = set_time;
-       rc = regmap_write(wdt->regmap, reg_data->reg, regval);
-
        if (!rc) {
                /*
                 * Restart watchdog with new timeout period
@@ -147,10 +168,25 @@ static unsigned int mlxreg_wdt_get_timeleft(struct watchdog_device *wdd)
 {
        struct mlxreg_wdt *wdt = watchdog_get_drvdata(wdd);
        struct mlxreg_core_data *reg_data = &wdt->pdata->data[wdt->tleft_idx];
-       u32 regval;
+       u32 regval, msb, lsb;
        int rc;
 
-       rc = regmap_read(wdt->regmap, reg_data->reg, &regval);
+       if (wdt->wdt_type == MLX_WDT_TYPE2) {
+               rc = regmap_read(wdt->regmap, reg_data->reg, &regval);
+       } else {
+               /* WD_TYPE3 has a 2-byte timeleft register */
+               if (wdt->regmap_val_sz == 1) {
+                       rc = regmap_read(wdt->regmap, reg_data->reg, &lsb);
+                       if (!rc) {
+                               rc = regmap_read(wdt->regmap,
+                                               reg_data->reg + 1, &msb);
+                               regval = (msb & 0xff) << 8 | (lsb & 0xff);
+                       }
+               } else {
+                       rc = regmap_read(wdt->regmap, reg_data->reg, &regval);
+               }
+       }
+
        /* Return 0 timeleft in case of a register read failure. */
        return rc == 0 ? regval : 0;
 }
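
The paired regmap accesses above are just a little-endian split of the 16-bit Type 3 value across two consecutive 8-bit registers (low byte at reg, high byte at reg + 1). A minimal sketch of the packing and unpacking, independent of the regmap API:

    /* Illustrative only: 16-bit value <-> two 8-bit register values. */
    static void split_u16(unsigned int val, unsigned int *lsb, unsigned int *msb)
    {
            *lsb = val & 0xff;              /* written to reg_data->reg     */
            *msb = (val & 0xff00) >> 8;     /* written to reg_data->reg + 1 */
    }

    static unsigned int join_u16(unsigned int lsb, unsigned int msb)
    {
            return (msb & 0xff) << 8 | (lsb & 0xff);   /* as in get_timeleft */
    }
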
@@ -212,13 +248,23 @@ static void mlxreg_wdt_config(struct mlxreg_wdt *wdt,
                wdt->wdd.info = &mlxreg_wdt_aux_info;
 
        wdt->wdt_type = pdata->version;
-       if (wdt->wdt_type == MLX_WDT_TYPE2) {
-               wdt->wdd.ops = &mlxreg_wdt_ops_type2;
-               wdt->wdd.max_timeout = MLXREG_WDT_MAX_TIMEOUT_TYPE2;
-       } else {
+       switch (wdt->wdt_type) {
+       case MLX_WDT_TYPE1:
                wdt->wdd.ops = &mlxreg_wdt_ops_type1;
                wdt->wdd.max_timeout = MLXREG_WDT_MAX_TIMEOUT_TYPE1;
+               break;
+       case MLX_WDT_TYPE2:
+               wdt->wdd.ops = &mlxreg_wdt_ops_type2;
+               wdt->wdd.max_timeout = MLXREG_WDT_MAX_TIMEOUT_TYPE2;
+               break;
+       case MLX_WDT_TYPE3:
+               wdt->wdd.ops = &mlxreg_wdt_ops_type2;
+               wdt->wdd.max_timeout = MLXREG_WDT_MAX_TIMEOUT_TYPE3;
+               break;
+       default:
+               break;
        }
+
        wdt->wdd.min_timeout = MLXREG_WDT_MIN_TIMEOUT;
 }
 
@@ -249,6 +295,11 @@ static int mlxreg_wdt_probe(struct platform_device *pdev)
 
        wdt->wdd.parent = dev;
        wdt->regmap = pdata->regmap;
+       rc = regmap_get_val_bytes(wdt->regmap);
+       if (rc < 0)
+               return -EINVAL;
+
+       wdt->regmap_val_sz = rc;
        mlxreg_wdt_config(wdt, pdata);
 
        if ((pdata->features & MLXREG_CORE_WD_FEATURE_NOWAYOUT))
index 0bc72dd..894aa63 100644 (file)
@@ -222,7 +222,7 @@ static long mv64x60_wdt_ioctl(struct file *file,
                if (get_user(timeout, (int __user *)argp))
                        return -EFAULT;
                mv64x60_wdt_set_timeout(timeout);
-               /* Fall through */
+               fallthrough;
 
        case WDIOC_GETTIMEOUT:
                if (put_user(mv64x60_wdt_timeout, (int __user *)argp))
index d7a560e..f6902a3 100644 (file)
@@ -7,7 +7,7 @@
  *     Based off i8xx_tco.c:
  *     (c) Copyright 2000 kernel concepts <nils@kernelconcepts.de>, All Rights
  *     Reserved.
- *                             http://www.kernelconcepts.de
+ *                             https://www.kernelconcepts.de
  *
  *     TCO timer driver for NV chipsets
  *     based on softdog.c by Alan Cox <alan@redhat.com>
@@ -250,7 +250,7 @@ static long nv_tco_ioctl(struct file *file, unsigned int cmd,
                if (tco_timer_set_heartbeat(new_heartbeat))
                        return -EINVAL;
                tco_timer_keepalive();
-               /* Fall through */
+               fallthrough;
        case WDIOC_GETTIMEOUT:
                return put_user(heartbeat, p);
        default:
index d325e52..c65f825 100644 (file)
@@ -9,7 +9,7 @@
  *
  *     (c) Copyright 2000 kernel concepts <nils@kernelconcepts.de>, All Rights
  *     Reserved.
- *                             http://www.kernelconcepts.de
+ *                             https://www.kernelconcepts.de
  *
  *     Neither kernel concepts nor Nils Faerber admit liability nor provide
  *     warranty for any of this software. This material is provided
index 73fbfc9..2d45043 100644 (file)
@@ -433,7 +433,7 @@ static long pc87413_ioctl(struct file *file, unsigned int cmd,
                        return -EINVAL;
                timeout = new_timeout;
                pc87413_refresh();
-               /* fall through - and return the new timeout... */
+               fallthrough;    /* and return the new timeout */
        case WDIOC_GETTIMEOUT:
                new_timeout = timeout * 60;
                return put_user(new_timeout, uarg.i);
index 7a0587f..e86fa7f 100644 (file)
@@ -651,7 +651,7 @@ static long pcwd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                        return -EINVAL;
 
                pcwd_keepalive();
-               /* Fall through */
+               fallthrough;
 
        case WDIOC_GETTIMEOUT:
                return put_user(heartbeat, argp);
index 81508a4..54d86fc 100644 (file)
@@ -542,7 +542,7 @@ static long pcipcwd_ioctl(struct file *file, unsigned int cmd,
 
                pcipcwd_keepalive();
        }
-               /* fall through */
+               fallthrough;
 
        case WDIOC_GETTIMEOUT:
                return put_user(heartbeat, p);
index 2f44af1..41a928e 100644 (file)
@@ -452,7 +452,7 @@ static long usb_pcwd_ioctl(struct file *file, unsigned int cmd,
 
                usb_pcwd_keepalive(usb_pcwd_device);
        }
-               /* fall through */
+               fallthrough;
 
        case WDIOC_GETTIMEOUT:
                return put_user(heartbeat, p);
@@ -585,9 +585,8 @@ static struct notifier_block usb_pcwd_notifier = {
 static inline void usb_pcwd_delete(struct usb_pcwd_private *usb_pcwd)
 {
        usb_free_urb(usb_pcwd->intr_urb);
-       if (usb_pcwd->intr_buffer != NULL)
-               usb_free_coherent(usb_pcwd->udev, usb_pcwd->intr_size,
-                                 usb_pcwd->intr_buffer, usb_pcwd->intr_dma);
+       usb_free_coherent(usb_pcwd->udev, usb_pcwd->intr_size,
+                         usb_pcwd->intr_buffer, usb_pcwd->intr_dma);
        kfree(usb_pcwd);
 }
 
index aee3c2e..e74802f 100644 (file)
@@ -230,7 +230,7 @@ static long rc32434_wdt_ioctl(struct file *file, unsigned int cmd,
                        return -EFAULT;
                if (rc32434_wdt_set(new_timeout))
                        return -EINVAL;
-               /* Fall through */
+               fallthrough;
        case WDIOC_GETTIMEOUT:
                return copy_to_user(argp, &timeout, sizeof(int)) ? -EFAULT : 0;
        default:
index 1b9a6dc..7008596 100644 (file)
@@ -134,7 +134,7 @@ static long riowd_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
                        return -EINVAL;
                riowd_timeout = (new_margin + 59) / 60;
                riowd_writereg(p, riowd_timeout, WDTO_INDEX);
-               /* Fall through */
+               fallthrough;
 
        case WDIOC_GETTIMEOUT:
                return put_user(riowd_timeout * 60, (int __user *)argp);
index d456dd7..705e8f7 100644 (file)
 
 #define RTIWWDRX_NMI   0xa
 
-#define RTIWWDSIZE_50P 0x50
+#define RTIWWDSIZE_50P         0x50
+#define RTIWWDSIZE_25P         0x500
+#define RTIWWDSIZE_12P5                0x5000
+#define RTIWWDSIZE_6P25                0x50000
+#define RTIWWDSIZE_3P125       0x500000
 
 #define WDENABLE_KEY   0xa98559da
 
@@ -48,7 +52,7 @@
 
 #define DWDST                  BIT(1)
 
-static int heartbeat;
+static int heartbeat = DEFAULT_HEARTBEAT;
 
 /*
  * struct to hold data for each WDT device
@@ -79,11 +83,9 @@ static int rti_wdt_start(struct watchdog_device *wdd)
         * be petted during the open window; not too early or not too late.
         * The HW configuration options only allow for the open window size
         * to be 50% or less than that; we obviously want to configure the open
-        * window as large as possible so we select the 50% option. To avoid
-        * any glitches, we accommodate 5% safety margin also, so we setup
-        * the min_hw_hearbeat at 55% of the timeout period.
+        * window as large as possible so we select the 50% option.
         */
-       wdd->min_hw_heartbeat_ms = 11 * wdd->timeout * 1000 / 20;
+       wdd->min_hw_heartbeat_ms = 500 * wdd->timeout;
 
        /* Generate NMI when wdt expires */
        writel_relaxed(RTIWWDRX_NMI, wdt->base + RTIWWDRXCTRL);
@@ -110,7 +112,48 @@ static int rti_wdt_ping(struct watchdog_device *wdd)
        return 0;
 }
 
-static unsigned int rti_wdt_get_timeleft(struct watchdog_device *wdd)
+static int rti_wdt_setup_hw_hb(struct watchdog_device *wdd, u32 wsize)
+{
+       /*
+        * RTI only supports a windowed mode, where the watchdog can only
+        * be petted during the open window; not too early or not too late.
+        * The HW configuration options only allow for the open window size
+        * to be 50% or less than that.
+        */
+       switch (wsize) {
+       case RTIWWDSIZE_50P:
+               /* 50% open window => 50% min heartbeat */
+               wdd->min_hw_heartbeat_ms = 500 * heartbeat;
+               break;
+
+       case RTIWWDSIZE_25P:
+               /* 25% open window => 75% min heartbeat */
+               wdd->min_hw_heartbeat_ms = 750 * heartbeat;
+               break;
+
+       case RTIWWDSIZE_12P5:
+               /* 12.5% open window => 87.5% min heartbeat */
+               wdd->min_hw_heartbeat_ms = 875 * heartbeat;
+               break;
+
+       case RTIWWDSIZE_6P25:
+               /* 6.5% open window => 93.5% min heartbeat */
+               wdd->min_hw_heartbeat_ms = 935 * heartbeat;
+               break;
+
+       case RTIWWDSIZE_3P125:
+               /* 3.125% open window => 96.9% min heartbeat */
+               wdd->min_hw_heartbeat_ms = 969 * heartbeat;
+               break;
+
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
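
The switch above encodes one rule: the open window is the tail of the timeout period, so the earliest permitted ping lands at (100% - window) of the timeout. A hedged helper sketch with hypothetical names, plus one worked number - a 25% window on a 60 s heartbeat gives min_hw_heartbeat_ms = 45000, matching the 750 * heartbeat case:

    /* Hypothetical helper, not in the driver: window size given in tenths
     * of a percent (500 = 50%, 250 = 25%, 125 = 12.5%, ...). */
    static unsigned int window_to_min_hb_ms(unsigned int window_tenths,
                                            unsigned int heartbeat_s)
    {
            return (1000 - window_tenths) * heartbeat_s;
    }
    /* window_to_min_hb_ms(250, 60) == 45000 ms */
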
+static unsigned int rti_wdt_get_timeleft_ms(struct watchdog_device *wdd)
 {
        u64 timer_counter;
        u32 val;
@@ -123,11 +166,18 @@ static unsigned int rti_wdt_get_timeleft(struct watchdog_device *wdd)
 
        timer_counter = readl_relaxed(wdt->base + RTIDWDCNTR);
 
+       timer_counter *= 1000;
+
        do_div(timer_counter, wdt->freq);
 
        return timer_counter;
 }
 
+static unsigned int rti_wdt_get_timeleft(struct watchdog_device *wdd)
+{
+       return rti_wdt_get_timeleft_ms(wdd) / 1000;
+}
+
 static const struct watchdog_info rti_wdt_info = {
        .options = WDIOF_KEEPALIVEPING,
        .identity = "K3 RTI Watchdog",
@@ -148,6 +198,7 @@ static int rti_wdt_probe(struct platform_device *pdev)
        struct watchdog_device *wdd;
        struct rti_wdt_device *wdt;
        struct clk *clk;
+       u32 last_ping = 0;
 
        wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
        if (!wdt)
@@ -169,6 +220,14 @@ static int rti_wdt_probe(struct platform_device *pdev)
                return -EINVAL;
        }
 
+       /*
+        * If watchdog is running at 32k clock, it is not accurate.
+        * Adjust frequency down in this case so that we don't pet
+        * the watchdog too often.
+        */
+       if (wdt->freq < 32768)
+               wdt->freq = wdt->freq * 9 / 10;
+
        pm_runtime_enable(dev);
        ret = pm_runtime_get_sync(dev);
        if (ret) {
@@ -185,11 +244,8 @@ static int rti_wdt_probe(struct platform_device *pdev)
        wdd->min_timeout = 1;
        wdd->max_hw_heartbeat_ms = (WDT_PRELOAD_MAX << WDT_PRELOAD_SHIFT) /
                wdt->freq * 1000;
-       wdd->timeout = DEFAULT_HEARTBEAT;
        wdd->parent = dev;
 
-       watchdog_init_timeout(wdd, heartbeat, dev);
-
        watchdog_set_drvdata(wdd, wdt);
        watchdog_set_nowayout(wdd, 1);
        watchdog_set_restart_priority(wdd, 128);
@@ -201,16 +257,53 @@ static int rti_wdt_probe(struct platform_device *pdev)
                goto err_iomap;
        }
 
+       if (readl(wdt->base + RTIDWDCTRL) == WDENABLE_KEY) {
+               u32 time_left_ms;
+               u64 heartbeat_ms;
+               u32 wsize;
+
+               set_bit(WDOG_HW_RUNNING, &wdd->status);
+               time_left_ms = rti_wdt_get_timeleft_ms(wdd);
+               heartbeat_ms = readl(wdt->base + RTIDWDPRLD);
+               heartbeat_ms <<= WDT_PRELOAD_SHIFT;
+               heartbeat_ms *= 1000;
+               do_div(heartbeat_ms, wdt->freq);
+               if (heartbeat_ms != heartbeat * 1000)
+                       dev_warn(dev, "watchdog already running, ignoring heartbeat config!\n");
+
+               heartbeat = heartbeat_ms;
+               heartbeat /= 1000;
+
+               wsize = readl(wdt->base + RTIWWDSIZECTRL);
+               ret = rti_wdt_setup_hw_hb(wdd, wsize);
+               if (ret) {
+                       dev_err(dev, "bad window size.\n");
+                       goto err_iomap;
+               }
+
+               last_ping = heartbeat_ms - time_left_ms;
+               if (time_left_ms > heartbeat_ms) {
+                       dev_warn(dev, "time_left > heartbeat? Assuming last ping just before now.\n");
+                       last_ping = 0;
+               }
+       }
+
+       watchdog_init_timeout(wdd, heartbeat, dev);
+
        ret = watchdog_register_device(wdd);
        if (ret) {
                dev_err(dev, "cannot register watchdog device\n");
                goto err_iomap;
        }
 
+       if (last_ping)
+               watchdog_set_last_hw_keepalive(wdd, last_ping);
+
        return 0;
 
 err_iomap:
        pm_runtime_put_sync(&pdev->dev);
+       pm_runtime_disable(&pdev->dev);
 
        return ret;
 }
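
To put hypothetical numbers on the already-running path above: if the preload register corresponds to a 60 s heartbeat and the down-counter reports 22 s left, the last hardware ping happened about 38 s ago, so last_ping = 60000 - 22000 = 38000 ms is handed to watchdog_set_last_hw_keepalive() after registration; if the reported time left ever exceeds the heartbeat, the driver assumes the ping just happened. A one-function sketch of that arithmetic:

    /* Hypothetical numbers only, mirroring the probe logic above. */
    static u32 ms_since_last_ping(u32 heartbeat_ms, u32 time_left_ms)
    {
            if (time_left_ms > heartbeat_ms)        /* inconsistent HW state */
                    return 0;                       /* assume a ping just happened */
            return heartbeat_ms - time_left_ms;     /* 60000 - 22000 = 38000 */
    }
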
@@ -221,6 +314,7 @@ static int rti_wdt_remove(struct platform_device *pdev)
 
        watchdog_unregister_device(&wdt->wdd);
        pm_runtime_put(&pdev->dev);
+       pm_runtime_disable(&pdev->dev);
 
        return 0;
 }
index 9b93be0..27846c6 100644 (file)
@@ -127,7 +127,7 @@ static long sa1100dog_ioctl(struct file *file, unsigned int cmd,
 
                pre_margin = oscr_freq * time;
                writel_relaxed(readl_relaxed(OSCR) + pre_margin, OSMR3);
-               /*fall through*/
+               fallthrough;
 
        case WDIOC_GETTIMEOUT:
                ret = put_user(pre_margin / oscr_freq, p);
index da2dad0..504be46 100644 (file)
@@ -202,7 +202,7 @@ static long sbwdog_ioctl(struct file *file, unsigned int cmd,
                timeout = time;
                sbwdog_set(user_dog, timeout);
                sbwdog_pet(user_dog);
-               /* Fall through */
+               fallthrough;
 
        case WDIOC_GETTIMEOUT:
                /*
index f2cbe6d..a947a63 100644 (file)
@@ -265,7 +265,7 @@ static long fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                timeout = new_timeout;
                wdt_keepalive();
        }
-               /* Fall through */
+               fallthrough;
        case WDIOC_GETTIMEOUT:
                return put_user(timeout, p);
        default:
index 520b8dd..d640b26 100644 (file)
@@ -195,7 +195,7 @@ static long fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                if (wdt_set_timeout(new_timeout))
                        return -EINVAL;
        }
-       /* Fall through */
+               fallthrough;
        case WDIOC_GETTIMEOUT:
                return put_user(timeout, (int __user *)arg);
        default:
index 1b20b33..04483d6 100644 (file)
@@ -154,7 +154,7 @@ static long fitpc2_wdt_ioctl(struct file *file, unsigned int cmd,
 
                margin = time;
                wdt_enable();
-               /* Fall through */
+               fallthrough;
 
        case WDIOC_GETTIMEOUT:
                ret = put_user(margin, (int *)arg);
index fbe79bc..e66e6b9 100644 (file)
@@ -321,7 +321,7 @@ static long fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 
                wdt_keepalive();
        }
-               /* Fall through */
+               fallthrough;
        case WDIOC_GETTIMEOUT:
                return put_user(timeout, p);
        default:
index 83949a3..d8b77fe 100644 (file)
@@ -295,7 +295,7 @@ static long sch311x_wdt_ioctl(struct file *file, unsigned int cmd,
                if (sch311x_wdt_set_heartbeat(new_timeout))
                        return -EINVAL;
                sch311x_wdt_keepalive();
-               /* Fall through */
+               fallthrough;
        case WDIOC_GETTIMEOUT:
                return put_user(timeout, p);
        default:
index c94098a..7b5e183 100644 (file)
@@ -186,7 +186,7 @@ static long scx200_wdt_ioctl(struct file *file, unsigned int cmd,
                margin = new_margin;
                scx200_wdt_update_margin();
                scx200_wdt_ping();
-               /* Fall through */
+               fallthrough;
        case WDIOC_GETTIMEOUT:
                if (put_user(margin, p))
                        return -EFAULT;
index 43de56a..7463df4 100644 (file)
@@ -474,7 +474,7 @@ static long wb_smsc_wdt_ioctl(struct file *file,
                        return -EINVAL;
                timeout = new_timeout;
                wb_smsc_wdt_set_timeout(timeout);
-               /* fall through - and return the new timeout... */
+               fallthrough;    /* and return the new timeout */
        case WDIOC_GETTIMEOUT:
                new_timeout = timeout;
                if (unit == UNIT_MINUTE)
index 3e4885c..7a10962 100644 (file)
 #include <linux/hrtimer.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
+#include <linux/kthread.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/reboot.h>
 #include <linux/types.h>
 #include <linux/watchdog.h>
+#include <linux/workqueue.h>
 
 #define TIMER_MARGIN   60              /* Default is 60 seconds */
 static unsigned int soft_margin = TIMER_MARGIN;        /* in seconds */
@@ -49,11 +51,34 @@ module_param(soft_panic, int, 0);
 MODULE_PARM_DESC(soft_panic,
        "Softdog action, set to 1 to panic, 0 to reboot (default=0)");
 
+static char *soft_reboot_cmd;
+module_param(soft_reboot_cmd, charp, 0000);
+MODULE_PARM_DESC(soft_reboot_cmd,
+       "Set reboot command. Emergency reboot takes place if unset");
+
+static bool soft_active_on_boot;
+module_param(soft_active_on_boot, bool, 0000);
+MODULE_PARM_DESC(soft_active_on_boot,
+       "Set to true to activate Softdog on boot (default=false)");
+
 static struct hrtimer softdog_ticktock;
 static struct hrtimer softdog_preticktock;
 
+static int reboot_kthread_fn(void *data)
+{
+       kernel_restart(soft_reboot_cmd);
+       return -EPERM; /* Should not reach here */
+}
+
+static void reboot_work_fn(struct work_struct *unused)
+{
+       kthread_run(reboot_kthread_fn, NULL, "softdog_reboot");
+}
+
 static enum hrtimer_restart softdog_fire(struct hrtimer *timer)
 {
+       static bool soft_reboot_fired;
+
        module_put(THIS_MODULE);
        if (soft_noboot) {
                pr_crit("Triggered - Reboot ignored\n");
@@ -62,6 +87,33 @@ static enum hrtimer_restart softdog_fire(struct hrtimer *timer)
                panic("Software Watchdog Timer expired");
        } else {
                pr_crit("Initiating system reboot\n");
+               if (!soft_reboot_fired && soft_reboot_cmd != NULL) {
+                       static DECLARE_WORK(reboot_work, reboot_work_fn);
+                       /*
+                        * The 'kernel_restart' is a 'might-sleep' operation.
+                        * Also, executing it in system-wide workqueues blocks
+                        * any driver from using the same workqueue in its
+                        * shutdown callback function. Thus, we should execute
+                        * the 'kernel_restart' in a standalone kernel thread.
+                        * But since starting a kernel thread is also a
+                        * 'might-sleep' operation, the 'reboot_work' is
+                        * required as a launcher of the kernel thread.
+                        *
+                        * After requesting the reboot, restart the timer to
+                        * schedule an 'emergency_restart' reboot after
+                        * 'TIMER_MARGIN' seconds. If the softdog hangs, it
+                        * might be because of scheduling issues, in which
+                        * case both 'schedule_work' and 'kernel_restart' may
+                        * malfunction at the same time.
+                        */
+                       soft_reboot_fired = true;
+                       schedule_work(&reboot_work);
+                       hrtimer_add_expires_ns(timer,
+                                       (u64)TIMER_MARGIN * NSEC_PER_SEC);
+
+                       return HRTIMER_RESTART;
+               }
                emergency_restart();
                pr_crit("Reboot didn't ?????\n");
        }
@@ -145,12 +197,17 @@ static int __init softdog_init(void)
                softdog_preticktock.function = softdog_pretimeout;
        }
 
+       if (soft_active_on_boot)
+               softdog_ping(&softdog_dev);
+
        ret = watchdog_register_device(&softdog_dev);
        if (ret)
                return ret;
 
        pr_info("initialized. soft_noboot=%d soft_margin=%d sec soft_panic=%d (nowayout=%d)\n",
                soft_noboot, softdog_dev.timeout, soft_panic, nowayout);
+       pr_info("             soft_reboot_cmd=%s soft_active_on_boot=%d\n",
+               soft_reboot_cmd ?: "<not set>", soft_active_on_boot);
 
        return 0;
 }
index 93bd302..85e9664 100644 (file)
@@ -7,7 +7,7 @@
  *     Based on i8xx_tco.c:
  *     (c) Copyright 2000 kernel concepts <nils@kernelconcepts.de>, All Rights
  *     Reserved.
- *                             http://www.kernelconcepts.de
+ *                             https://www.kernelconcepts.de
  *
  *     See AMD Publication 43009 "AMD SB700/710/750 Register Reference Guide",
  *         AMD Publication 45482 "AMD SB800-Series Southbridges Register
index 5f05a45..b507578 100644 (file)
@@ -235,7 +235,7 @@ static int sunxi_wdt_probe(struct platform_device *pdev)
 
        sunxi_wdt = devm_kzalloc(dev, sizeof(*sunxi_wdt), GFP_KERNEL);
        if (!sunxi_wdt)
-               return -EINVAL;
+               return -ENOMEM;
 
        sunxi_wdt->wdt_regs = of_device_get_match_data(dev);
        if (!sunxi_wdt->wdt_regs)
index 6b3b667..5772cc5 100644 (file)
@@ -289,7 +289,7 @@ static long fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                timeout = new_timeout;
                wdt_keepalive();
        }
-               /* Fall through */
+               fallthrough;
        case WDIOC_GETTIMEOUT:
                return put_user(timeout, p);
        default:
index 5212e68..fd64ae7 100644 (file)
@@ -422,7 +422,7 @@ static long wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                        return -EINVAL;
 
                wdt_keepalive();
-               /* Fall through */
+               fallthrough;
 
        case WDIOC_GETTIMEOUT:
                return put_user(timeout, uarg.i);
index a692584..a8a1ed2 100644 (file)
@@ -174,7 +174,7 @@ static long wafwdt_ioctl(struct file *file, unsigned int cmd,
                timeout = new_timeout;
                wafwdt_stop();
                wafwdt_start();
-               /* Fall through */
+               fallthrough;
        case WDIOC_GETTIMEOUT:
                return put_user(timeout, p);
 
index b9dc2c3..6798add 100644 (file)
@@ -275,15 +275,18 @@ static int watchdog_start(struct watchdog_device *wdd)
        set_bit(_WDOG_KEEPALIVE, &wd_data->status);
 
        started_at = ktime_get();
-       if (watchdog_hw_running(wdd) && wdd->ops->ping)
-               err = wdd->ops->ping(wdd);
-       else
+       if (watchdog_hw_running(wdd) && wdd->ops->ping) {
+               err = __watchdog_ping(wdd);
+               if (err == 0)
+                       set_bit(WDOG_ACTIVE, &wdd->status);
+       } else {
                err = wdd->ops->start(wdd);
-       if (err == 0) {
-               set_bit(WDOG_ACTIVE, &wdd->status);
-               wd_data->last_keepalive = started_at;
-               wd_data->last_hw_keepalive = started_at;
-               watchdog_update_worker(wdd);
+               if (err == 0) {
+                       set_bit(WDOG_ACTIVE, &wdd->status);
+                       wd_data->last_keepalive = started_at;
+                       wd_data->last_hw_keepalive = started_at;
+                       watchdog_update_worker(wdd);
+               }
        }
 
        return err;
@@ -587,7 +590,7 @@ static DEVICE_ATTR_RW(pretimeout_governor);
 static umode_t wdt_is_visible(struct kobject *kobj, struct attribute *attr,
                                int n)
 {
-       struct device *dev = container_of(kobj, struct device, kobj);
+       struct device *dev = kobj_to_dev(kobj);
        struct watchdog_device *wdd = dev_get_drvdata(dev);
        umode_t mode = attr->mode;
 
@@ -776,7 +779,7 @@ static long watchdog_ioctl(struct file *file, unsigned int cmd,
                err = watchdog_ping(wdd);
                if (err < 0)
                        break;
-               /* fall through */
+               fallthrough;
        case WDIOC_GETTIMEOUT:
                /* timeout == 0 means that we don't know the timeout */
                if (wdd->timeout == 0) {
@@ -916,7 +919,7 @@ static int watchdog_release(struct inode *inode, struct file *file)
         * or if WDIOF_MAGICCLOSE is not set. If nowayout was set then
         * watchdog_stop will fail.
         */
-       if (!test_bit(WDOG_ACTIVE, &wdd->status))
+       if (!watchdog_active(wdd))
                err = 0;
        else if (test_and_clear_bit(_WDOG_ALLOW_RELEASE, &wd_data->status) ||
                 !(wdd->info->options & WDIOF_MAGICCLOSE))
@@ -994,6 +997,15 @@ static int watchdog_cdev_register(struct watchdog_device *wdd)
        if (IS_ERR_OR_NULL(watchdog_kworker))
                return -ENODEV;
 
+       device_initialize(&wd_data->dev);
+       wd_data->dev.devt = MKDEV(MAJOR(watchdog_devt), wdd->id);
+       wd_data->dev.class = &watchdog_class;
+       wd_data->dev.parent = wdd->parent;
+       wd_data->dev.groups = wdd->groups;
+       wd_data->dev.release = watchdog_core_data_release;
+       dev_set_drvdata(&wd_data->dev, wdd);
+       dev_set_name(&wd_data->dev, "watchdog%d", wdd->id);
+
        kthread_init_work(&wd_data->work, watchdog_ping_work);
        hrtimer_init(&wd_data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
        wd_data->timer.function = watchdog_timer_expired;
@@ -1014,15 +1026,6 @@ static int watchdog_cdev_register(struct watchdog_device *wdd)
                }
        }
 
-       device_initialize(&wd_data->dev);
-       wd_data->dev.devt = MKDEV(MAJOR(watchdog_devt), wdd->id);
-       wd_data->dev.class = &watchdog_class;
-       wd_data->dev.parent = wdd->parent;
-       wd_data->dev.groups = wdd->groups;
-       wd_data->dev.release = watchdog_core_data_release;
-       dev_set_drvdata(&wd_data->dev, wdd);
-       dev_set_name(&wd_data->dev, "watchdog%d", wdd->id);
-
        /* Fill in the data structures */
        cdev_init(&wd_data->cdev, &watchdog_fops);
 
@@ -1136,6 +1139,36 @@ void watchdog_dev_unregister(struct watchdog_device *wdd)
 }
 
 /*
+ *     watchdog_set_last_hw_keepalive: set last HW keepalive time for watchdog
+ *     @wdd: watchdog device
+ *     @last_ping_ms: time since last HW heartbeat
+ *
+ *     Adjusts the last known HW keepalive time for a watchdog timer.
+ *     This is needed if the watchdog is already running when the probe
+ *     function is called, and it can't be pinged immediately. This
+ *     function must be called immediately after watchdog registration,
+ *     and min_hw_heartbeat_ms must be set for this to be useful.
+ */
+int watchdog_set_last_hw_keepalive(struct watchdog_device *wdd,
+                                  unsigned int last_ping_ms)
+{
+       struct watchdog_core_data *wd_data;
+       ktime_t now;
+
+       if (!wdd)
+               return -EINVAL;
+
+       wd_data = wdd->wd_data;
+
+       now = ktime_get();
+
+       wd_data->last_hw_keepalive = ktime_sub(now, ms_to_ktime(last_ping_ms));
+
+       return __watchdog_ping(wdd);
+}
+EXPORT_SYMBOL_GPL(watchdog_set_last_hw_keepalive);
+
+/*
  *     watchdog_dev_init: init dev part of watchdog core
  *
  *     Allocate a range of chardev nodes to use for watchdog devices
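The new watchdog_set_last_hw_keepalive() export is aimed at drivers whose hardware watchdog is already running at probe time and must not be pinged again before min_hw_heartbeat_ms has elapsed. A sketch of a hypothetical caller (foo_wdt_probe_tail and the 100 ms figure are invented; only the two watchdog core calls are real API):

#include <linux/watchdog.h>

/* Hypothetical tail of a probe routine; wdd is assumed fully set up,
 * including min_hw_heartbeat_ms, and the hardware is already ticking. */
static int foo_wdt_probe_tail(struct device *dev, struct watchdog_device *wdd)
{
        int ret;

        ret = devm_watchdog_register_device(dev, wdd);
        if (ret)
                return ret;

        /*
         * Tell the core when the hardware was last serviced so the next
         * ping is spaced at least min_hw_heartbeat_ms away from it.
         */
        return watchdog_set_last_hw_keepalive(wdd, 100 /* ms since last ping */);
}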
index f9054cb..a9e40b5 100644 (file)
@@ -389,7 +389,7 @@ static long wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                if (wdt_set_heartbeat(new_heartbeat))
                        return -EINVAL;
                wdt_ping();
-               /* Fall through */
+               fallthrough;
        case WDIOC_GETTIMEOUT:
                return put_user(heartbeat, p);
        default:
index e60993d..110249e 100644 (file)
@@ -168,7 +168,7 @@ static long watchdog_ioctl(struct file *file, unsigned int cmd,
                soft_margin = new_margin;
                reload = soft_margin * (mem_fclk_21285 / 256);
                watchdog_ping();
-               /* Fall through */
+               fallthrough;
        case WDIOC_GETTIMEOUT:
                ret = put_user(soft_margin, int_arg);
                break;
index 066a4fb..c9b8e86 100644 (file)
@@ -398,7 +398,7 @@ static long wdt977_ioctl(struct file *file, unsigned int cmd,
                        return -EINVAL;
 
                wdt977_keepalive();
-               /* Fall through */
+               fallthrough;
 
        case WDIOC_GETTIMEOUT:
                return put_user(timeout, uarg.i);
index e528024..c3254ba 100644 (file)
@@ -426,7 +426,7 @@ static long wdtpci_ioctl(struct file *file, unsigned int cmd,
                if (wdtpci_set_heartbeat(new_heartbeat))
                        return -EINVAL;
                wdtpci_ping();
-               /* fall through */
+               fallthrough;
        case WDIOC_GETTIMEOUT:
                return put_user(heartbeat, p);
        default:
index 1d339ef..ea6c1e7 100644 (file)
@@ -52,9 +52,7 @@ config XEN_BALLOON_MEMORY_HOTPLUG
 
 config XEN_BALLOON_MEMORY_HOTPLUG_LIMIT
        int "Hotplugged memory limit (in GiB) for a PV guest"
-       default 512 if X86_64
-       default 4 if X86_32
-       range 0 64 if X86_32
+       default 512
        depends on XEN_HAVE_PVMMU
        depends on XEN_BALLOON_MEMORY_HOTPLUG
        help
index 0d322f3..c25c9a6 100644 (file)
@@ -5,8 +5,7 @@ obj-y   += mem-reservation.o
 obj-y  += events/
 obj-y  += xenbus/
 
-nostackp := $(call cc-option, -fno-stack-protector)
-CFLAGS_features.o                      := $(nostackp)
+CFLAGS_features.o                      := -fno-stack-protector
 
 dom0-$(CONFIG_ARM64) += arm-device.o
 dom0-$(CONFIG_PCI) += pci.o
index 75d3bb9..b1b6eeb 100644 (file)
@@ -613,6 +613,14 @@ dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev,
                goto fail_detach;
        }
 
+       /* Check that we have zero offset. */
+       if (sgt->sgl->offset) {
+               ret = ERR_PTR(-EINVAL);
+               pr_debug("DMA buffer has %d bytes offset, user-space expects 0\n",
+                        sgt->sgl->offset);
+               goto fail_unmap;
+       }
+
        /* Check number of pages that imported buffer has. */
        if (attach->dmabuf->size != gntdev_dmabuf->nr_pages << PAGE_SHIFT) {
                ret = ERR_PTR(-EINVAL);
index 15a99f9..39def02 100644 (file)
@@ -500,10 +500,9 @@ void v9fs_session_close(struct v9fs_session_info *v9ses)
        }
 
 #ifdef CONFIG_9P_FSCACHE
-       if (v9ses->fscache) {
+       if (v9ses->fscache)
                v9fs_cache_session_put_cookie(v9ses);
-               kfree(v9ses->cachetag);
-       }
+       kfree(v9ses->cachetag);
 #endif
        kfree(v9ses->uname);
        kfree(v9ses->aname);
index c9255d3..ae0c38a 100644 (file)
@@ -223,8 +223,7 @@ v9fs_blank_wstat(struct p9_wstat *wstat)
 struct inode *v9fs_alloc_inode(struct super_block *sb)
 {
        struct v9fs_inode *v9inode;
-       v9inode = (struct v9fs_inode *)kmem_cache_alloc(v9fs_inode_cache,
-                                                       GFP_KERNEL);
+       v9inode = kmem_cache_alloc(v9fs_inode_cache, GFP_KERNEL);
        if (!v9inode)
                return NULL;
 #ifdef CONFIG_9P_FSCACHE
@@ -368,59 +367,6 @@ struct inode *v9fs_get_inode(struct super_block *sb, umode_t mode, dev_t rdev)
        return inode;
 }
 
-/*
-static struct v9fs_fid*
-v9fs_clone_walk(struct v9fs_session_info *v9ses, u32 fid, struct dentry *dentry)
-{
-       int err;
-       int nfid;
-       struct v9fs_fid *ret;
-       struct v9fs_fcall *fcall;
-
-       nfid = v9fs_get_idpool(&v9ses->fidpool);
-       if (nfid < 0) {
-               eprintk(KERN_WARNING, "no free fids available\n");
-               return ERR_PTR(-ENOSPC);
-       }
-
-       err = v9fs_t_walk(v9ses, fid, nfid, (char *) dentry->d_name.name,
-               &fcall);
-
-       if (err < 0) {
-               if (fcall && fcall->id == RWALK)
-                       goto clunk_fid;
-
-               PRINT_FCALL_ERROR("walk error", fcall);
-               v9fs_put_idpool(nfid, &v9ses->fidpool);
-               goto error;
-       }
-
-       kfree(fcall);
-       fcall = NULL;
-       ret = v9fs_fid_create(v9ses, nfid);
-       if (!ret) {
-               err = -ENOMEM;
-               goto clunk_fid;
-       }
-
-       err = v9fs_fid_insert(ret, dentry);
-       if (err < 0) {
-               v9fs_fid_destroy(ret);
-               goto clunk_fid;
-       }
-
-       return ret;
-
-clunk_fid:
-       v9fs_t_clunk(v9ses, nfid);
-
-error:
-       kfree(fcall);
-       return ERR_PTR(err);
-}
-*/
-
-
 /**
  * v9fs_clear_inode - release an inode
  * @inode: inode to release
@@ -1090,7 +1036,7 @@ static int v9fs_vfs_setattr(struct dentry *dentry, struct iattr *iattr)
 {
        int retval;
        struct v9fs_session_info *v9ses;
-       struct p9_fid *fid;
+       struct p9_fid *fid = NULL;
        struct p9_wstat wstat;
 
        p9_debug(P9_DEBUG_VFS, "\n");
@@ -1100,7 +1046,12 @@ static int v9fs_vfs_setattr(struct dentry *dentry, struct iattr *iattr)
 
        retval = -EPERM;
        v9ses = v9fs_dentry2v9ses(dentry);
-       fid = v9fs_fid_lookup(dentry);
+       if (iattr->ia_valid & ATTR_FILE) {
+               fid = iattr->ia_file->private_data;
+               WARN_ON(!fid);
+       }
+       if (!fid)
+               fid = v9fs_fid_lookup(dentry);
        if(IS_ERR(fid))
                return PTR_ERR(fid);
 
index 60328b2..0028ecc 100644 (file)
@@ -540,7 +540,7 @@ static int v9fs_mapped_iattr_valid(int iattr_valid)
 int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr)
 {
        int retval;
-       struct p9_fid *fid;
+       struct p9_fid *fid = NULL;
        struct p9_iattr_dotl p9attr;
        struct inode *inode = d_inode(dentry);
 
@@ -560,7 +560,12 @@ int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr)
        p9attr.mtime_sec = iattr->ia_mtime.tv_sec;
        p9attr.mtime_nsec = iattr->ia_mtime.tv_nsec;
 
-       fid = v9fs_fid_lookup(dentry);
+       if (iattr->ia_valid & ATTR_FILE) {
+               fid = iattr->ia_file->private_data;
+               WARN_ON(!fid);
+       }
+       if (!fid)
+               fid = v9fs_fid_lookup(dentry);
        if (IS_ERR(fid))
                return PTR_ERR(fid);
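Both 9p setattr paths now prefer the fid stored in the open file's private_data when the caller supplied one (ATTR_FILE is set), and only fall back to the dentry-based v9fs_fid_lookup() otherwise. A toy restatement of that preference order (the _sketch types are invented stand-ins, not the 9p structures):

#include <stdio.h>

struct fid_sketch  { int id; };
struct file_sketch { struct fid_sketch *private_data; };

static struct fid_sketch *pick_fid(struct file_sketch *open_file,
                                   struct fid_sketch *looked_up)
{
        /* The open file already carries the right fid; use it if present. */
        if (open_file && open_file->private_data)
                return open_file->private_data;
        return looked_up;
}

int main(void)
{
        struct fid_sketch open_fid = { 1 }, lookup_fid = { 2 };
        struct file_sketch f = { &open_fid };

        printf("fid %d\n", pick_fid(&f, &lookup_fid)->id);   /* fid 1 */
        return 0;
}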
 
index b79879a..7b784af 100644 (file)
@@ -382,15 +382,17 @@ void afs_dynroot_depopulate(struct super_block *sb)
                net->dynroot_sb = NULL;
        mutex_unlock(&net->proc_cells_lock);
 
-       inode_lock(root->d_inode);
-
-       /* Remove all the pins for dirs created for manually added cells */
-       list_for_each_entry_safe(subdir, tmp, &root->d_subdirs, d_child) {
-               if (subdir->d_fsdata) {
-                       subdir->d_fsdata = NULL;
-                       dput(subdir);
+       if (root) {
+               inode_lock(root->d_inode);
+
+               /* Remove all the pins for dirs created for manually added cells */
+               list_for_each_entry_safe(subdir, tmp, &root->d_subdirs, d_child) {
+                       if (subdir->d_fsdata) {
+                               subdir->d_fsdata = NULL;
+                               dput(subdir);
+                       }
                }
-       }
 
-       inode_unlock(root->d_inode);
+               inode_unlock(root->d_inode);
+       }
 }
index 24fd163..97cab12 100644 (file)
@@ -235,6 +235,7 @@ int afs_put_operation(struct afs_operation *op)
        afs_end_cursor(&op->ac);
        afs_put_serverlist(op->net, op->server_list);
        afs_put_volume(op->net, op->volume, afs_volume_trace_put_put_op);
+       key_put(op->key);
        kfree(op);
        return ret;
 }
index f3a0f41..75105f4 100644 (file)
@@ -20,7 +20,7 @@
  * another mount. This situation arises when starting automount(8)
  * or other user space daemon which uses direct mounts or offset
  * mounts (used for autofs lazy mount/umount of nested mount trees),
- * which have been left busy at at service shutdown.
+ * which have been left busy at service shutdown.
  */
 
 typedef int (*ioctl_fn)(struct file *, struct autofs_sb_info *,
@@ -496,7 +496,7 @@ static int autofs_dev_ioctl_askumount(struct file *fp,
  * located path is the root of a mount we return 1 along with
  * the super magic of the mount or 0 otherwise.
  *
- * In both cases the the device number (as returned by
+ * In both cases the device number (as returned by
  * new_encode_dev()) is also returned.
  */
 static int autofs_dev_ioctl_ismountpoint(struct file *fp,
index ea10f7b..ea1c28c 100644 (file)
@@ -2303,7 +2303,7 @@ struct btrfs_backref_iter *btrfs_backref_iter_alloc(
                return NULL;
 
        ret->path = btrfs_alloc_path();
-       if (!ret) {
+       if (!ret->path) {
                kfree(ret);
                return NULL;
        }
index f39d47a..219a09a 100644 (file)
@@ -34,6 +34,8 @@ struct io_failure_record;
  */
 #define CHUNK_ALLOCATED                                EXTENT_DIRTY
 #define CHUNK_TRIMMED                          EXTENT_DEFRAG
+#define CHUNK_STATE_MASK                       (CHUNK_ALLOCATED |              \
+                                                CHUNK_TRIMMED)
 
 enum {
        IO_TREE_FS_PINNED_EXTENTS,
index 61ede33..de6fe17 100644 (file)
@@ -33,6 +33,7 @@
 #include "delalloc-space.h"
 #include "block-group.h"
 #include "discard.h"
+#include "rcu-string.h"
 
 #undef SCRAMBLE_DELAYED_REFS
 
@@ -5668,6 +5669,19 @@ static int btrfs_trim_free_extents(struct btrfs_device *device, u64 *trimmed)
                                            &start, &end,
                                            CHUNK_TRIMMED | CHUNK_ALLOCATED);
 
+               /* Check if there are any CHUNK_* bits left */
+               if (start > device->total_bytes) {
+                       WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
+                       btrfs_warn_in_rcu(fs_info,
+"ignoring attempt to trim beyond device size: offset %llu length %llu device %s device size %llu",
+                                         start, end - start + 1,
+                                         rcu_str_deref(device->name),
+                                         device->total_bytes);
+                       mutex_unlock(&fs_info->chunk_mutex);
+                       ret = 0;
+                       break;
+               }
+
                /* Ensure we skip the reserved area in the first 1M */
                start = max_t(u64, start, SZ_1M);
 
index 6d961e1..ef0fd7a 100644 (file)
@@ -2282,7 +2282,7 @@ out:
 static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl,
                          struct btrfs_free_space *info, bool update_stat)
 {
-       struct btrfs_free_space *left_info;
+       struct btrfs_free_space *left_info = NULL;
        struct btrfs_free_space *right_info;
        bool merged = false;
        u64 offset = info->offset;
@@ -2298,7 +2298,7 @@ static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl,
        if (right_info && rb_prev(&right_info->offset_index))
                left_info = rb_entry(rb_prev(&right_info->offset_index),
                                     struct btrfs_free_space, offset_index);
-       else
+       else if (!right_info)
                left_info = tree_search_offset(ctl, offset - 1, 0, 0);
 
        /* See try_merge_free_space() comment. */
index 6dc03ba..51fcd82 100644 (file)
@@ -654,12 +654,18 @@ cont:
                                                     page_error_op |
                                                     PAGE_END_WRITEBACK);
 
-                       for (i = 0; i < nr_pages; i++) {
-                               WARN_ON(pages[i]->mapping);
-                               put_page(pages[i]);
+                       /*
+                        * Ensure we only free the compressed pages if we have
+                        * them allocated, as we can still reach here with
+                        * inode_need_compress() == false.
+                        */
+                       if (pages) {
+                               for (i = 0; i < nr_pages; i++) {
+                                       WARN_ON(pages[i]->mapping);
+                                       put_page(pages[i]);
+                               }
+                               kfree(pages);
                        }
-                       kfree(pages);
-
                        return 0;
                }
        }
@@ -6622,7 +6628,7 @@ struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
            extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
                /* Only regular file could have regular/prealloc extent */
                if (!S_ISREG(inode->vfs_inode.i_mode)) {
-                       ret = -EUCLEAN;
+                       err = -EUCLEAN;
                        btrfs_crit(fs_info,
                "regular/prealloc extent found for non-regular inode %llu",
                                   btrfs_ino(inode));
index 5a9dc31..e529ddb 100644 (file)
@@ -517,6 +517,7 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
        char *compress_type;
        bool compress_force = false;
        enum btrfs_compression_type saved_compress_type;
+       int saved_compress_level;
        bool saved_compress_force;
        int no_compress = 0;
 
@@ -598,6 +599,7 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
                                info->compress_type : BTRFS_COMPRESS_NONE;
                        saved_compress_force =
                                btrfs_test_opt(info, FORCE_COMPRESS);
+                       saved_compress_level = info->compress_level;
                        if (token == Opt_compress ||
                            token == Opt_compress_force ||
                            strncmp(args[0].from, "zlib", 4) == 0) {
@@ -642,6 +644,8 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
                                no_compress = 0;
                        } else if (strncmp(args[0].from, "no", 2) == 0) {
                                compress_type = "no";
+                               info->compress_level = 0;
+                               info->compress_type = 0;
                                btrfs_clear_opt(info->mount_opt, COMPRESS);
                                btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS);
                                compress_force = false;
@@ -662,11 +666,11 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
                                 */
                                btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS);
                        }
-                       if ((btrfs_test_opt(info, COMPRESS) &&
-                            (info->compress_type != saved_compress_type ||
-                             compress_force != saved_compress_force)) ||
-                           (!btrfs_test_opt(info, COMPRESS) &&
-                            no_compress == 1)) {
+                       if (no_compress == 1) {
+                               btrfs_info(info, "use no compression");
+                       } else if ((info->compress_type != saved_compress_type) ||
+                                  (compress_force != saved_compress_force) ||
+                                  (info->compress_level != saved_compress_level)) {
                                btrfs_info(info, "%s %s compression, level %d",
                                           (compress_force) ? "force" : "use",
                                           compress_type, info->compress_level);
@@ -1382,6 +1386,7 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
 {
        struct btrfs_fs_info *info = btrfs_sb(dentry->d_sb);
        const char *compress_type;
+       const char *subvol_name;
 
        if (btrfs_test_opt(info, DEGRADED))
                seq_puts(seq, ",degraded");
@@ -1468,8 +1473,13 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
                seq_puts(seq, ",ref_verify");
        seq_printf(seq, ",subvolid=%llu",
                  BTRFS_I(d_inode(dentry))->root->root_key.objectid);
-       seq_puts(seq, ",subvol=");
-       seq_dentry(seq, dentry, " \t\n\\");
+       subvol_name = btrfs_get_subvol_name_from_objectid(info,
+                       BTRFS_I(d_inode(dentry))->root->root_key.objectid);
+       if (!IS_ERR(subvol_name)) {
+               seq_puts(seq, ",subvol=");
+               seq_escape(seq, subvol_name, " \t\n\\");
+               kfree(subvol_name);
+       }
        return 0;
 }
 
@@ -1950,6 +1960,12 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
                set_bit(BTRFS_FS_OPEN, &fs_info->flags);
        }
 out:
+       /*
+        * We need to set SB_I_VERSION here otherwise it'll get cleared by VFS,
+        * since the absence of the flag means it can be toggled off by remount.
+        */
+       *flags |= SB_I_VERSION;
+
        wake_up_process(fs_info->transaction_kthread);
        btrfs_remount_cleanup(fs_info, old_opts);
        clear_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state);
index 104c80c..c8df2ed 100644 (file)
@@ -1565,9 +1565,11 @@ void btrfs_sysfs_del_qgroups(struct btrfs_fs_info *fs_info)
        rbtree_postorder_for_each_entry_safe(qgroup, next,
                                             &fs_info->qgroup_tree, node)
                btrfs_sysfs_del_one_qgroup(fs_info, qgroup);
-       kobject_del(fs_info->qgroups_kobj);
-       kobject_put(fs_info->qgroups_kobj);
-       fs_info->qgroups_kobj = NULL;
+       if (fs_info->qgroups_kobj) {
+               kobject_del(fs_info->qgroups_kobj);
+               kobject_put(fs_info->qgroups_kobj);
+               fs_info->qgroups_kobj = NULL;
+       }
 }
 
 /* Called when qgroups get initialized, thus there is no need for locking */
index ea8136d..696dd86 100644 (file)
@@ -4036,11 +4036,8 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
                                                fs_info->csum_root,
                                                ds + cs, ds + cs + cl - 1,
                                                &ordered_sums, 0);
-                               if (ret) {
-                                       btrfs_release_path(dst_path);
-                                       kfree(ins_data);
-                                       return ret;
-                               }
+                               if (ret)
+                                       break;
                        }
                }
        }
@@ -4053,7 +4050,6 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
         * we have to do this after the loop above to avoid changing the
         * log tree while trying to change the log tree.
         */
-       ret = 0;
        while (!list_empty(&ordered_sums)) {
                struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
                                                   struct btrfs_ordered_sum,
index d7670e2..ee96c58 100644 (file)
@@ -4720,6 +4720,10 @@ again:
        }
 
        mutex_lock(&fs_info->chunk_mutex);
+       /* Clear all state bits beyond the shrunk device size */
+       clear_extent_bits(&device->alloc_state, new_size, (u64)-1,
+                         CHUNK_STATE_MASK);
+
        btrfs_device_set_disk_total_bytes(device, new_size);
        if (list_empty(&device->post_commit_list))
                list_add_tail(&device->post_commit_list,
index 061dd20..d468ed9 100644 (file)
@@ -3157,6 +3157,15 @@ int __sync_dirty_buffer(struct buffer_head *bh, int op_flags)
        WARN_ON(atomic_read(&bh->b_count) < 1);
        lock_buffer(bh);
        if (test_clear_buffer_dirty(bh)) {
+               /*
+                * The bh should be mapped, but it might not be if the
+                * device was hot-removed. Not much we can do but fail the I/O.
+                */
+               if (!buffer_mapped(bh)) {
+                       unlock_buffer(bh);
+                       return -EIO;
+               }
+
                get_bh(bh);
                bh->b_end_io = end_buffer_write_sync;
                ret = submit_bh(REQ_OP_WRITE, op_flags, bh);
index cf235f6..471e401 100644 (file)
@@ -13,7 +13,7 @@ config CEPH_FS
          scalable file system designed to provide high performance,
          reliable access to petabytes of storage.
 
-         More information at http://ceph.newdream.net/.
+         More information at https://ceph.io/.
 
          If unsure, say N.
 
index 01ad097..6ea761c 100644 (file)
@@ -862,8 +862,7 @@ static void writepages_finish(struct ceph_osd_request *req)
 
        osd_data = osd_req_op_extent_osd_data(req, 0);
        if (osd_data->pages_from_pool)
-               mempool_free(osd_data->pages,
-                            ceph_sb_to_client(inode->i_sb)->wb_pagevec_pool);
+               mempool_free(osd_data->pages, ceph_wb_pagevec_pool);
        else
                kfree(osd_data->pages);
        ceph_osdc_put_request(req);
@@ -955,10 +954,10 @@ retry:
                int num_ops = 0, op_idx;
                unsigned i, pvec_pages, max_pages, locked_pages = 0;
                struct page **pages = NULL, **data_pages;
-               mempool_t *pool = NULL; /* Becomes non-null if mempool used */
                struct page *page;
                pgoff_t strip_unit_end = 0;
                u64 offset = 0, len = 0;
+               bool from_pool = false;
 
                max_pages = wsize >> PAGE_SHIFT;
 
@@ -1057,16 +1056,16 @@ get_more_pages:
                                                      sizeof(*pages),
                                                      GFP_NOFS);
                                if (!pages) {
-                                       pool = fsc->wb_pagevec_pool;
-                                       pages = mempool_alloc(pool, GFP_NOFS);
+                                       from_pool = true;
+                                       pages = mempool_alloc(ceph_wb_pagevec_pool, GFP_NOFS);
                                        BUG_ON(!pages);
                                }
 
                                len = 0;
                        } else if (page->index !=
                                   (offset + len) >> PAGE_SHIFT) {
-                               if (num_ops >= (pool ?  CEPH_OSD_SLAB_OPS :
-                                                       CEPH_OSD_MAX_OPS)) {
+                               if (num_ops >= (from_pool ?  CEPH_OSD_SLAB_OPS :
+                                                            CEPH_OSD_MAX_OPS)) {
                                        redirty_page_for_writepage(wbc, page);
                                        unlock_page(page);
                                        break;
@@ -1161,7 +1160,7 @@ new_request:
                                     offset, len);
                                osd_req_op_extent_osd_data_pages(req, op_idx,
                                                        data_pages, len, 0,
-                                                       !!pool, false);
+                                                       from_pool, false);
                                osd_req_op_extent_update(req, op_idx, len);
 
                                len = 0;
@@ -1188,12 +1187,12 @@ new_request:
                dout("writepages got pages at %llu~%llu\n", offset, len);
 
                osd_req_op_extent_osd_data_pages(req, op_idx, data_pages, len,
-                                                0, !!pool, false);
+                                                0, from_pool, false);
                osd_req_op_extent_update(req, op_idx, len);
 
                BUG_ON(op_idx + 1 != req->r_num_ops);
 
-               pool = NULL;
+               from_pool = false;
                if (i < locked_pages) {
                        BUG_ON(num_ops <= req->r_num_ops);
                        num_ops -= req->r_num_ops;
@@ -1204,8 +1203,8 @@ new_request:
                        pages = kmalloc_array(locked_pages, sizeof(*pages),
                                              GFP_NOFS);
                        if (!pages) {
-                               pool = fsc->wb_pagevec_pool;
-                               pages = mempool_alloc(pool, GFP_NOFS);
+                               from_pool = true;
+                               pages = mempool_alloc(ceph_wb_pagevec_pool, GFP_NOFS);
                                BUG_ON(!pages);
                        }
                        memcpy(pages, data_pages + i,
index 972c13a..55ccccf 100644 (file)
@@ -668,6 +668,7 @@ void ceph_add_cap(struct inode *inode,
                spin_lock(&session->s_cap_lock);
                list_add_tail(&cap->session_caps, &session->s_caps);
                session->s_nr_caps++;
+               atomic64_inc(&mdsc->metric.total_caps);
                spin_unlock(&session->s_cap_lock);
        } else {
                spin_lock(&session->s_cap_lock);
@@ -1161,6 +1162,7 @@ void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
        } else {
                list_del_init(&cap->session_caps);
                session->s_nr_caps--;
+               atomic64_dec(&mdsc->metric.total_caps);
                cap->session = NULL;
                removed = 1;
        }
@@ -4187,10 +4189,8 @@ void ceph_check_delayed_caps(struct ceph_mds_client *mdsc)
        struct ceph_inode_info *ci;
 
        dout("check_delayed_caps\n");
-       while (1) {
-               spin_lock(&mdsc->cap_delay_lock);
-               if (list_empty(&mdsc->cap_delay_list))
-                       break;
+       spin_lock(&mdsc->cap_delay_lock);
+       while (!list_empty(&mdsc->cap_delay_list)) {
                ci = list_first_entry(&mdsc->cap_delay_list,
                                      struct ceph_inode_info,
                                      i_cap_delay_list);
@@ -4200,13 +4200,13 @@ void ceph_check_delayed_caps(struct ceph_mds_client *mdsc)
                list_del_init(&ci->i_cap_delay_list);
 
                inode = igrab(&ci->vfs_inode);
-               spin_unlock(&mdsc->cap_delay_lock);
-
                if (inode) {
+                       spin_unlock(&mdsc->cap_delay_lock);
                        dout("check_delayed_caps on %p\n", inode);
                        ceph_check_caps(ci, 0, NULL);
                        /* avoid calling iput_final() in tick thread */
                        ceph_async_iput(inode);
+                       spin_lock(&mdsc->cap_delay_lock);
                }
        }
        spin_unlock(&mdsc->cap_delay_lock);
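The reworked loop in ceph_check_delayed_caps() keeps cap_delay_lock held while it picks the next entry off the list and drops it only around the part that can sleep, rather than re-taking the lock from scratch on every pass. A condensed kernel-style sketch of that shape (drain_sketch() is not a real function, and the real loop also checks the entry's timeout and the igrab() result before unlocking):

#include <linux/spinlock.h>
#include <linux/list.h>

static void drain_sketch(spinlock_t *lock, struct list_head *head)
{
        spin_lock(lock);
        while (!list_empty(head)) {
                struct list_head *item = head->next;

                list_del_init(item);
                spin_unlock(lock);      /* the processing below may sleep */
                /* ... process the entry ... */
                spin_lock(lock);
        }
        spin_unlock(lock);
}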
index 070ed84..97539b4 100644 (file)
@@ -145,7 +145,7 @@ static int metric_show(struct seq_file *s, void *p)
        struct ceph_fs_client *fsc = s->private;
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_client_metric *m = &mdsc->metric;
-       int i, nr_caps = 0;
+       int nr_caps = 0;
        s64 total, sum, avg, min, max, sq;
 
        seq_printf(s, "item          total       avg_lat(us)     min_lat(us)     max_lat(us)     stdev(us)\n");
@@ -190,17 +190,7 @@ static int metric_show(struct seq_file *s, void *p)
                   percpu_counter_sum(&m->d_lease_mis),
                   percpu_counter_sum(&m->d_lease_hit));
 
-       mutex_lock(&mdsc->mutex);
-       for (i = 0; i < mdsc->max_sessions; i++) {
-               struct ceph_mds_session *s;
-
-               s = __ceph_lookup_mds_session(mdsc, i);
-               if (!s)
-                       continue;
-               nr_caps += s->s_nr_caps;
-               ceph_put_mds_session(s);
-       }
-       mutex_unlock(&mdsc->mutex);
+       nr_caps = atomic64_read(&m->total_caps);
        seq_printf(s, "%-14s%-16d%-16lld%lld\n", "caps", nr_caps,
                   percpu_counter_sum(&m->i_caps_mis),
                   percpu_counter_sum(&m->i_caps_hit));
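With total_caps maintained at cap add/remove time, the debugfs code simply reads one counter instead of walking every MDS session under mdsc->mutex. The same bookkeeping idea in plain C11 atomics:

#include <stdatomic.h>
#include <stdio.h>

/* Stand-in for the atomic64_t total_caps added to ceph_client_metric. */
static atomic_llong total_caps = 0;

static void add_cap(void)    { atomic_fetch_add(&total_caps, 1); }
static void remove_cap(void) { atomic_fetch_sub(&total_caps, 1); }

int main(void)
{
        add_cap();
        add_cap();
        remove_cap();
        /* metric_show() now just reads the counter. */
        printf("caps=%lld\n", (long long)atomic_load(&total_caps));
        return 0;
}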
@@ -272,7 +262,7 @@ static int mds_sessions_show(struct seq_file *s, void *ptr)
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_auth_client *ac = fsc->client->monc.auth;
        struct ceph_options *opt = fsc->client->options;
-       int mds = -1;
+       int mds;
 
        mutex_lock(&mdsc->mutex);
 
index 39f5311..060bdcc 100644 (file)
@@ -930,6 +930,10 @@ static int ceph_symlink(struct inode *dir, struct dentry *dentry,
        req->r_num_caps = 2;
        req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL;
        req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
+       if (as_ctx.pagelist) {
+               req->r_pagelist = as_ctx.pagelist;
+               as_ctx.pagelist = NULL;
+       }
        err = ceph_mdsc_do_request(mdsc, dir, req);
        if (!err && !req->r_reply_info.head->is_dentry)
                err = ceph_handle_notrace_create(dir, dentry);
index 160644d..d51c3f2 100644 (file)
@@ -1538,6 +1538,7 @@ static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
        struct inode *inode = file_inode(filp);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct page *pinned_page = NULL;
+       bool direct_lock = iocb->ki_flags & IOCB_DIRECT;
        ssize_t ret;
        int want, got = 0;
        int retry_op = 0, read = 0;
@@ -1546,7 +1547,7 @@ again:
        dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
             inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode);
 
-       if (iocb->ki_flags & IOCB_DIRECT)
+       if (direct_lock)
                ceph_start_io_direct(inode);
        else
                ceph_start_io_read(inode);
@@ -1603,7 +1604,7 @@ again:
        }
        ceph_put_cap_refs(ci, got);
 
-       if (iocb->ki_flags & IOCB_DIRECT)
+       if (direct_lock)
                ceph_end_io_direct(inode);
        else
                ceph_end_io_read(inode);
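The read path now latches whether it took the direct-I/O lock into a local direct_lock, so the unlock at the end is guaranteed to pair with the lock taken at the top no matter what happens to iocb->ki_flags in between. A toy demonstration of why sampling the flag once is the robust choice (all _sketch names and the flag value are invented):

#include <stdbool.h>
#include <stdio.h>

#define IOCB_DIRECT_SKETCH 0x1

struct kiocb_sketch { unsigned int ki_flags; };

static void do_read(struct kiocb_sketch *iocb)
{
        /* Imagine a fallback path that clears the flag mid-call. */
        iocb->ki_flags &= ~IOCB_DIRECT_SKETCH;
}

int main(void)
{
        struct kiocb_sketch iocb = { .ki_flags = IOCB_DIRECT_SKETCH };
        bool direct_lock = iocb.ki_flags & IOCB_DIRECT_SKETCH;

        printf("lock:   %s\n", direct_lock ? "direct" : "buffered");
        do_read(&iocb);
        /* Re-testing iocb.ki_flags here would pick the wrong unlock path. */
        printf("unlock: %s\n", direct_lock ? "direct" : "buffered");
        return 0;
}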
index a504971..4a26862 100644 (file)
@@ -1103,8 +1103,7 @@ static int __choose_mds(struct ceph_mds_client *mdsc,
                                     frag.frag, mds);
                                if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
                                    CEPH_MDS_STATE_ACTIVE) {
-                                       if (mode == USE_ANY_MDS &&
-                                           !ceph_mdsmap_is_laggy(mdsc->mdsmap,
+                                       if (!ceph_mdsmap_is_laggy(mdsc->mdsmap,
                                                                  mds))
                                                goto out;
                                }
@@ -1168,7 +1167,7 @@ static struct ceph_msg *create_session_msg(u32 op, u64 seq)
 
 static const unsigned char feature_bits[] = CEPHFS_FEATURES_CLIENT_SUPPORTED;
 #define FEATURE_BYTES(c) (DIV_ROUND_UP((size_t)feature_bits[c - 1] + 1, 64) * 8)
-static void encode_supported_features(void **p, void *end)
+static int encode_supported_features(void **p, void *end)
 {
        static const size_t count = ARRAY_SIZE(feature_bits);
 
@@ -1176,16 +1175,64 @@ static void encode_supported_features(void **p, void *end)
                size_t i;
                size_t size = FEATURE_BYTES(count);
 
-               BUG_ON(*p + 4 + size > end);
+               if (WARN_ON_ONCE(*p + 4 + size > end))
+                       return -ERANGE;
+
                ceph_encode_32(p, size);
                memset(*p, 0, size);
                for (i = 0; i < count; i++)
                        ((unsigned char*)(*p))[i / 8] |= BIT(feature_bits[i] % 8);
                *p += size;
        } else {
-               BUG_ON(*p + 4 > end);
+               if (WARN_ON_ONCE(*p + 4 > end))
+                       return -ERANGE;
+
                ceph_encode_32(p, 0);
        }
+
+       return 0;
+}
+
+static const unsigned char metric_bits[] = CEPHFS_METRIC_SPEC_CLIENT_SUPPORTED;
+#define METRIC_BYTES(cnt) (DIV_ROUND_UP((size_t)metric_bits[cnt - 1] + 1, 64) * 8)
+static int encode_metric_spec(void **p, void *end)
+{
+       static const size_t count = ARRAY_SIZE(metric_bits);
+
+       /* header */
+       if (WARN_ON_ONCE(*p + 2 > end))
+               return -ERANGE;
+
+       ceph_encode_8(p, 1); /* version */
+       ceph_encode_8(p, 1); /* compat */
+
+       if (count > 0) {
+               size_t i;
+               size_t size = METRIC_BYTES(count);
+
+               if (WARN_ON_ONCE(*p + 4 + 4 + size > end))
+                       return -ERANGE;
+
+               /* metric spec info length */
+               ceph_encode_32(p, 4 + size);
+
+               /* metric spec */
+               ceph_encode_32(p, size);
+               memset(*p, 0, size);
+               for (i = 0; i < count; i++)
+                       ((unsigned char *)(*p))[i / 8] |= BIT(metric_bits[i] % 8);
+               *p += size;
+       } else {
+               if (WARN_ON_ONCE(*p + 4 + 4 > end))
+                       return -ERANGE;
+
+               /* metric spec info length */
+               ceph_encode_32(p, 4);
+               /* metric spec */
+               ceph_encode_32(p, 0);
+       }
+
+       return 0;
 }
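On the wire, both the supported-features blob and the new metric spec are a 32-bit length followed by a bitmap (bit N in byte N / 8, bit N % 8); the metric spec additionally carries a one-byte version and compat ahead of its lengths. The hunks above also swap the old BUG_ON() bounds checks for WARN_ON_ONCE() plus an -ERANGE return, so an undersized message buffer no longer crashes the machine. The bitmap packing itself, reduced to standalone C:

#include <stdio.h>

#define BIT(n) (1U << (n))

/* Set bit `nr` in a bitmap blob. */
static void set_bit_sketch(unsigned char *blob, unsigned int nr)
{
        blob[nr / 8] |= BIT(nr % 8);
}

int main(void)
{
        unsigned char blob[2] = { 0, 0 };

        set_bit_sketch(blob, 0);
        set_bit_sketch(blob, 9);
        printf("%02x %02x\n", blob[0], blob[1]);   /* 01 02 */
        return 0;
}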
 
 /*
@@ -1203,6 +1250,7 @@ static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u6
        struct ceph_mount_options *fsopt = mdsc->fsc->mount_options;
        size_t size, count;
        void *p, *end;
+       int ret;
 
        const char* metadata[][2] = {
                {"hostname", mdsc->nodename},
@@ -1227,12 +1275,19 @@ static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u6
                size = FEATURE_BYTES(count);
        extra_bytes += 4 + size;
 
+       /* metric spec */
+       size = 0;
+       count = ARRAY_SIZE(metric_bits);
+       if (count > 0)
+               size = METRIC_BYTES(count);
+       extra_bytes += 2 + 4 + 4 + size;
+
        /* Allocate the message */
        msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h) + extra_bytes,
                           GFP_NOFS, false);
        if (!msg) {
                pr_err("create_session_msg ENOMEM creating msg\n");
-               return NULL;
+               return ERR_PTR(-ENOMEM);
        }
        p = msg->front.iov_base;
        end = p + msg->front.iov_len;
@@ -1245,9 +1300,9 @@ static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u6
         * Serialize client metadata into waiting buffer space, using
         * the format that userspace expects for map<string, string>
         *
-        * ClientSession messages with metadata are v3
+        * ClientSession messages with metadata are v4
         */
-       msg->hdr.version = cpu_to_le16(3);
+       msg->hdr.version = cpu_to_le16(4);
        msg->hdr.compat_version = cpu_to_le16(1);
 
        /* The write pointer, following the session_head structure */
@@ -1269,7 +1324,20 @@ static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u6
                p += val_len;
        }
 
-       encode_supported_features(&p, end);
+       ret = encode_supported_features(&p, end);
+       if (ret) {
+               pr_err("encode_supported_features failed!\n");
+               ceph_msg_put(msg);
+               return ERR_PTR(ret);
+       }
+
+       ret = encode_metric_spec(&p, end);
+       if (ret) {
+               pr_err("encode_metric_spec failed!\n");
+               ceph_msg_put(msg);
+               return ERR_PTR(ret);
+       }
+
        msg->front.iov_len = p - msg->front.iov_base;
        msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
 
@@ -1297,8 +1365,8 @@ static int __open_session(struct ceph_mds_client *mdsc,
 
        /* send connect message */
        msg = create_session_open_msg(mdsc, session->s_seq);
-       if (!msg)
-               return -ENOMEM;
+       if (IS_ERR(msg))
+               return PTR_ERR(msg);
        ceph_con_send(&session->s_con, msg);
        return 0;
 }
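create_session_open_msg() now reports failure as ERR_PTR(-ENOMEM) or ERR_PTR(ret) instead of a bare NULL, and __open_session() unpacks it with IS_ERR()/PTR_ERR(), so the precise errno propagates to the caller. The convention re-implemented in userspace for illustration (the kernel's real helpers live in <linux/err.h>):

#include <stdio.h>

#define MAX_ERRNO 4095

static void *ERR_PTR(long error)      { return (void *)error; }
static long  PTR_ERR(const void *ptr) { return (long)ptr; }
static int   IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *make_msg(int fail)
{
        return fail ? ERR_PTR(-12 /* -ENOMEM */) : (void *)"msg";
}

int main(void)
{
        void *msg = make_msg(1);

        if (IS_ERR(msg))        /* caller can now tell -ENOMEM from -ERANGE */
                printf("error %ld\n", PTR_ERR(msg));
        return 0;
}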
@@ -1312,6 +1380,7 @@ static struct ceph_mds_session *
 __open_export_target_session(struct ceph_mds_client *mdsc, int target)
 {
        struct ceph_mds_session *session;
+       int ret;
 
        session = __ceph_lookup_mds_session(mdsc, target);
        if (!session) {
@@ -1320,8 +1389,11 @@ __open_export_target_session(struct ceph_mds_client *mdsc, int target)
                        return session;
        }
        if (session->s_state == CEPH_MDS_SESSION_NEW ||
-           session->s_state == CEPH_MDS_SESSION_CLOSING)
-               __open_session(mdsc, session);
+           session->s_state == CEPH_MDS_SESSION_CLOSING) {
+               ret = __open_session(mdsc, session);
+               if (ret)
+                       return ERR_PTR(ret);
+       }
 
        return session;
 }
@@ -1485,6 +1557,7 @@ int ceph_iterate_session_caps(struct ceph_mds_session *session,
                        cap->session = NULL;
                        list_del_init(&cap->session_caps);
                        session->s_nr_caps--;
+                       atomic64_dec(&session->s_mdsc->metric.total_caps);
                        if (cap->queue_release)
                                __ceph_queue_cap_release(session, cap);
                        else
@@ -1785,8 +1858,7 @@ static void renewed_caps(struct ceph_mds_client *mdsc,
 /*
  * send a session close request
  */
-static int request_close_session(struct ceph_mds_client *mdsc,
-                                struct ceph_mds_session *session)
+static int request_close_session(struct ceph_mds_session *session)
 {
        struct ceph_msg *msg;
 
@@ -1809,7 +1881,7 @@ static int __close_session(struct ceph_mds_client *mdsc,
        if (session->s_state >= CEPH_MDS_SESSION_CLOSING)
                return 0;
        session->s_state = CEPH_MDS_SESSION_CLOSING;
-       return request_close_session(mdsc, session);
+       return request_close_session(session);
 }
 
 static bool drop_negative_children(struct dentry *dentry)
@@ -2520,7 +2592,12 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
                ceph_encode_copy(&p, &ts, sizeof(ts));
        }
 
-       BUG_ON(p > end);
+       if (WARN_ON_ONCE(p > end)) {
+               ceph_msg_put(msg);
+               msg = ERR_PTR(-ERANGE);
+               goto out_free2;
+       }
+
        msg->front.iov_len = p - msg->front.iov_base;
        msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
 
@@ -2756,7 +2833,9 @@ static void __do_request(struct ceph_mds_client *mdsc,
                }
                if (session->s_state == CEPH_MDS_SESSION_NEW ||
                    session->s_state == CEPH_MDS_SESSION_CLOSING) {
-                       __open_session(mdsc, session);
+                       err = __open_session(mdsc, session);
+                       if (err)
+                               goto out_session;
                        /* retry the same mds later */
                        if (random)
                                req->r_resend_mds = mds;
@@ -3279,8 +3358,10 @@ static void handle_session(struct ceph_mds_session *session,
                        goto bad;
                /* version >= 3, feature bits */
                ceph_decode_32_safe(&p, end, len, bad);
-               ceph_decode_64_safe(&p, end, features, bad);
-               p += len - sizeof(features);
+               if (len) {
+                       ceph_decode_64_safe(&p, end, features, bad);
+                       p += len - sizeof(features);
+               }
        }
 
        mutex_lock(&mdsc->mutex);
@@ -3310,6 +3391,8 @@ static void handle_session(struct ceph_mds_session *session,
                session->s_state = CEPH_MDS_SESSION_OPEN;
                session->s_features = features;
                renewed_caps(mdsc, session, 0);
+               if (test_bit(CEPHFS_FEATURE_METRIC_COLLECT, &session->s_features))
+                       metric_schedule_delayed(&mdsc->metric);
                wake = 1;
                if (mdsc->stopping)
                        __close_session(mdsc, session);
@@ -4263,6 +4346,30 @@ static void maybe_recover_session(struct ceph_mds_client *mdsc)
        ceph_force_reconnect(fsc->sb);
 }
 
+bool check_session_state(struct ceph_mds_session *s)
+{
+       if (s->s_state == CEPH_MDS_SESSION_CLOSING) {
+               dout("resending session close request for mds%d\n",
+                               s->s_mds);
+               request_close_session(s);
+               return false;
+       }
+       if (s->s_ttl && time_after(jiffies, s->s_ttl)) {
+               if (s->s_state == CEPH_MDS_SESSION_OPEN) {
+                       s->s_state = CEPH_MDS_SESSION_HUNG;
+                       pr_info("mds%d hung\n", s->s_mds);
+               }
+       }
+       if (s->s_state == CEPH_MDS_SESSION_NEW ||
+           s->s_state == CEPH_MDS_SESSION_RESTARTING ||
+           s->s_state == CEPH_MDS_SESSION_CLOSED ||
+           s->s_state == CEPH_MDS_SESSION_REJECTED)
+               /* this mds is failed or recovering, just wait */
+               return false;
+
+       return true;
+}
+
 /*
  * delayed work -- periodically trim expired leases, renew caps with mds
  */
@@ -4283,6 +4390,9 @@ static void delayed_work(struct work_struct *work)
 
        dout("mdsc delayed_work\n");
 
+       if (mdsc->stopping)
+               return;
+
        mutex_lock(&mdsc->mutex);
        renew_interval = mdsc->mdsmap->m_session_timeout >> 2;
        renew_caps = time_after_eq(jiffies, HZ*renew_interval +
@@ -4294,23 +4404,8 @@ static void delayed_work(struct work_struct *work)
                struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
                if (!s)
                        continue;
-               if (s->s_state == CEPH_MDS_SESSION_CLOSING) {
-                       dout("resending session close request for mds%d\n",
-                            s->s_mds);
-                       request_close_session(mdsc, s);
-                       ceph_put_mds_session(s);
-                       continue;
-               }
-               if (s->s_ttl && time_after(jiffies, s->s_ttl)) {
-                       if (s->s_state == CEPH_MDS_SESSION_OPEN) {
-                               s->s_state = CEPH_MDS_SESSION_HUNG;
-                               pr_info("mds%d hung\n", s->s_mds);
-                       }
-               }
-               if (s->s_state == CEPH_MDS_SESSION_NEW ||
-                   s->s_state == CEPH_MDS_SESSION_RESTARTING ||
-                   s->s_state == CEPH_MDS_SESSION_REJECTED) {
-                       /* this mds is failed or recovering, just wait */
+
+               if (!check_session_state(s)) {
                        ceph_put_mds_session(s);
                        continue;
                }
@@ -4359,7 +4454,6 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc)
                goto err_mdsc;
        }
 
-       fsc->mdsc = mdsc;
        init_completion(&mdsc->safe_umount_waiters);
        init_waitqueue_head(&mdsc->session_close_wq);
        INIT_LIST_HEAD(&mdsc->waiting_for_map);
@@ -4414,6 +4508,8 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc)
 
        strscpy(mdsc->nodename, utsname()->nodename,
                sizeof(mdsc->nodename));
+
+       fsc->mdsc = mdsc;
        return 0;
 
 err_mdsmap:
@@ -4657,7 +4753,16 @@ void ceph_mdsc_force_umount(struct ceph_mds_client *mdsc)
 static void ceph_mdsc_stop(struct ceph_mds_client *mdsc)
 {
        dout("stop\n");
-       cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */
+       /*
+        * Make sure the delayed work has stopped before releasing
+        * the resources.
+        *
+        * cancel_delayed_work_sync() only guarantees that the work
+        * currently executing finishes; the delayed work can re-arm
+        * itself again after that, so flush it instead.
+        */
+       flush_delayed_work(&mdsc->delayed_work);
+
        if (mdsc->mdsmap)
                ceph_mdsmap_destroy(mdsc->mdsmap);
        kfree(mdsc->sessions);
@@ -4680,6 +4785,7 @@ void ceph_mdsc_destroy(struct ceph_fs_client *fsc)
 
        ceph_metric_destroy(&mdsc->metric);
 
+       flush_delayed_work(&mdsc->metric.delayed_work);
        fsc->mdsc = NULL;
        kfree(mdsc);
        dout("mdsc_destroy %p done\n", mdsc);
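Two of the changes above work as a pair: delayed_work() (and metric_delayed_work() below) bail out early once mdsc->stopping is set, and the teardown paths flush the self-re-arming delayed works instead of only cancelling them, so the final run observes the stop flag and does not queue itself again. A kernel-style sketch of that shutdown ordering (periodic_fn, shutdown_sketch and the bare `stopping` flag are stand-ins, not the ceph code):

#include <linux/workqueue.h>

static bool stopping;                     /* stands in for mdsc->stopping */

static void periodic_fn(struct work_struct *w)
{
        struct delayed_work *dw = to_delayed_work(w);

        if (READ_ONCE(stopping))
                return;                   /* final run: do not re-arm */
        /* ... periodic work ... */
        schedule_delayed_work(dw, HZ);    /* normally re-arms itself */
}

static void shutdown_sketch(struct delayed_work *dw)
{
        WRITE_ONCE(stopping, true);
        /*
         * Flushing lets an already-queued run execute, see `stopping`,
         * and stop re-arming; per the comment in the patch, cancelling
         * alone can still leave a re-armed instance behind.
         */
        flush_delayed_work(dw);
}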
index 5e0c407..bc9e959 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/ceph/auth.h>
 
 #include "metric.h"
+#include "super.h"
 
 /* The first 8 bits are reserved for old ceph releases */
 enum ceph_feature_type {
@@ -27,8 +28,9 @@ enum ceph_feature_type {
        CEPHFS_FEATURE_LAZY_CAP_WANTED,
        CEPHFS_FEATURE_MULTI_RECONNECT,
        CEPHFS_FEATURE_DELEG_INO,
+       CEPHFS_FEATURE_METRIC_COLLECT,
 
-       CEPHFS_FEATURE_MAX = CEPHFS_FEATURE_DELEG_INO,
+       CEPHFS_FEATURE_MAX = CEPHFS_FEATURE_METRIC_COLLECT,
 };
 
 /*
@@ -42,6 +44,7 @@ enum ceph_feature_type {
        CEPHFS_FEATURE_LAZY_CAP_WANTED,         \
        CEPHFS_FEATURE_MULTI_RECONNECT,         \
        CEPHFS_FEATURE_DELEG_INO,               \
+       CEPHFS_FEATURE_METRIC_COLLECT,          \
                                                \
        CEPHFS_FEATURE_MAX,                     \
 }
@@ -476,6 +479,8 @@ struct ceph_mds_client {
 
 extern const char *ceph_mds_op_name(int op);
 
+extern bool check_session_state(struct ceph_mds_session *s);
+
 extern struct ceph_mds_session *
 __ceph_lookup_mds_session(struct ceph_mds_client *, int mds);
 
index 8896278..e4aba6c 100644 (file)
@@ -120,7 +120,7 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end)
        const void *start = *p;
        int i, j, n;
        int err;
-       u8 mdsmap_v, mdsmap_cv;
+       u8 mdsmap_v;
        u16 mdsmap_ev;
 
        m = kzalloc(sizeof(*m), GFP_NOFS);
@@ -129,7 +129,7 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end)
 
        ceph_decode_need(p, end, 1 + 1, bad);
        mdsmap_v = ceph_decode_8(p);
-       mdsmap_cv = ceph_decode_8(p);
+       *p += sizeof(u8);                       /* mdsmap_cv */
        if (mdsmap_v >= 4) {
               u32 mdsmap_len;
               ceph_decode_32_safe(p, end, mdsmap_len, bad);
@@ -174,7 +174,6 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end)
                u64 global_id;
                u32 namelen;
                s32 mds, inc, state;
-               u64 state_seq;
                u8 info_v;
                void *info_end = NULL;
                struct ceph_entity_addr addr;
@@ -189,9 +188,8 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end)
                info_v= ceph_decode_8(p);
                if (info_v >= 4) {
                        u32 info_len;
-                       u8 info_cv;
                        ceph_decode_need(p, end, 1 + sizeof(u32), bad);
-                       info_cv = ceph_decode_8(p);
+                       *p += sizeof(u8);       /* info_cv */
                        info_len = ceph_decode_32(p);
                        info_end = *p + info_len;
                        if (info_end > end)
@@ -210,7 +208,7 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end)
                mds = ceph_decode_32(p);
                inc = ceph_decode_32(p);
                state = ceph_decode_32(p);
-               state_seq = ceph_decode_64(p);
+               *p += sizeof(u64);              /* state_seq */
                err = ceph_decode_entity_addr(p, end, &addr);
                if (err)
                        goto corrupt;
index 9217f35..2466b26 100644 (file)
 /* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/ceph/ceph_debug.h>
 
 #include <linux/types.h>
 #include <linux/percpu_counter.h>
 #include <linux/math64.h>
 
 #include "metric.h"
+#include "mds_client.h"
+
+static bool ceph_mdsc_send_metrics(struct ceph_mds_client *mdsc,
+                                  struct ceph_mds_session *s)
+{
+       struct ceph_metric_head *head;
+       struct ceph_metric_cap *cap;
+       struct ceph_metric_read_latency *read;
+       struct ceph_metric_write_latency *write;
+       struct ceph_metric_metadata_latency *meta;
+       struct ceph_client_metric *m = &mdsc->metric;
+       u64 nr_caps = atomic64_read(&m->total_caps);
+       struct ceph_msg *msg;
+       struct timespec64 ts;
+       s64 sum;
+       s32 items = 0;
+       s32 len;
+
+       len = sizeof(*head) + sizeof(*cap) + sizeof(*read) + sizeof(*write)
+             + sizeof(*meta);
+
+       msg = ceph_msg_new(CEPH_MSG_CLIENT_METRICS, len, GFP_NOFS, true);
+       if (!msg) {
+               pr_err("send metrics to mds%d, failed to allocate message\n",
+                      s->s_mds);
+               return false;
+       }
+
+       head = msg->front.iov_base;
+
+       /* encode the cap metric */
+       cap = (struct ceph_metric_cap *)(head + 1);
+       cap->type = cpu_to_le32(CLIENT_METRIC_TYPE_CAP_INFO);
+       cap->ver = 1;
+       cap->compat = 1;
+       cap->data_len = cpu_to_le32(sizeof(*cap) - 10);
+       cap->hit = cpu_to_le64(percpu_counter_sum(&mdsc->metric.i_caps_hit));
+       cap->mis = cpu_to_le64(percpu_counter_sum(&mdsc->metric.i_caps_mis));
+       cap->total = cpu_to_le64(nr_caps);
+       items++;
+
+       /* encode the read latency metric */
+       read = (struct ceph_metric_read_latency *)(cap + 1);
+       read->type = cpu_to_le32(CLIENT_METRIC_TYPE_READ_LATENCY);
+       read->ver = 1;
+       read->compat = 1;
+       read->data_len = cpu_to_le32(sizeof(*read) - 10);
+       sum = m->read_latency_sum;
+       jiffies_to_timespec64(sum, &ts);
+       read->sec = cpu_to_le32(ts.tv_sec);
+       read->nsec = cpu_to_le32(ts.tv_nsec);
+       items++;
+
+       /* encode the write latency metric */
+       write = (struct ceph_metric_write_latency *)(read + 1);
+       write->type = cpu_to_le32(CLIENT_METRIC_TYPE_WRITE_LATENCY);
+       write->ver = 1;
+       write->compat = 1;
+       write->data_len = cpu_to_le32(sizeof(*write) - 10);
+       sum = m->write_latency_sum;
+       jiffies_to_timespec64(sum, &ts);
+       write->sec = cpu_to_le32(ts.tv_sec);
+       write->nsec = cpu_to_le32(ts.tv_nsec);
+       items++;
+
+       /* encode the metadata latency metric */
+       meta = (struct ceph_metric_metadata_latency *)(write + 1);
+       meta->type = cpu_to_le32(CLIENT_METRIC_TYPE_METADATA_LATENCY);
+       meta->ver = 1;
+       meta->compat = 1;
+       meta->data_len = cpu_to_le32(sizeof(*meta) - 10);
+       sum = m->metadata_latency_sum;
+       jiffies_to_timespec64(sum, &ts);
+       meta->sec = cpu_to_le32(ts.tv_sec);
+       meta->nsec = cpu_to_le32(ts.tv_nsec);
+       items++;
+
+       put_unaligned_le32(items, &head->num);
+       msg->front.iov_len = len;
+       msg->hdr.version = cpu_to_le16(1);
+       msg->hdr.compat_version = cpu_to_le16(1);
+       msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
+       dout("client%llu send metrics to mds%d\n",
+            ceph_client_gid(mdsc->fsc->client), s->s_mds);
+       ceph_con_send(&s->s_con, msg);
+
+       return true;
+}
+
+
+static void metric_get_session(struct ceph_mds_client *mdsc)
+{
+       struct ceph_mds_session *s;
+       int i;
+
+       mutex_lock(&mdsc->mutex);
+       for (i = 0; i < mdsc->max_sessions; i++) {
+               s = __ceph_lookup_mds_session(mdsc, i);
+               if (!s)
+                       continue;
+
+               /*
+                * Skip it if the MDS doesn't support metric collection;
+                * otherwise the MDS will close the session's socket
+                * connection directly when it gets this message.
+                */
+               if (check_session_state(s) &&
+                   test_bit(CEPHFS_FEATURE_METRIC_COLLECT, &s->s_features)) {
+                       mdsc->metric.session = s;
+                       break;
+               }
+
+               ceph_put_mds_session(s);
+       }
+       mutex_unlock(&mdsc->mutex);
+}
+
+static void metric_delayed_work(struct work_struct *work)
+{
+       struct ceph_client_metric *m =
+               container_of(work, struct ceph_client_metric, delayed_work.work);
+       struct ceph_mds_client *mdsc =
+               container_of(m, struct ceph_mds_client, metric);
+
+       if (mdsc->stopping)
+               return;
+
+       if (!m->session || !check_session_state(m->session)) {
+               if (m->session) {
+                       ceph_put_mds_session(m->session);
+                       m->session = NULL;
+               }
+               metric_get_session(mdsc);
+       }
+       if (m->session) {
+               ceph_mdsc_send_metrics(mdsc, m->session);
+               metric_schedule_delayed(m);
+       }
+}
 
 int ceph_metric_init(struct ceph_client_metric *m)
 {
@@ -22,6 +162,7 @@ int ceph_metric_init(struct ceph_client_metric *m)
        if (ret)
                goto err_d_lease_mis;
 
+       atomic64_set(&m->total_caps, 0);
        ret = percpu_counter_init(&m->i_caps_hit, 0, GFP_KERNEL);
        if (ret)
                goto err_i_caps_hit;
@@ -51,6 +192,9 @@ int ceph_metric_init(struct ceph_client_metric *m)
        m->total_metadatas = 0;
        m->metadata_latency_sum = 0;
 
+       m->session = NULL;
+       INIT_DELAYED_WORK(&m->delayed_work, metric_delayed_work);
+
        return 0;
 
 err_i_caps_mis:
@@ -72,6 +216,11 @@ void ceph_metric_destroy(struct ceph_client_metric *m)
        percpu_counter_destroy(&m->i_caps_hit);
        percpu_counter_destroy(&m->d_lease_mis);
        percpu_counter_destroy(&m->d_lease_hit);
+
+       cancel_delayed_work_sync(&m->delayed_work);
+
+       if (m->session)
+               ceph_put_mds_session(m->session);
 }
 
 static inline void __update_latency(ktime_t *totalp, ktime_t *lsump,
index ccd8128..1d0959d 100644 (file)
@@ -6,12 +6,91 @@
 #include <linux/percpu_counter.h>
 #include <linux/ktime.h>
 
+extern bool disable_send_metrics;
+
+enum ceph_metric_type {
+       CLIENT_METRIC_TYPE_CAP_INFO,
+       CLIENT_METRIC_TYPE_READ_LATENCY,
+       CLIENT_METRIC_TYPE_WRITE_LATENCY,
+       CLIENT_METRIC_TYPE_METADATA_LATENCY,
+       CLIENT_METRIC_TYPE_DENTRY_LEASE,
+
+       CLIENT_METRIC_TYPE_MAX = CLIENT_METRIC_TYPE_DENTRY_LEASE,
+};
+
+/*
+ * This will always have the highest metric bit value
+ * as the last element of the array.
+ */
+#define CEPHFS_METRIC_SPEC_CLIENT_SUPPORTED {  \
+       CLIENT_METRIC_TYPE_CAP_INFO,            \
+       CLIENT_METRIC_TYPE_READ_LATENCY,        \
+       CLIENT_METRIC_TYPE_WRITE_LATENCY,       \
+       CLIENT_METRIC_TYPE_METADATA_LATENCY,    \
+                                               \
+       CLIENT_METRIC_TYPE_MAX,                 \
+}
+
+/* metric caps header */
+struct ceph_metric_cap {
+       __le32 type;     /* ceph metric type */
+
+       __u8  ver;
+       __u8  compat;
+
+       __le32 data_len; /* length of sizeof(hit + mis + total) */
+       __le64 hit;
+       __le64 mis;
+       __le64 total;
+} __packed;
+
+/* metric read latency header */
+struct ceph_metric_read_latency {
+       __le32 type;     /* ceph metric type */
+
+       __u8  ver;
+       __u8  compat;
+
+       __le32 data_len; /* length of sizeof(sec + nsec) */
+       __le32 sec;
+       __le32 nsec;
+} __packed;
+
+/* metric write latency header */
+struct ceph_metric_write_latency {
+       __le32 type;     /* ceph metric type */
+
+       __u8  ver;
+       __u8  compat;
+
+       __le32 data_len; /* length of sizeof(sec + nsec) */
+       __le32 sec;
+       __le32 nsec;
+} __packed;
+
+/* metric metadata latency header */
+struct ceph_metric_metadata_latency {
+       __le32 type;     /* ceph metric type */
+
+       __u8  ver;
+       __u8  compat;
+
+       __le32 data_len; /* length of sizeof(sec + nsec) */
+       __le32 sec;
+       __le32 nsec;
+} __packed;
+
+struct ceph_metric_head {
+       __le32 num;     /* the number of metrics that will be sent */
+} __packed;
+
 /* This is the global metrics */
 struct ceph_client_metric {
        atomic64_t            total_dentries;
        struct percpu_counter d_lease_hit;
        struct percpu_counter d_lease_mis;
 
+       atomic64_t            total_caps;
        struct percpu_counter i_caps_hit;
        struct percpu_counter i_caps_mis;
 
@@ -35,8 +114,20 @@ struct ceph_client_metric {
        ktime_t metadata_latency_sq_sum;
        ktime_t metadata_latency_min;
        ktime_t metadata_latency_max;
+
+       struct ceph_mds_session *session;
+       struct delayed_work delayed_work;  /* delayed work */
 };
 
+static inline void metric_schedule_delayed(struct ceph_client_metric *m)
+{
+       if (disable_send_metrics)
+               return;
+
+       /* per second */
+       schedule_delayed_work(&m->delayed_work, round_jiffies_relative(HZ));
+}
+
 extern int ceph_metric_init(struct ceph_client_metric *m);
 extern void ceph_metric_destroy(struct ceph_client_metric *m);
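
Each record in the metrics message is one of the fixed-layout, little-endian, __packed structures above; data_len counts only the trailing payload (hit + mis + total for the cap record, sec + nsec for the latency records). A rough sketch of filling one cap record, assuming it is compiled next to fs/ceph/metric.h; the helper name and the version values used here are illustrative, not taken from the patch:

#include <linux/types.h>
#include "metric.h"     /* struct ceph_metric_cap, CLIENT_METRIC_TYPE_CAP_INFO */

/* Hypothetical helper: encode one cap-info record in wire format. */
static void demo_fill_cap_record(struct ceph_metric_cap *cap,
                                 u64 hit, u64 mis, u64 total)
{
        cap->type = cpu_to_le32(CLIENT_METRIC_TYPE_CAP_INFO);
        cap->ver = 1;
        cap->compat = 1;
        /* Only the fields that follow data_len are counted. */
        cap->data_len = cpu_to_le32(sizeof(cap->hit) + sizeof(cap->mis) +
                                    sizeof(cap->total));
        cap->hit = cpu_to_le64(hit);
        cap->mis = cpu_to_le64(mis);
        cap->total = cpu_to_le64(total);
}
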
 
index c9784eb..7ec0e6d 100644 (file)
@@ -27,6 +27,9 @@
 #include <linux/ceph/auth.h>
 #include <linux/ceph/debugfs.h>
 
+static DEFINE_SPINLOCK(ceph_fsc_lock);
+static LIST_HEAD(ceph_fsc_list);
+
 /*
  * Ceph superblock operations
  *
@@ -634,8 +637,6 @@ static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt,
                                        struct ceph_options *opt)
 {
        struct ceph_fs_client *fsc;
-       int page_count;
-       size_t size;
        int err;
 
        fsc = kzalloc(sizeof(*fsc), GFP_KERNEL);
@@ -683,18 +684,12 @@ static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt,
        if (!fsc->cap_wq)
                goto fail_inode_wq;
 
-       /* set up mempools */
-       err = -ENOMEM;
-       page_count = fsc->mount_options->wsize >> PAGE_SHIFT;
-       size = sizeof (struct page *) * (page_count ? page_count : 1);
-       fsc->wb_pagevec_pool = mempool_create_kmalloc_pool(10, size);
-       if (!fsc->wb_pagevec_pool)
-               goto fail_cap_wq;
+       spin_lock(&ceph_fsc_lock);
+       list_add_tail(&fsc->metric_wakeup, &ceph_fsc_list);
+       spin_unlock(&ceph_fsc_lock);
 
        return fsc;
 
-fail_cap_wq:
-       destroy_workqueue(fsc->cap_wq);
 fail_inode_wq:
        destroy_workqueue(fsc->inode_wq);
 fail_client:
@@ -717,12 +712,14 @@ static void destroy_fs_client(struct ceph_fs_client *fsc)
 {
        dout("destroy_fs_client %p\n", fsc);
 
+       spin_lock(&ceph_fsc_lock);
+       list_del(&fsc->metric_wakeup);
+       spin_unlock(&ceph_fsc_lock);
+
        ceph_mdsc_destroy(fsc);
        destroy_workqueue(fsc->inode_wq);
        destroy_workqueue(fsc->cap_wq);
 
-       mempool_destroy(fsc->wb_pagevec_pool);
-
        destroy_mount_options(fsc->mount_options);
 
        ceph_destroy_client(fsc->client);
@@ -741,6 +738,7 @@ struct kmem_cache *ceph_dentry_cachep;
 struct kmem_cache *ceph_file_cachep;
 struct kmem_cache *ceph_dir_file_cachep;
 struct kmem_cache *ceph_mds_request_cachep;
+mempool_t *ceph_wb_pagevec_pool;
 
 static void ceph_inode_init_once(void *foo)
 {
@@ -785,6 +783,10 @@ static int __init init_caches(void)
        if (!ceph_mds_request_cachep)
                goto bad_mds_req;
 
+       ceph_wb_pagevec_pool = mempool_create_kmalloc_pool(10, CEPH_MAX_WRITE_SIZE >> PAGE_SHIFT);
+       if (!ceph_wb_pagevec_pool)
+               goto bad_pagevec_pool;
+
        error = ceph_fscache_register();
        if (error)
                goto bad_fscache;
@@ -793,6 +795,8 @@ static int __init init_caches(void)
 
 bad_fscache:
        kmem_cache_destroy(ceph_mds_request_cachep);
+bad_pagevec_pool:
+       mempool_destroy(ceph_wb_pagevec_pool);
 bad_mds_req:
        kmem_cache_destroy(ceph_dir_file_cachep);
 bad_dir_file:
@@ -823,12 +827,13 @@ static void destroy_caches(void)
        kmem_cache_destroy(ceph_file_cachep);
        kmem_cache_destroy(ceph_dir_file_cachep);
        kmem_cache_destroy(ceph_mds_request_cachep);
+       mempool_destroy(ceph_wb_pagevec_pool);
 
        ceph_fscache_unregister();
 }
 
 /*
- * ceph_umount_begin - initiate forced umount.  Tear down down the
+ * ceph_umount_begin - initiate forced umount.  Tear down the
  * mount, skipping steps that may hang while waiting for server(s).
  */
 static void ceph_umount_begin(struct super_block *sb)
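
The hunks above replace the per-ceph_fs_client wb_pagevec_pool with a single module-wide ceph_wb_pagevec_pool, created once in init_caches() and released in destroy_caches(). For reference, a minimal sketch of the mempool_create_kmalloc_pool() usage pattern, with made-up demo_* names:

#include <linux/mempool.h>
#include <linux/gfp.h>
#include <linux/errno.h>

static mempool_t *demo_pool;

static int demo_pool_init(size_t elem_size)
{
        /*
         * Keep at least 10 preallocated elements around so allocations
         * can still make progress under memory pressure.
         */
        demo_pool = mempool_create_kmalloc_pool(10, elem_size);
        return demo_pool ? 0 : -ENOMEM;
}

static void *demo_pool_get(void)
{
        /* Falls back to the preallocated reserve if kmalloc fails. */
        return mempool_alloc(demo_pool, GFP_NOFS);
}

static void demo_pool_put(void *elem)
{
        mempool_free(elem, demo_pool);
}

static void demo_pool_exit(void)
{
        mempool_destroy(demo_pool);     /* NULL-safe */
}
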
@@ -1282,6 +1287,37 @@ static void __exit exit_ceph(void)
        destroy_caches();
 }
 
+static int param_set_metrics(const char *val, const struct kernel_param *kp)
+{
+       struct ceph_fs_client *fsc;
+       int ret;
+
+       ret = param_set_bool(val, kp);
+       if (ret) {
+               pr_err("Failed to parse sending metrics switch value '%s'\n",
+                      val);
+               return ret;
+       } else if (!disable_send_metrics) {
+               // wake up all the mds clients
+               spin_lock(&ceph_fsc_lock);
+               list_for_each_entry(fsc, &ceph_fsc_list, metric_wakeup) {
+                       metric_schedule_delayed(&fsc->mdsc->metric);
+               }
+               spin_unlock(&ceph_fsc_lock);
+       }
+
+       return 0;
+}
+
+static const struct kernel_param_ops param_ops_metrics = {
+       .set = param_set_metrics,
+       .get = param_get_bool,
+};
+
+bool disable_send_metrics = false;
+module_param_cb(disable_send_metrics, &param_ops_metrics, &disable_send_metrics, 0644);
+MODULE_PARM_DESC(disable_send_metrics, "Enable sending perf metrics to ceph cluster (default: on)");
+
 module_init(init_ceph);
 module_exit(exit_ceph);
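
The disable_send_metrics switch is registered with module_param_cb() so that a runtime write to the parameter goes through param_set_metrics(), which then kicks the metric work for every mounted client on ceph_fsc_list. A minimal sketch of that custom-setter pattern; the demo_* identifiers are invented for illustration:

#include <linux/module.h>
#include <linux/moduleparam.h>

static bool demo_enabled = true;

static int demo_param_set(const char *val, const struct kernel_param *kp)
{
        /* param_set_bool() parses @val and stores it via kp->arg. */
        int ret = param_set_bool(val, kp);

        if (ret)
                return ret;

        if (demo_enabled)
                pr_info("demo: feature enabled, kicking the worker\n");

        return 0;
}

static const struct kernel_param_ops demo_param_ops = {
        .set = demo_param_set,
        .get = param_get_bool,
};

module_param_cb(demo_enabled, &demo_param_ops, &demo_enabled, 0644);
MODULE_PARM_DESC(demo_enabled, "Enable the demo feature (default: on)");

Because param_set_bool() updates the backing variable before returning, the setter can safely inspect the new value right away, which is what param_set_metrics() relies on when it checks disable_send_metrics after the call.
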
 
index 5a6cdd3..4c3c964 100644 (file)
@@ -101,6 +101,8 @@ struct ceph_mount_options {
 struct ceph_fs_client {
        struct super_block *sb;
 
+       struct list_head metric_wakeup;
+
        struct ceph_mount_options *mount_options;
        struct ceph_client *client;
 
@@ -116,8 +118,6 @@ struct ceph_fs_client {
 
        struct ceph_mds_client *mdsc;
 
-       /* writeback */
-       mempool_t *wb_pagevec_pool;
        atomic_long_t writeback_count;
 
        struct workqueue_struct *inode_wq;
@@ -353,7 +353,7 @@ struct ceph_inode_info {
        unsigned i_dirty_caps, i_flushing_caps;     /* mask of dirtied fields */
 
        /*
-        * Link to the the auth cap's session's s_cap_dirty list. s_cap_dirty
+        * Link to the auth cap's session's s_cap_dirty list. s_cap_dirty
         * is protected by the mdsc->cap_dirty_lock, but each individual item
         * is also protected by the inode's i_ceph_lock. Walking s_cap_dirty
         * requires the mdsc->cap_dirty_lock. List presence for an item can
index 71ee34d..3a733ac 100644 (file)
@@ -497,10 +497,10 @@ static int __set_xattr(struct ceph_inode_info *ci,
                kfree(*newxattr);
                *newxattr = NULL;
                if (xattr->should_free_val)
-                       kfree((void *)xattr->val);
+                       kfree(xattr->val);
 
                if (update_xattr) {
-                       kfree((void *)name);
+                       kfree(name);
                        name = xattr->name;
                }
                ci->i_xattrs.names_size -= xattr->name_len;
@@ -566,9 +566,9 @@ static void __free_xattr(struct ceph_inode_xattr *xattr)
        BUG_ON(!xattr);
 
        if (xattr->should_free_name)
-               kfree((void *)xattr->name);
+               kfree(xattr->name);
        if (xattr->should_free_val)
-               kfree((void *)xattr->val);
+               kfree(xattr->val);
 
        kfree(xattr);
 }
@@ -582,9 +582,9 @@ static int __remove_xattr(struct ceph_inode_info *ci,
        rb_erase(&xattr->node, &ci->i_xattrs.index);
 
        if (xattr->should_free_name)
-               kfree((void *)xattr->name);
+               kfree(xattr->name);
        if (xattr->should_free_val)
-               kfree((void *)xattr->val);
+               kfree(xattr->val);
 
        ci->i_xattrs.names_size -= xattr->name_len;
        ci->i_xattrs.vals_size -= xattr->val_len;
index 0ad1309..a275ee3 100644 (file)
@@ -4886,6 +4886,7 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *vol)
                full_path = build_unc_path_to_root(vol, cifs_sb, !!count);
                if (IS_ERR(full_path)) {
                        rc = PTR_ERR(full_path);
+                       full_path = NULL;
                        break;
                }
                /* Chase referral */
index b9db736..eba01d0 100644 (file)
@@ -115,6 +115,7 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
        vars->oparms.fid = &fid;
        vars->oparms.reconnect = false;
        vars->oparms.mode = mode;
+       vars->oparms.cifs_sb = cifs_sb;
 
        rqst[num_rqst].rq_iov = &vars->open_iov[0];
        rqst[num_rqst].rq_nvec = SMB2_CREATE_IOV_SIZE;
index 24c2ac3..667d70a 100644 (file)
@@ -3913,7 +3913,7 @@ smb2_readv_callback(struct mid_q_entry *mid)
        case MID_RESPONSE_MALFORMED:
                credits.value = le16_to_cpu(shdr->CreditRequest);
                credits.instance = server->reconnect_instance;
-               /* fall through */
+               fallthrough;
        default:
                rdata->result = -EIO;
        }
@@ -4146,7 +4146,7 @@ smb2_writev_callback(struct mid_q_entry *mid)
        case MID_RESPONSE_MALFORMED:
                credits.value = le16_to_cpu(rsp->sync_hdr.CreditRequest);
                credits.instance = server->reconnect_instance;
-               /* fall through */
+               fallthrough;
        default:
                wdata->result = -EIO;
                break;
index 7237f07..76e7c10 100644 (file)
@@ -153,10 +153,10 @@ int cn_esc_printf(struct core_name *cn, const char *fmt, ...)
        return ret;
 }
 
-static int cn_print_exe_file(struct core_name *cn)
+static int cn_print_exe_file(struct core_name *cn, bool name_only)
 {
        struct file *exe_file;
-       char *pathbuf, *path;
+       char *pathbuf, *path, *ptr;
        int ret;
 
        exe_file = get_mm_exe_file(current->mm);
@@ -175,6 +175,11 @@ static int cn_print_exe_file(struct core_name *cn)
                goto free_buf;
        }
 
+       if (name_only) {
+               ptr = strrchr(path, '/');
+               if (ptr)
+                       path = ptr + 1;
+       }
        ret = cn_esc_printf(cn, "%s", path);
 
 free_buf:
@@ -301,12 +306,16 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm,
                                              utsname()->nodename);
                                up_read(&uts_sem);
                                break;
-                       /* executable */
+                       /* executable, could be changed by prctl PR_SET_NAME etc */
                        case 'e':
                                err = cn_esc_printf(cn, "%s", current->comm);
                                break;
+                       /* file name of executable */
+                       case 'f':
+                               err = cn_print_exe_file(cn, true);
+                               break;
                        case 'E':
-                               err = cn_print_exe_file(cn);
+                               err = cn_print_exe_file(cn, false);
                                break;
                        /* core limit size */
                        case 'c':
index 11b1672..95341af 100644 (file)
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -488,7 +488,7 @@ retry:
                if (dax_is_conflict(entry))
                        goto fallback;
                if (!xa_is_value(entry)) {
-                       xas_set_err(xas, EIO);
+                       xas_set_err(xas, -EIO);
                        goto out_unlock;
                }
 
@@ -680,21 +680,20 @@ int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
        return __dax_invalidate_entry(mapping, index, false);
 }
 
-static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
-               sector_t sector, size_t size, struct page *to,
-               unsigned long vaddr)
+static int copy_cow_page_dax(struct block_device *bdev, struct dax_device *dax_dev,
+                            sector_t sector, struct page *to, unsigned long vaddr)
 {
        void *vto, *kaddr;
        pgoff_t pgoff;
        long rc;
        int id;
 
-       rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
+       rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff);
        if (rc)
                return rc;
 
        id = dax_read_lock();
-       rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, NULL);
+       rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(PAGE_SIZE), &kaddr, NULL);
        if (rc < 0) {
                dax_read_unlock(id);
                return rc;
@@ -1305,8 +1304,8 @@ static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
                        clear_user_highpage(vmf->cow_page, vaddr);
                        break;
                case IOMAP_MAPPED:
-                       error = copy_user_dax(iomap.bdev, iomap.dax_dev,
-                                       sector, PAGE_SIZE, vmf->cow_page, vaddr);
+                       error = copy_cow_page_dax(iomap.bdev, iomap.dax_dev,
+                                                 sector, vmf->cow_page, vaddr);
                        break;
                default:
                        WARN_ON_ONCE(1);
index 361ea7a..ea04858 100644 (file)
@@ -1746,7 +1746,7 @@ static struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
        dentry->d_lockref.count = 1;
        dentry->d_flags = 0;
        spin_lock_init(&dentry->d_lock);
-       seqcount_init(&dentry->d_seq);
+       seqcount_spinlock_init(&dentry->d_seq, &dentry->d_lock);
        dentry->d_inode = NULL;
        dentry->d_parent = dentry;
        dentry->d_sb = sb;
index 3698252..a91003e 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -141,12 +141,14 @@ SYSCALL_DEFINE1(uselib, const char __user *, library)
        if (IS_ERR(file))
                goto out;
 
-       error = -EINVAL;
-       if (!S_ISREG(file_inode(file)->i_mode))
-               goto exit;
-
+       /*
+        * may_open() has already checked for this, so it should be
+        * impossible to trip now. But we need to be extra cautious
+        * and check again at the very end too.
+        */
        error = -EACCES;
-       if (path_noexec(&file->f_path))
+       if (WARN_ON_ONCE(!S_ISREG(file_inode(file)->i_mode) ||
+                        path_noexec(&file->f_path)))
                goto exit;
 
        fsnotify_open(file);
@@ -215,7 +217,7 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
         * We are doing an exec().  'current' is the process
         * doing the exec and bprm->mm is the new process's mm.
         */
-       ret = get_user_pages_remote(current, bprm->mm, pos, 1, gup_flags,
+       ret = get_user_pages_remote(bprm->mm, pos, 1, gup_flags,
                        &page, NULL, NULL);
        if (ret <= 0)
                return NULL;
@@ -909,11 +911,14 @@ static struct file *do_open_execat(int fd, struct filename *name, int flags)
        if (IS_ERR(file))
                goto out;
 
+       /*
+        * may_open() has already checked for this, so it should be
+        * impossible to trip now. But we need to be extra cautious
+        * and check again at the very end too.
+        */
        err = -EACCES;
-       if (!S_ISREG(file_inode(file)->i_mode))
-               goto exit;
-
-       if (path_noexec(&file->f_path))
+       if (WARN_ON_ONCE(!S_ISREG(file_inode(file)->i_mode) ||
+                        path_noexec(&file->f_path)))
                goto exit;
 
        err = deny_write_access(file);
@@ -1402,7 +1407,12 @@ int begin_new_exec(struct linux_binprm * bprm)
        if (retval)
                goto out_unlock;
 
-       set_fs(USER_DS);
+       /*
+        * Ensure that the uaccess routines can actually operate on userspace
+        * pointers:
+        */
+       force_uaccess_begin();
+
        me->flags &= ~(PF_RANDOMIZE | PF_FORKNOEXEC | PF_KTHREAD |
                                        PF_NOFREEZE | PF_NO_SETAFFINITY);
        flush_thread();
index 4055eb0..a987919 100644 (file)
@@ -158,7 +158,7 @@ int exfat_set_bitmap(struct inode *inode, unsigned int clu)
        b = BITMAP_OFFSET_BIT_IN_SECTOR(sb, ent_idx);
 
        set_bit_le(b, sbi->vol_amap[i]->b_data);
-       exfat_update_bh(sb, sbi->vol_amap[i], IS_DIRSYNC(inode));
+       exfat_update_bh(sbi->vol_amap[i], IS_DIRSYNC(inode));
        return 0;
 }
 
@@ -180,7 +180,7 @@ void exfat_clear_bitmap(struct inode *inode, unsigned int clu)
        b = BITMAP_OFFSET_BIT_IN_SECTOR(sb, ent_idx);
 
        clear_bit_le(b, sbi->vol_amap[i]->b_data);
-       exfat_update_bh(sb, sbi->vol_amap[i], IS_DIRSYNC(inode));
+       exfat_update_bh(sbi->vol_amap[i], IS_DIRSYNC(inode));
 
        if (opts->discard) {
                int ret_discard;
index 119abf0..573659b 100644 (file)
@@ -470,7 +470,7 @@ int exfat_init_dir_entry(struct inode *inode, struct exfat_chain *p_dir,
                        &ep->dentry.file.access_date,
                        NULL);
 
-       exfat_update_bh(sb, bh, IS_DIRSYNC(inode));
+       exfat_update_bh(bh, IS_DIRSYNC(inode));
        brelse(bh);
 
        ep = exfat_get_dentry(sb, p_dir, entry + 1, &bh, &sector);
@@ -480,7 +480,7 @@ int exfat_init_dir_entry(struct inode *inode, struct exfat_chain *p_dir,
        exfat_init_stream_entry(ep,
                (type == TYPE_FILE) ? ALLOC_FAT_CHAIN : ALLOC_NO_FAT_CHAIN,
                start_clu, size);
-       exfat_update_bh(sb, bh, IS_DIRSYNC(inode));
+       exfat_update_bh(bh, IS_DIRSYNC(inode));
        brelse(bh);
 
        return 0;
@@ -516,7 +516,7 @@ int exfat_update_dir_chksum(struct inode *inode, struct exfat_chain *p_dir,
        }
 
        fep->dentry.file.checksum = cpu_to_le16(chksum);
-       exfat_update_bh(sb, fbh, IS_DIRSYNC(inode));
+       exfat_update_bh(fbh, IS_DIRSYNC(inode));
 release_fbh:
        brelse(fbh);
        return ret;
@@ -538,7 +538,7 @@ int exfat_init_ext_entry(struct inode *inode, struct exfat_chain *p_dir,
                return -EIO;
 
        ep->dentry.file.num_ext = (unsigned char)(num_entries - 1);
-       exfat_update_bh(sb, bh, sync);
+       exfat_update_bh(bh, sync);
        brelse(bh);
 
        ep = exfat_get_dentry(sb, p_dir, entry + 1, &bh, &sector);
@@ -547,7 +547,7 @@ int exfat_init_ext_entry(struct inode *inode, struct exfat_chain *p_dir,
 
        ep->dentry.stream.name_len = p_uniname->name_len;
        ep->dentry.stream.name_hash = cpu_to_le16(p_uniname->name_hash);
-       exfat_update_bh(sb, bh, sync);
+       exfat_update_bh(bh, sync);
        brelse(bh);
 
        for (i = EXFAT_FIRST_CLUSTER; i < num_entries; i++) {
@@ -556,7 +556,7 @@ int exfat_init_ext_entry(struct inode *inode, struct exfat_chain *p_dir,
                        return -EIO;
 
                exfat_init_name_entry(ep, uniname);
-               exfat_update_bh(sb, bh, sync);
+               exfat_update_bh(bh, sync);
                brelse(bh);
                uniname += EXFAT_FILE_NAME_LEN;
        }
@@ -580,7 +580,7 @@ int exfat_remove_entries(struct inode *inode, struct exfat_chain *p_dir,
                        return -EIO;
 
                exfat_set_entry_type(ep, TYPE_DELETED);
-               exfat_update_bh(sb, bh, IS_DIRSYNC(inode));
+               exfat_update_bh(bh, IS_DIRSYNC(inode));
                brelse(bh);
        }
 
@@ -604,16 +604,20 @@ void exfat_update_dir_chksum_with_entry_set(struct exfat_entry_set_cache *es)
        es->modified = true;
 }
 
-void exfat_free_dentry_set(struct exfat_entry_set_cache *es, int sync)
+int exfat_free_dentry_set(struct exfat_entry_set_cache *es, int sync)
 {
-       int i;
+       int i, err = 0;
 
-       for (i = 0; i < es->num_bh; i++) {
-               if (es->modified)
-                       exfat_update_bh(es->sb, es->bh[i], sync);
-               brelse(es->bh[i]);
-       }
+       if (es->modified)
+               err = exfat_update_bhs(es->bh, es->num_bh, sync);
+
+       for (i = 0; i < es->num_bh; i++)
+               if (err)
+                       bforget(es->bh[i]);
+               else
+                       brelse(es->bh[i]);
        kfree(es);
+       return err;
 }
 
 static int exfat_walk_fat_chain(struct super_block *sb,
index 75c7bdb..95d717f 100644 (file)
@@ -13,8 +13,6 @@
 #define EXFAT_SUPER_MAGIC       0x2011BAB0UL
 #define EXFAT_ROOT_INO         1
 
-#define EXFAT_SB_DIRTY         0
-
 #define EXFAT_CLUSTERS_UNTRACKED (~0u)
 
 /*
@@ -226,7 +224,8 @@ struct exfat_sb_info {
        unsigned int num_FAT_sectors; /* num of FAT sectors */
        unsigned int root_dir; /* root dir cluster */
        unsigned int dentries_per_clu; /* num of dentries per cluster */
-       unsigned int vol_flag; /* volume dirty flag */
+       unsigned int vol_flags; /* volume flags */
+       unsigned int vol_flags_persistent; /* volume flags to retain */
        struct buffer_head *boot_bh; /* buffer_head of BOOT sector */
 
        unsigned int map_clu; /* allocation bitmap start cluster */
@@ -238,7 +237,6 @@ struct exfat_sb_info {
        unsigned int clu_srch_ptr; /* cluster search pointer */
        unsigned int used_clusters; /* number of used clusters */
 
-       unsigned long s_state;
        struct mutex s_lock; /* superblock lock */
        struct exfat_mount_options options;
        struct nls_table *nls_io; /* Charset used for input and display */
@@ -383,7 +381,8 @@ static inline int exfat_sector_to_cluster(struct exfat_sb_info *sbi,
 }
 
 /* super.c */
-int exfat_set_vol_flags(struct super_block *sb, unsigned short new_flag);
+int exfat_set_volume_dirty(struct super_block *sb);
+int exfat_clear_volume_dirty(struct super_block *sb);
 
 /* fatent.c */
 #define exfat_get_next_cluster(sb, pclu) exfat_ent_get(sb, *(pclu), pclu)
@@ -463,7 +462,7 @@ struct exfat_dentry *exfat_get_dentry_cached(struct exfat_entry_set_cache *es,
                int num);
 struct exfat_entry_set_cache *exfat_get_dentry_set(struct super_block *sb,
                struct exfat_chain *p_dir, int entry, unsigned int type);
-void exfat_free_dentry_set(struct exfat_entry_set_cache *es, int sync);
+int exfat_free_dentry_set(struct exfat_entry_set_cache *es, int sync);
 int exfat_count_dir_entries(struct super_block *sb, struct exfat_chain *p_dir);
 
 /* inode.c */
@@ -515,7 +514,8 @@ void exfat_set_entry_time(struct exfat_sb_info *sbi, struct timespec64 *ts,
                u8 *tz, __le16 *time, __le16 *date, u8 *time_cs);
 u16 exfat_calc_chksum16(void *data, int len, u16 chksum, int type);
 u32 exfat_calc_chksum32(void *data, int len, u32 chksum, int type);
-void exfat_update_bh(struct super_block *sb, struct buffer_head *bh, int sync);
+void exfat_update_bh(struct buffer_head *bh, int sync);
+int exfat_update_bhs(struct buffer_head **bhs, int nr_bhs, int sync);
 void exfat_chain_set(struct exfat_chain *ec, unsigned int dir,
                unsigned int size, unsigned char flags);
 void exfat_chain_dup(struct exfat_chain *dup, struct exfat_chain *ec);
index 350ce59..6aec628 100644 (file)
@@ -14,9 +14,8 @@
 
 #define EXFAT_MAX_FILE_LEN     255
 
-#define VOL_CLEAN              0x0000
-#define VOL_DIRTY              0x0002
-#define ERR_MEDIUM             0x0004
+#define VOLUME_DIRTY           0x0002
+#define MEDIA_FAILURE          0x0004
 
 #define EXFAT_EOF_CLUSTER      0xFFFFFFFFu
 #define EXFAT_BAD_CLUSTER      0xFFFFFFF7u
index 4e5c5c9..c3c9afe 100644 (file)
@@ -75,7 +75,7 @@ int exfat_ent_set(struct super_block *sb, unsigned int loc,
 
        fat_entry = (__le32 *)&(bh->b_data[off]);
        *fat_entry = cpu_to_le32(content);
-       exfat_update_bh(sb, bh, sb->s_flags & SB_SYNCHRONOUS);
+       exfat_update_bh(bh, sb->s_flags & SB_SYNCHRONOUS);
        exfat_mirror_bh(sb, sec, bh);
        brelse(bh);
        return 0;
@@ -174,7 +174,6 @@ int exfat_free_cluster(struct inode *inode, struct exfat_chain *p_chain)
                return -EIO;
        }
 
-       set_bit(EXFAT_SB_DIRTY, &sbi->s_state);
        clu = p_chain->dir;
 
        if (p_chain->flags == ALLOC_NO_FAT_CHAIN) {
@@ -230,21 +229,6 @@ int exfat_find_last_cluster(struct super_block *sb, struct exfat_chain *p_chain,
        return 0;
 }
 
-static inline int exfat_sync_bhs(struct buffer_head **bhs, int nr_bhs)
-{
-       int i, err = 0;
-
-       for (i = 0; i < nr_bhs; i++)
-               write_dirty_buffer(bhs[i], 0);
-
-       for (i = 0; i < nr_bhs; i++) {
-               wait_on_buffer(bhs[i]);
-               if (!err && !buffer_uptodate(bhs[i]))
-                       err = -EIO;
-       }
-       return err;
-}
-
 int exfat_zeroed_cluster(struct inode *dir, unsigned int clu)
 {
        struct super_block *sb = dir->i_sb;
@@ -266,41 +250,23 @@ int exfat_zeroed_cluster(struct inode *dir, unsigned int clu)
        }
 
        /* Zeroing the unused blocks on this cluster */
-       n = 0;
        while (blknr < last_blknr) {
-               bhs[n] = sb_getblk(sb, blknr);
-               if (!bhs[n]) {
-                       err = -ENOMEM;
-                       goto release_bhs;
-               }
-               memset(bhs[n]->b_data, 0, sb->s_blocksize);
-               exfat_update_bh(sb, bhs[n], 0);
-
-               n++;
-               blknr++;
-
-               if (n == nr_bhs) {
-                       if (IS_DIRSYNC(dir)) {
-                               err = exfat_sync_bhs(bhs, n);
-                               if (err)
-                                       goto release_bhs;
+               for (n = 0; n < nr_bhs && blknr < last_blknr; n++, blknr++) {
+                       bhs[n] = sb_getblk(sb, blknr);
+                       if (!bhs[n]) {
+                               err = -ENOMEM;
+                               goto release_bhs;
                        }
-
-                       for (i = 0; i < n; i++)
-                               brelse(bhs[i]);
-                       n = 0;
+                       memset(bhs[n]->b_data, 0, sb->s_blocksize);
                }
-       }
 
-       if (IS_DIRSYNC(dir)) {
-               err = exfat_sync_bhs(bhs, n);
+               err = exfat_update_bhs(bhs, n, IS_DIRSYNC(dir));
                if (err)
                        goto release_bhs;
-       }
-
-       for (i = 0; i < n; i++)
-               brelse(bhs[i]);
 
+               for (i = 0; i < n; i++)
+                       brelse(bhs[i]);
+       }
        return 0;
 
 release_bhs:
@@ -358,8 +324,6 @@ int exfat_alloc_cluster(struct inode *inode, unsigned int num_alloc,
                }
        }
 
-       set_bit(EXFAT_SB_DIRTY, &sbi->s_state);
-
        p_chain->dir = EXFAT_EOF_CLUSTER;
 
        while ((new_clu = exfat_find_free_bitmap(sb, hint_clu)) !=
index a6a0638..f41f523 100644 (file)
@@ -106,7 +106,7 @@ int __exfat_truncate(struct inode *inode, loff_t new_size)
        if (ei->type != TYPE_FILE && ei->type != TYPE_DIR)
                return -EPERM;
 
-       exfat_set_vol_flags(sb, VOL_DIRTY);
+       exfat_set_volume_dirty(sb);
 
        num_clusters_new = EXFAT_B_TO_CLU_ROUND_UP(i_size_read(inode), sbi);
        num_clusters_phys =
@@ -154,6 +154,7 @@ int __exfat_truncate(struct inode *inode, loff_t new_size)
                struct timespec64 ts;
                struct exfat_dentry *ep, *ep2;
                struct exfat_entry_set_cache *es;
+               int err;
 
                es = exfat_get_dentry_set(sb, &(ei->dir), ei->entry,
                                ES_ALL_ENTRIES);
@@ -188,7 +189,9 @@ int __exfat_truncate(struct inode *inode, loff_t new_size)
                }
 
                exfat_update_dir_chksum_with_entry_set(es);
-               exfat_free_dentry_set(es, inode_needs_sync(inode));
+               err = exfat_free_dentry_set(es, inode_needs_sync(inode));
+               if (err)
+                       return err;
        }
 
        /* cut off from the FAT chain */
@@ -217,7 +220,7 @@ int __exfat_truncate(struct inode *inode, loff_t new_size)
        if (exfat_free_cluster(inode, &clu))
                return -EIO;
 
-       exfat_set_vol_flags(sb, VOL_CLEAN);
+       exfat_clear_volume_dirty(sb);
 
        return 0;
 }
index cf9ca6c..7f90204 100644 (file)
@@ -39,7 +39,7 @@ static int __exfat_write_inode(struct inode *inode, int sync)
        if (is_dir && ei->dir.dir == sbi->root_dir && ei->entry == -1)
                return 0;
 
-       exfat_set_vol_flags(sb, VOL_DIRTY);
+       exfat_set_volume_dirty(sb);
 
        /* get the directory entry of given file or directory */
        es = exfat_get_dentry_set(sb, &(ei->dir), ei->entry, ES_ALL_ENTRIES);
@@ -77,8 +77,7 @@ static int __exfat_write_inode(struct inode *inode, int sync)
        ep2->dentry.stream.size = ep2->dentry.stream.valid_size;
 
        exfat_update_dir_chksum_with_entry_set(es);
-       exfat_free_dentry_set(es, sync);
-       return 0;
+       return exfat_free_dentry_set(es, sync);
 }
 
 int exfat_write_inode(struct inode *inode, struct writeback_control *wbc)
@@ -168,7 +167,7 @@ static int exfat_map_cluster(struct inode *inode, unsigned int clu_offset,
        }
 
        if (*clu == EXFAT_EOF_CLUSTER) {
-               exfat_set_vol_flags(sb, VOL_DIRTY);
+               exfat_set_volume_dirty(sb);
 
                new_clu.dir = (last_clu == EXFAT_EOF_CLUSTER) ?
                                EXFAT_EOF_CLUSTER : last_clu + 1;
@@ -222,6 +221,7 @@ static int exfat_map_cluster(struct inode *inode, unsigned int clu_offset,
                if (ei->dir.dir != DIR_DELETED && modified) {
                        struct exfat_dentry *ep;
                        struct exfat_entry_set_cache *es;
+                       int err;
 
                        es = exfat_get_dentry_set(sb, &(ei->dir), ei->entry,
                                ES_ALL_ENTRIES);
@@ -240,8 +240,9 @@ static int exfat_map_cluster(struct inode *inode, unsigned int clu_offset,
                                ep->dentry.stream.valid_size;
 
                        exfat_update_dir_chksum_with_entry_set(es);
-                       exfat_free_dentry_set(es, inode_needs_sync(inode));
-
+                       err = exfat_free_dentry_set(es, inode_needs_sync(inode));
+                       if (err)
+                               return err;
                } /* end of if != DIR_DELETED */
 
                inode->i_blocks +=
index 17d41f3..d34e619 100644 (file)
@@ -163,9 +163,8 @@ u32 exfat_calc_chksum32(void *data, int len, u32 chksum, int type)
        return chksum;
 }
 
-void exfat_update_bh(struct super_block *sb, struct buffer_head *bh, int sync)
+void exfat_update_bh(struct buffer_head *bh, int sync)
 {
-       set_bit(EXFAT_SB_DIRTY, &EXFAT_SB(sb)->s_state);
        set_buffer_uptodate(bh);
        mark_buffer_dirty(bh);
 
@@ -173,6 +172,25 @@ void exfat_update_bh(struct super_block *sb, struct buffer_head *bh, int sync)
                sync_dirty_buffer(bh);
 }
 
+int exfat_update_bhs(struct buffer_head **bhs, int nr_bhs, int sync)
+{
+       int i, err = 0;
+
+       for (i = 0; i < nr_bhs; i++) {
+               set_buffer_uptodate(bhs[i]);
+               mark_buffer_dirty(bhs[i]);
+               if (sync)
+                       write_dirty_buffer(bhs[i], 0);
+       }
+
+       for (i = 0; i < nr_bhs && sync; i++) {
+               wait_on_buffer(bhs[i]);
+               if (!err && !buffer_uptodate(bhs[i]))
+                       err = -EIO;
+       }
+       return err;
+}
+
 void exfat_chain_set(struct exfat_chain *ec, unsigned int dir,
                unsigned int size, unsigned char flags)
 {
index 2b9e210..e73f20f 100644 (file)
@@ -387,7 +387,7 @@ static int exfat_find_empty_entry(struct inode *inode,
                        ep->dentry.stream.valid_size = cpu_to_le64(size);
                        ep->dentry.stream.size = ep->dentry.stream.valid_size;
                        ep->dentry.stream.flags = p_dir->flags;
-                       exfat_update_bh(sb, bh, IS_DIRSYNC(inode));
+                       exfat_update_bh(bh, IS_DIRSYNC(inode));
                        brelse(bh);
                        if (exfat_update_dir_chksum(inode, &(ei->dir),
                            ei->entry))
@@ -562,10 +562,10 @@ static int exfat_create(struct inode *dir, struct dentry *dentry, umode_t mode,
        int err;
 
        mutex_lock(&EXFAT_SB(sb)->s_lock);
-       exfat_set_vol_flags(sb, VOL_DIRTY);
+       exfat_set_volume_dirty(sb);
        err = exfat_add_entry(dir, dentry->d_name.name, &cdir, TYPE_FILE,
                &info);
-       exfat_set_vol_flags(sb, VOL_CLEAN);
+       exfat_clear_volume_dirty(sb);
        if (err)
                goto unlock;
 
@@ -834,7 +834,7 @@ static int exfat_unlink(struct inode *dir, struct dentry *dentry)
        num_entries++;
        brelse(bh);
 
-       exfat_set_vol_flags(sb, VOL_DIRTY);
+       exfat_set_volume_dirty(sb);
        /* update the directory entry */
        if (exfat_remove_entries(dir, &cdir, entry, 0, num_entries)) {
                err = -EIO;
@@ -843,7 +843,7 @@ static int exfat_unlink(struct inode *dir, struct dentry *dentry)
 
        /* This doesn't modify ei */
        ei->dir.dir = DIR_DELETED;
-       exfat_set_vol_flags(sb, VOL_CLEAN);
+       exfat_clear_volume_dirty(sb);
 
        inode_inc_iversion(dir);
        dir->i_mtime = dir->i_atime = current_time(dir);
@@ -873,10 +873,10 @@ static int exfat_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
        int err;
 
        mutex_lock(&EXFAT_SB(sb)->s_lock);
-       exfat_set_vol_flags(sb, VOL_DIRTY);
+       exfat_set_volume_dirty(sb);
        err = exfat_add_entry(dir, dentry->d_name.name, &cdir, TYPE_DIR,
                &info);
-       exfat_set_vol_flags(sb, VOL_CLEAN);
+       exfat_clear_volume_dirty(sb);
        if (err)
                goto unlock;
 
@@ -1001,14 +1001,14 @@ static int exfat_rmdir(struct inode *dir, struct dentry *dentry)
        num_entries++;
        brelse(bh);
 
-       exfat_set_vol_flags(sb, VOL_DIRTY);
+       exfat_set_volume_dirty(sb);
        err = exfat_remove_entries(dir, &cdir, entry, 0, num_entries);
        if (err) {
                exfat_err(sb, "failed to exfat_remove_entries : err(%d)", err);
                goto unlock;
        }
        ei->dir.dir = DIR_DELETED;
-       exfat_set_vol_flags(sb, VOL_CLEAN);
+       exfat_clear_volume_dirty(sb);
 
        inode_inc_iversion(dir);
        dir->i_mtime = dir->i_atime = current_time(dir);
@@ -1071,7 +1071,7 @@ static int exfat_rename_file(struct inode *inode, struct exfat_chain *p_dir,
                        epnew->dentry.file.attr |= cpu_to_le16(ATTR_ARCHIVE);
                        ei->attr |= ATTR_ARCHIVE;
                }
-               exfat_update_bh(sb, new_bh, sync);
+               exfat_update_bh(new_bh, sync);
                brelse(old_bh);
                brelse(new_bh);
 
@@ -1087,7 +1087,7 @@ static int exfat_rename_file(struct inode *inode, struct exfat_chain *p_dir,
                }
 
                memcpy(epnew, epold, DENTRY_SIZE);
-               exfat_update_bh(sb, new_bh, sync);
+               exfat_update_bh(new_bh, sync);
                brelse(old_bh);
                brelse(new_bh);
 
@@ -1104,7 +1104,7 @@ static int exfat_rename_file(struct inode *inode, struct exfat_chain *p_dir,
                        epold->dentry.file.attr |= cpu_to_le16(ATTR_ARCHIVE);
                        ei->attr |= ATTR_ARCHIVE;
                }
-               exfat_update_bh(sb, old_bh, sync);
+               exfat_update_bh(old_bh, sync);
                brelse(old_bh);
                ret = exfat_init_ext_entry(inode, p_dir, oldentry,
                        num_new_entries, p_uniname);
@@ -1159,7 +1159,7 @@ static int exfat_move_file(struct inode *inode, struct exfat_chain *p_olddir,
                epnew->dentry.file.attr |= cpu_to_le16(ATTR_ARCHIVE);
                ei->attr |= ATTR_ARCHIVE;
        }
-       exfat_update_bh(sb, new_bh, IS_DIRSYNC(inode));
+       exfat_update_bh(new_bh, IS_DIRSYNC(inode));
        brelse(mov_bh);
        brelse(new_bh);
 
@@ -1175,7 +1175,7 @@ static int exfat_move_file(struct inode *inode, struct exfat_chain *p_olddir,
        }
 
        memcpy(epnew, epmov, DENTRY_SIZE);
-       exfat_update_bh(sb, new_bh, IS_DIRSYNC(inode));
+       exfat_update_bh(new_bh, IS_DIRSYNC(inode));
        brelse(mov_bh);
        brelse(new_bh);
 
@@ -1300,7 +1300,7 @@ static int __exfat_rename(struct inode *old_parent_inode,
        if (ret)
                goto out;
 
-       exfat_set_vol_flags(sb, VOL_DIRTY);
+       exfat_set_volume_dirty(sb);
 
        if (olddir.dir == newdir.dir)
                ret = exfat_rename_file(new_parent_inode, &olddir, dentry,
@@ -1355,7 +1355,7 @@ del_out:
                 */
                new_ei->dir.dir = DIR_DELETED;
        }
-       exfat_set_vol_flags(sb, VOL_CLEAN);
+       exfat_clear_volume_dirty(sb);
 out:
        return ret;
 }
index 253a924..3b6a165 100644 (file)
@@ -45,9 +45,6 @@ static void exfat_put_super(struct super_block *sb)
        struct exfat_sb_info *sbi = EXFAT_SB(sb);
 
        mutex_lock(&sbi->s_lock);
-       if (test_and_clear_bit(EXFAT_SB_DIRTY, &sbi->s_state))
-               sync_blockdev(sb->s_bdev);
-       exfat_set_vol_flags(sb, VOL_CLEAN);
        exfat_free_bitmap(sbi);
        brelse(sbi->boot_bh);
        mutex_unlock(&sbi->s_lock);
@@ -60,13 +57,14 @@ static int exfat_sync_fs(struct super_block *sb, int wait)
        struct exfat_sb_info *sbi = EXFAT_SB(sb);
        int err = 0;
 
+       if (!wait)
+               return 0;
+
        /* If there are some dirty buffers in the bdev inode */
        mutex_lock(&sbi->s_lock);
-       if (test_and_clear_bit(EXFAT_SB_DIRTY, &sbi->s_state)) {
-               sync_blockdev(sb->s_bdev);
-               if (exfat_set_vol_flags(sb, VOL_CLEAN))
-                       err = -EIO;
-       }
+       sync_blockdev(sb->s_bdev);
+       if (exfat_clear_volume_dirty(sb))
+               err = -EIO;
        mutex_unlock(&sbi->s_lock);
        return err;
 }
@@ -98,17 +96,20 @@ static int exfat_statfs(struct dentry *dentry, struct kstatfs *buf)
        return 0;
 }
 
-int exfat_set_vol_flags(struct super_block *sb, unsigned short new_flag)
+static int exfat_set_vol_flags(struct super_block *sb, unsigned short new_flags)
 {
        struct exfat_sb_info *sbi = EXFAT_SB(sb);
        struct boot_sector *p_boot = (struct boot_sector *)sbi->boot_bh->b_data;
        bool sync;
 
+       /* retain persistent-flags */
+       new_flags |= sbi->vol_flags_persistent;
+
        /* flags are not changed */
-       if (sbi->vol_flag == new_flag)
+       if (sbi->vol_flags == new_flags)
                return 0;
 
-       sbi->vol_flag = new_flag;
+       sbi->vol_flags = new_flags;
 
        /* skip updating volume dirty flag,
         * if this volume has been mounted with read-only
@@ -116,9 +117,9 @@ int exfat_set_vol_flags(struct super_block *sb, unsigned short new_flag)
        if (sb_rdonly(sb))
                return 0;
 
-       p_boot->vol_flags = cpu_to_le16(new_flag);
+       p_boot->vol_flags = cpu_to_le16(new_flags);
 
-       if (new_flag == VOL_DIRTY && !buffer_dirty(sbi->boot_bh))
+       if ((new_flags & VOLUME_DIRTY) && !buffer_dirty(sbi->boot_bh))
                sync = true;
        else
                sync = false;
@@ -131,6 +132,20 @@ int exfat_set_vol_flags(struct super_block *sb, unsigned short new_flag)
        return 0;
 }
 
+int exfat_set_volume_dirty(struct super_block *sb)
+{
+       struct exfat_sb_info *sbi = EXFAT_SB(sb);
+
+       return exfat_set_vol_flags(sb, sbi->vol_flags | VOLUME_DIRTY);
+}
+
+int exfat_clear_volume_dirty(struct super_block *sb)
+{
+       struct exfat_sb_info *sbi = EXFAT_SB(sb);
+
+       return exfat_set_vol_flags(sb, sbi->vol_flags & ~VOLUME_DIRTY);
+}
+
 static int exfat_show_options(struct seq_file *m, struct dentry *root)
 {
        struct super_block *sb = root->d_sb;
@@ -459,7 +474,8 @@ static int exfat_read_boot_sector(struct super_block *sb)
        sbi->dentries_per_clu = 1 <<
                (sbi->cluster_size_bits - DENTRY_SIZE_BITS);
 
-       sbi->vol_flag = le16_to_cpu(p_boot->vol_flags);
+       sbi->vol_flags = le16_to_cpu(p_boot->vol_flags);
+       sbi->vol_flags_persistent = sbi->vol_flags & (VOLUME_DIRTY | MEDIA_FAILURE);
        sbi->clu_srch_ptr = EXFAT_FIRST_CLUSTER;
        sbi->used_clusters = EXFAT_CLUSTERS_UNTRACKED;
 
@@ -474,9 +490,9 @@ static int exfat_read_boot_sector(struct super_block *sb)
                exfat_err(sb, "bogus data start sector");
                return -EINVAL;
        }
-       if (sbi->vol_flag & VOL_DIRTY)
+       if (sbi->vol_flags & VOLUME_DIRTY)
                exfat_warn(sb, "Volume was not properly unmounted. Some data may be corrupt. Please run fsck.");
-       if (sbi->vol_flag & ERR_MEDIUM)
+       if (sbi->vol_flags & MEDIA_FAILURE)
                exfat_warn(sb, "Medium has reported failures. Some data may be lost.");
 
        /* exFAT file size is limited by a disk volume size */
index 1afa5a4..619dd35 100644 (file)
@@ -110,7 +110,7 @@ config EXT4_KUNIT_TESTS
          This builds the ext4 KUnit tests.
 
          KUnit tests run during boot and output the results to the debug log
-         in TAP format (http://testanything.org/). Only useful for kernel devs
+         in TAP format (https://testanything.org/). Only useful for kernel devs
          running KUnit test harness and are not for inclusion into a production
          build.
 
index 1ba46d8..48c3df4 100644 (file)
@@ -413,7 +413,8 @@ verified:
  * Return buffer_head on success or an ERR_PTR in case of failure.
  */
 struct buffer_head *
-ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
+ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group,
+                             bool ignore_locked)
 {
        struct ext4_group_desc *desc;
        struct ext4_sb_info *sbi = EXT4_SB(sb);
@@ -441,6 +442,12 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
                return ERR_PTR(-ENOMEM);
        }
 
+       if (ignore_locked && buffer_locked(bh)) {
+               /* buffer under IO already, return if called for prefetching */
+               put_bh(bh);
+               return NULL;
+       }
+
        if (bitmap_uptodate(bh))
                goto verify;
 
@@ -487,10 +494,11 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
         * submit the buffer_head for reading
         */
        set_buffer_new(bh);
-       trace_ext4_read_block_bitmap_load(sb, block_group);
+       trace_ext4_read_block_bitmap_load(sb, block_group, ignore_locked);
        bh->b_end_io = ext4_end_bitmap_read;
        get_bh(bh);
-       submit_bh(REQ_OP_READ, REQ_META | REQ_PRIO, bh);
+       submit_bh(REQ_OP_READ, REQ_META | REQ_PRIO |
+                 (ignore_locked ? REQ_RAHEAD : 0), bh);
        return bh;
 verify:
        err = ext4_validate_block_bitmap(sb, desc, block_group, bh);
@@ -534,7 +542,7 @@ ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group)
        struct buffer_head *bh;
        int err;
 
-       bh = ext4_read_block_bitmap_nowait(sb, block_group);
+       bh = ext4_read_block_bitmap_nowait(sb, block_group, false);
        if (IS_ERR(bh))
                return bh;
        err = ext4_wait_block_bitmap(sb, block_group, bh);
index 16e9b2f..c54ba52 100644 (file)
@@ -24,6 +24,7 @@ struct ext4_system_zone {
        struct rb_node  node;
        ext4_fsblk_t    start_blk;
        unsigned int    count;
+       u32             ino;
 };
 
 static struct kmem_cache *ext4_system_zone_cachep;
@@ -45,7 +46,8 @@ void ext4_exit_system_zone(void)
 static inline int can_merge(struct ext4_system_zone *entry1,
                     struct ext4_system_zone *entry2)
 {
-       if ((entry1->start_blk + entry1->count) == entry2->start_blk)
+       if ((entry1->start_blk + entry1->count) == entry2->start_blk &&
+           entry1->ino == entry2->ino)
                return 1;
        return 0;
 }
@@ -66,9 +68,9 @@ static void release_system_zone(struct ext4_system_blocks *system_blks)
  */
 static int add_system_zone(struct ext4_system_blocks *system_blks,
                           ext4_fsblk_t start_blk,
-                          unsigned int count)
+                          unsigned int count, u32 ino)
 {
-       struct ext4_system_zone *new_entry = NULL, *entry;
+       struct ext4_system_zone *new_entry, *entry;
        struct rb_node **n = &system_blks->root.rb_node, *node;
        struct rb_node *parent = NULL, *new_node = NULL;
 
@@ -79,30 +81,21 @@ static int add_system_zone(struct ext4_system_blocks *system_blks,
                        n = &(*n)->rb_left;
                else if (start_blk >= (entry->start_blk + entry->count))
                        n = &(*n)->rb_right;
-               else {
-                       if (start_blk + count > (entry->start_blk +
-                                                entry->count))
-                               entry->count = (start_blk + count -
-                                               entry->start_blk);
-                       new_node = *n;
-                       new_entry = rb_entry(new_node, struct ext4_system_zone,
-                                            node);
-                       break;
-               }
+               else    /* Unexpected overlap of system zones. */
+                       return -EFSCORRUPTED;
        }
 
-       if (!new_entry) {
-               new_entry = kmem_cache_alloc(ext4_system_zone_cachep,
-                                            GFP_KERNEL);
-               if (!new_entry)
-                       return -ENOMEM;
-               new_entry->start_blk = start_blk;
-               new_entry->count = count;
-               new_node = &new_entry->node;
-
-               rb_link_node(new_node, parent, n);
-               rb_insert_color(new_node, &system_blks->root);
-       }
+       new_entry = kmem_cache_alloc(ext4_system_zone_cachep,
+                                    GFP_KERNEL);
+       if (!new_entry)
+               return -ENOMEM;
+       new_entry->start_blk = start_blk;
+       new_entry->count = count;
+       new_entry->ino = ino;
+       new_node = &new_entry->node;
+
+       rb_link_node(new_node, parent, n);
+       rb_insert_color(new_node, &system_blks->root);
 
        /* Can we merge to the left? */
        node = rb_prev(new_node);
@@ -151,40 +144,6 @@ static void debug_print_tree(struct ext4_sb_info *sbi)
        printk(KERN_CONT "\n");
 }
 
-/*
- * Returns 1 if the passed-in block region (start_blk,
- * start_blk+count) is valid; 0 if some part of the block region
- * overlaps with filesystem metadata blocks.
- */
-static int ext4_data_block_valid_rcu(struct ext4_sb_info *sbi,
-                                    struct ext4_system_blocks *system_blks,
-                                    ext4_fsblk_t start_blk,
-                                    unsigned int count)
-{
-       struct ext4_system_zone *entry;
-       struct rb_node *n;
-
-       if ((start_blk <= le32_to_cpu(sbi->s_es->s_first_data_block)) ||
-           (start_blk + count < start_blk) ||
-           (start_blk + count > ext4_blocks_count(sbi->s_es)))
-               return 0;
-
-       if (system_blks == NULL)
-               return 1;
-
-       n = system_blks->root.rb_node;
-       while (n) {
-               entry = rb_entry(n, struct ext4_system_zone, node);
-               if (start_blk + count - 1 < entry->start_blk)
-                       n = n->rb_left;
-               else if (start_blk >= (entry->start_blk + entry->count))
-                       n = n->rb_right;
-               else
-                       return 0;
-       }
-       return 1;
-}
-
 static int ext4_protect_reserved_inode(struct super_block *sb,
                                       struct ext4_system_blocks *system_blks,
                                       u32 ino)
@@ -214,19 +173,18 @@ static int ext4_protect_reserved_inode(struct super_block *sb,
                if (n == 0) {
                        i++;
                } else {
-                       if (!ext4_data_block_valid_rcu(sbi, system_blks,
-                                               map.m_pblk, n)) {
-                               err = -EFSCORRUPTED;
-                               __ext4_error(sb, __func__, __LINE__, -err,
-                                            map.m_pblk, "blocks %llu-%llu "
-                                            "from inode %u overlap system zone",
-                                            map.m_pblk,
-                                            map.m_pblk + map.m_len - 1, ino);
+                       err = add_system_zone(system_blks, map.m_pblk, n, ino);
+                       if (err < 0) {
+                               if (err == -EFSCORRUPTED) {
+                                       __ext4_error(sb, __func__, __LINE__,
+                                                    -err, map.m_pblk,
+                                                    "blocks %llu-%llu from inode %u overlap system zone",
+                                                    map.m_pblk,
+                                                    map.m_pblk + map.m_len - 1,
+                                                    ino);
+                               }
                                break;
                        }
-                       err = add_system_zone(system_blks, map.m_pblk, n);
-                       if (err < 0)
-                               break;
                        i += n;
                }
        }
@@ -262,14 +220,6 @@ int ext4_setup_system_zone(struct super_block *sb)
        int flex_size = ext4_flex_bg_size(sbi);
        int ret;
 
-       if (!test_opt(sb, BLOCK_VALIDITY)) {
-               if (sbi->system_blks)
-                       ext4_release_system_zone(sb);
-               return 0;
-       }
-       if (sbi->system_blks)
-               return 0;
-
        system_blks = kzalloc(sizeof(*system_blks), GFP_KERNEL);
        if (!system_blks)
                return -ENOMEM;
@@ -277,22 +227,25 @@ int ext4_setup_system_zone(struct super_block *sb)
        for (i=0; i < ngroups; i++) {
                cond_resched();
                if (ext4_bg_has_super(sb, i) &&
-                   ((i < 5) || ((i % flex_size) == 0)))
-                       add_system_zone(system_blks,
+                   ((i < 5) || ((i % flex_size) == 0))) {
+                       ret = add_system_zone(system_blks,
                                        ext4_group_first_block_no(sb, i),
-                                       ext4_bg_num_gdb(sb, i) + 1);
+                                       ext4_bg_num_gdb(sb, i) + 1, 0);
+                       if (ret)
+                               goto err;
+               }
                gdp = ext4_get_group_desc(sb, i, NULL);
                ret = add_system_zone(system_blks,
-                               ext4_block_bitmap(sb, gdp), 1);
+                               ext4_block_bitmap(sb, gdp), 1, 0);
                if (ret)
                        goto err;
                ret = add_system_zone(system_blks,
-                               ext4_inode_bitmap(sb, gdp), 1);
+                               ext4_inode_bitmap(sb, gdp), 1, 0);
                if (ret)
                        goto err;
                ret = add_system_zone(system_blks,
                                ext4_inode_table(sb, gdp),
-                               sbi->s_itb_per_group);
+                               sbi->s_itb_per_group, 0);
                if (ret)
                        goto err;
        }
@@ -341,11 +294,24 @@ void ext4_release_system_zone(struct super_block *sb)
                call_rcu(&system_blks->rcu, ext4_destroy_system_zone);
 }
 
-int ext4_data_block_valid(struct ext4_sb_info *sbi, ext4_fsblk_t start_blk,
+/*
+ * Returns 1 if the passed-in block region (start_blk,
+ * start_blk+count) is valid; 0 if some part of the block region
+ * overlaps with some other filesystem metadata blocks.
+ */
+int ext4_inode_block_valid(struct inode *inode, ext4_fsblk_t start_blk,
                          unsigned int count)
 {
+       struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        struct ext4_system_blocks *system_blks;
-       int ret;
+       struct ext4_system_zone *entry;
+       struct rb_node *n;
+       int ret = 1;
+
+       if ((start_blk <= le32_to_cpu(sbi->s_es->s_first_data_block)) ||
+           (start_blk + count < start_blk) ||
+           (start_blk + count > ext4_blocks_count(sbi->s_es)))
+               return 0;
 
        /*
         * Lock the system zone to prevent it being released concurrently
@@ -354,8 +320,22 @@ int ext4_data_block_valid(struct ext4_sb_info *sbi, ext4_fsblk_t start_blk,
         */
        rcu_read_lock();
        system_blks = rcu_dereference(sbi->system_blks);
-       ret = ext4_data_block_valid_rcu(sbi, system_blks, start_blk,
-                                       count);
+       if (system_blks == NULL)
+               goto out_rcu;
+
+       n = system_blks->root.rb_node;
+       while (n) {
+               entry = rb_entry(n, struct ext4_system_zone, node);
+               if (start_blk + count - 1 < entry->start_blk)
+                       n = n->rb_left;
+               else if (start_blk >= (entry->start_blk + entry->count))
+                       n = n->rb_right;
+               else {
+                       ret = (entry->ino == inode->i_ino);
+                       break;
+               }
+       }
+out_rcu:
        rcu_read_unlock();
        return ret;
 }
@@ -374,8 +354,7 @@ int ext4_check_blockref(const char *function, unsigned int line,
        while (bref < p+max) {
                blk = le32_to_cpu(*bref++);
                if (blk &&
-                   unlikely(!ext4_data_block_valid(EXT4_SB(inode->i_sb),
-                                                   blk, 1))) {
+                   unlikely(!ext4_inode_block_valid(inode, blk, 1))) {
                        ext4_error_inode(inode, function, line, blk,
                                         "invalid block");
                        return -EFSCORRUPTED;
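
With this change every system zone remembers which inode owns it (ino == 0 for zones belonging to the filesystem metadata itself), zones are kept strictly non-overlapping, and ext4_inode_block_valid() accepts an overlapping range only when the owner matches the inode being validated. A stripped-down, hypothetical sketch of that rbtree range lookup (the demo_* types are not from ext4):

#include <linux/rbtree.h>
#include <linux/types.h>

struct demo_zone {
        struct rb_node node;
        u64 start;
        u32 count;
        u32 ino;        /* 0: reserved for the filesystem itself */
};

/*
 * Return true if [start, start + count) is usable by @ino: either it
 * misses every zone, or the zone it hits is owned by @ino.
 */
static bool demo_range_valid(struct rb_root *root, u64 start, u32 count,
                             u32 ino)
{
        struct rb_node *n = root->rb_node;

        while (n) {
                struct demo_zone *z = rb_entry(n, struct demo_zone, node);

                if (start + count - 1 < z->start)
                        n = n->rb_left;
                else if (start >= z->start + z->count)
                        n = n->rb_right;
                else
                        return z->ino == ino;
        }
        return true;
}
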
index 42f5060..523e00d 100644 (file)
@@ -434,10 +434,36 @@ struct flex_groups {
 #define EXT4_CASEFOLD_FL               0x40000000 /* Casefolded directory */
 #define EXT4_RESERVED_FL               0x80000000 /* reserved for ext4 lib */
 
-#define EXT4_FL_USER_VISIBLE           0x725BDFFF /* User visible flags */
-#define EXT4_FL_USER_MODIFIABLE                0x624BC0FF /* User modifiable flags */
-
-/* Flags we can manipulate with through EXT4_IOC_FSSETXATTR */
+/* User modifiable flags */
+#define EXT4_FL_USER_MODIFIABLE                (EXT4_SECRM_FL | \
+                                        EXT4_UNRM_FL | \
+                                        EXT4_COMPR_FL | \
+                                        EXT4_SYNC_FL | \
+                                        EXT4_IMMUTABLE_FL | \
+                                        EXT4_APPEND_FL | \
+                                        EXT4_NODUMP_FL | \
+                                        EXT4_NOATIME_FL | \
+                                        EXT4_JOURNAL_DATA_FL | \
+                                        EXT4_NOTAIL_FL | \
+                                        EXT4_DIRSYNC_FL | \
+                                        EXT4_TOPDIR_FL | \
+                                        EXT4_EXTENTS_FL | \
+                                        0x00400000 /* EXT4_EOFBLOCKS_FL */ | \
+                                        EXT4_DAX_FL | \
+                                        EXT4_PROJINHERIT_FL | \
+                                        EXT4_CASEFOLD_FL)
+
+/* User visible flags */
+#define EXT4_FL_USER_VISIBLE           (EXT4_FL_USER_MODIFIABLE | \
+                                        EXT4_DIRTY_FL | \
+                                        EXT4_COMPRBLK_FL | \
+                                        EXT4_NOCOMPR_FL | \
+                                        EXT4_ENCRYPT_FL | \
+                                        EXT4_INDEX_FL | \
+                                        EXT4_VERITY_FL | \
+                                        EXT4_INLINE_DATA_FL)
+
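
As a side note, masks composed this way are typically applied by keeping the kernel-owned bits from the current flags and taking only the user-settable bits from the request. The sketch below shows that masking pattern with a few invented FL_* stand-ins and a hypothetical apply_user_flags() helper; it is not the actual ext4_ioctl_setflags() logic, just the general shape such masks support.

    #include <stdio.h>

    /* illustrative stand-ins for a few flag bits; values are made up */
    #define FL_IMMUTABLE        0x00000010
    #define FL_APPEND           0x00000020
    #define FL_ENCRYPT          0x00000800

    #define FL_USER_MODIFIABLE  (FL_IMMUTABLE | FL_APPEND)
    #define FL_USER_VISIBLE     (FL_USER_MODIFIABLE | FL_ENCRYPT)

    /* keep kernel-owned bits, take user-settable bits from the request */
    static unsigned int apply_user_flags(unsigned int cur, unsigned int req)
    {
        return (cur & ~FL_USER_MODIFIABLE) | (req & FL_USER_MODIFIABLE);
    }

    int main(void)
    {
        unsigned int cur = FL_ENCRYPT;                 /* set by the kernel */
        unsigned int req = FL_APPEND | FL_ENCRYPT;     /* user asks for both */
        unsigned int new = apply_user_flags(cur, req);

        printf("new flags: %#x\n", new);       /* FL_ENCRYPT kept, FL_APPEND added */
        printf("visible:   %#x\n", new & FL_USER_VISIBLE);
        return 0;
    }

Keeping EXT4_FL_USER_VISIBLE a superset of EXT4_FL_USER_MODIFIABLE, as the new definitions above do, ensures that any bit a caller can set it can also read back.
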
+/* Flags we can manipulate with through FS_IOC_FSSETXATTR */
 #define EXT4_FL_XFLAG_VISIBLE          (EXT4_SYNC_FL | \
                                         EXT4_IMMUTABLE_FL | \
                                         EXT4_APPEND_FL | \
@@ -669,8 +695,6 @@ enum {
 /*
  * ioctl commands
  */
-#define        EXT4_IOC_GETFLAGS               FS_IOC_GETFLAGS
-#define        EXT4_IOC_SETFLAGS               FS_IOC_SETFLAGS
 #define        EXT4_IOC_GETVERSION             _IOR('f', 3, long)
 #define        EXT4_IOC_SETVERSION             _IOW('f', 4, long)
 #define        EXT4_IOC_GETVERSION_OLD         FS_IOC_GETVERSION
@@ -687,17 +711,11 @@ enum {
 #define EXT4_IOC_RESIZE_FS             _IOW('f', 16, __u64)
 #define EXT4_IOC_SWAP_BOOT             _IO('f', 17)
 #define EXT4_IOC_PRECACHE_EXTENTS      _IO('f', 18)
-#define EXT4_IOC_SET_ENCRYPTION_POLICY FS_IOC_SET_ENCRYPTION_POLICY
-#define EXT4_IOC_GET_ENCRYPTION_PWSALT FS_IOC_GET_ENCRYPTION_PWSALT
-#define EXT4_IOC_GET_ENCRYPTION_POLICY FS_IOC_GET_ENCRYPTION_POLICY
 /* ioctl codes 19--39 are reserved for fscrypt */
 #define EXT4_IOC_CLEAR_ES_CACHE                _IO('f', 40)
 #define EXT4_IOC_GETSTATE              _IOW('f', 41, __u32)
 #define EXT4_IOC_GET_ES_CACHE          _IOWR('f', 42, struct fiemap)
 
-#define EXT4_IOC_FSGETXATTR            FS_IOC_FSGETXATTR
-#define EXT4_IOC_FSSETXATTR            FS_IOC_FSSETXATTR
-
 #define EXT4_IOC_SHUTDOWN _IOR ('X', 125, __u32)
 
 /*
@@ -722,8 +740,6 @@ enum {
 /*
  * ioctl commands in 32 bit emulation
  */
-#define EXT4_IOC32_GETFLAGS            FS_IOC32_GETFLAGS
-#define EXT4_IOC32_SETFLAGS            FS_IOC32_SETFLAGS
 #define EXT4_IOC32_GETVERSION          _IOR('f', 3, int)
 #define EXT4_IOC32_SETVERSION          _IOW('f', 4, int)
 #define EXT4_IOC32_GETRSVSZ            _IOR('f', 5, int)
@@ -1054,6 +1070,7 @@ struct ext4_inode_info {
        struct timespec64 i_crtime;
 
        /* mballoc */
+       atomic_t i_prealloc_active;
        struct list_head i_prealloc_list;
        spinlock_t i_prealloc_lock;
 
@@ -1172,6 +1189,7 @@ struct ext4_inode_info {
 #define EXT4_MOUNT_JOURNAL_CHECKSUM    0x800000 /* Journal checksums */
 #define EXT4_MOUNT_JOURNAL_ASYNC_COMMIT        0x1000000 /* Journal Async Commit */
 #define EXT4_MOUNT_WARN_ON_ERROR       0x2000000 /* Trigger WARN_ON on error */
+#define EXT4_MOUNT_PREFETCH_BLOCK_BITMAPS 0x4000000
 #define EXT4_MOUNT_DELALLOC            0x8000000 /* Delalloc support */
 #define EXT4_MOUNT_DATA_ERR_ABORT      0x10000000 /* Abort on file data write */
 #define EXT4_MOUNT_BLOCK_VALIDITY      0x20000000 /* Block validity checking */
@@ -1501,10 +1519,13 @@ struct ext4_sb_info {
        unsigned int s_mb_stats;
        unsigned int s_mb_order2_reqs;
        unsigned int s_mb_group_prealloc;
+       unsigned int s_mb_max_inode_prealloc;
        unsigned int s_max_dir_size_kb;
        /* where last allocation was done - for stream allocation */
        unsigned long s_mb_last_group;
        unsigned long s_mb_last_start;
+       unsigned int s_mb_prefetch;
+       unsigned int s_mb_prefetch_limit;
 
        /* stats for buddy allocator */
        atomic_t s_bal_reqs;    /* number of reqs with len > 1 */
@@ -1572,6 +1593,8 @@ struct ext4_sb_info {
        struct ratelimit_state s_err_ratelimit_state;
        struct ratelimit_state s_warning_ratelimit_state;
        struct ratelimit_state s_msg_ratelimit_state;
+       atomic_t s_warning_count;
+       atomic_t s_msg_count;
 
        /* Encryption context for '-o test_dummy_encryption' */
        struct fscrypt_dummy_context s_dummy_enc_ctx;
@@ -1585,6 +1608,9 @@ struct ext4_sb_info {
 #ifdef CONFIG_EXT4_DEBUG
        unsigned long s_simulate_fail;
 #endif
+       /* Record the errseq of the backing block device */
+       errseq_t s_bdev_wb_err;
+       spinlock_t s_bdev_wb_lock;
 };
 
 static inline struct ext4_sb_info *EXT4_SB(struct super_block *sb)
@@ -2313,9 +2339,15 @@ struct ext4_lazy_init {
        struct mutex            li_list_mtx;
 };
 
+enum ext4_li_mode {
+       EXT4_LI_MODE_PREFETCH_BBITMAP,
+       EXT4_LI_MODE_ITABLE,
+};
+
 struct ext4_li_request {
        struct super_block      *lr_super;
-       struct ext4_sb_info     *lr_sbi;
+       enum ext4_li_mode       lr_mode;
+       ext4_group_t            lr_first_not_zeroed;
        ext4_group_t            lr_next_group;
        struct list_head        lr_request;
        unsigned long           lr_next_sched;
@@ -2446,7 +2478,8 @@ extern struct ext4_group_desc * ext4_get_group_desc(struct super_block * sb,
 extern int ext4_should_retry_alloc(struct super_block *sb, int *retries);
 
 extern struct buffer_head *ext4_read_block_bitmap_nowait(struct super_block *sb,
-                                               ext4_group_t block_group);
+                                               ext4_group_t block_group,
+                                               bool ignore_locked);
 extern int ext4_wait_block_bitmap(struct super_block *sb,
                                  ext4_group_t block_group,
                                  struct buffer_head *bh);
@@ -2651,9 +2684,15 @@ extern int ext4_mb_release(struct super_block *);
 extern ext4_fsblk_t ext4_mb_new_blocks(handle_t *,
                                struct ext4_allocation_request *, int *);
 extern int ext4_mb_reserve_blocks(struct super_block *, int);
-extern void ext4_discard_preallocations(struct inode *);
+extern void ext4_discard_preallocations(struct inode *, unsigned int);
 extern int __init ext4_init_mballoc(void);
 extern void ext4_exit_mballoc(void);
+extern ext4_group_t ext4_mb_prefetch(struct super_block *sb,
+                                    ext4_group_t group,
+                                    unsigned int nr, int *cnt);
+extern void ext4_mb_prefetch_fini(struct super_block *sb, ext4_group_t group,
+                                 unsigned int nr);
+
 extern void ext4_free_blocks(handle_t *handle, struct inode *inode,
                             struct buffer_head *bh, ext4_fsblk_t block,
                             unsigned long count, int flags);
@@ -2765,8 +2804,7 @@ extern int ext4_search_dir(struct buffer_head *bh,
                           struct ext4_filename *fname,
                           unsigned int offset,
                           struct ext4_dir_entry_2 **res_dir);
-extern int ext4_generic_delete_entry(handle_t *handle,
-                                    struct inode *dir,
+extern int ext4_generic_delete_entry(struct inode *dir,
                                     struct ext4_dir_entry_2 *de_del,
                                     struct buffer_head *bh,
                                     void *entry_buf,
@@ -2924,12 +2962,6 @@ do {                                                                     \
 
 #endif
 
-extern int ext4_update_compat_feature(handle_t *handle, struct super_block *sb,
-                                       __u32 compat);
-extern int ext4_update_rocompat_feature(handle_t *handle,
-                                       struct super_block *sb, __u32 rocompat);
-extern int ext4_update_incompat_feature(handle_t *handle,
-                                       struct super_block *sb, __u32 incompat);
 extern ext4_fsblk_t ext4_block_bitmap(struct super_block *sb,
                                      struct ext4_group_desc *bg);
 extern ext4_fsblk_t ext4_inode_bitmap(struct super_block *sb,
@@ -3145,6 +3177,7 @@ struct ext4_group_info {
        (1 << EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT)
 #define EXT4_GROUP_INFO_IBITMAP_CORRUPT                \
        (1 << EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT)
+#define EXT4_GROUP_INFO_BBITMAP_READ_BIT       4
 
 #define EXT4_MB_GRP_NEED_INIT(grp)     \
        (test_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &((grp)->bb_state)))
@@ -3159,6 +3192,8 @@ struct ext4_group_info {
        (set_bit(EXT4_GROUP_INFO_WAS_TRIMMED_BIT, &((grp)->bb_state)))
 #define EXT4_MB_GRP_CLEAR_TRIMMED(grp) \
        (clear_bit(EXT4_GROUP_INFO_WAS_TRIMMED_BIT, &((grp)->bb_state)))
+#define EXT4_MB_GRP_TEST_AND_SET_READ(grp)     \
+       (test_and_set_bit(EXT4_GROUP_INFO_BBITMAP_READ_BIT, &((grp)->bb_state)))
 
 #define EXT4_MAX_CONTENTION            8
 #define EXT4_CONTENTION_THRESHOLD      2
@@ -3363,9 +3398,9 @@ extern void ext4_release_system_zone(struct super_block *sb);
 extern int ext4_setup_system_zone(struct super_block *sb);
 extern int __init ext4_init_system_zone(void);
 extern void ext4_exit_system_zone(void);
-extern int ext4_data_block_valid(struct ext4_sb_info *sbi,
-                                ext4_fsblk_t start_blk,
-                                unsigned int count);
+extern int ext4_inode_block_valid(struct inode *inode,
+                                 ext4_fsblk_t start_blk,
+                                 unsigned int count);
 extern int ext4_check_blockref(const char *, unsigned int,
                               struct inode *, __le32 *, unsigned int);
 
index 0c76cdd..760b9ee 100644 (file)
@@ -195,6 +195,28 @@ static void ext4_journal_abort_handle(const char *caller, unsigned int line,
        jbd2_journal_abort_handle(handle);
 }
 
+static void ext4_check_bdev_write_error(struct super_block *sb)
+{
+       struct address_space *mapping = sb->s_bdev->bd_inode->i_mapping;
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
+       int err;
+
+       /*
+        * If the block device has write error flag, it may have failed to
+        * async write out metadata buffers in the background. In this case,
+        * we could read old data from disk and write it out again, which
+        * may lead to on-disk filesystem inconsistency.
+        */
+       if (errseq_check(&mapping->wb_err, READ_ONCE(sbi->s_bdev_wb_err))) {
+               spin_lock(&sbi->s_bdev_wb_lock);
+               err = errseq_check_and_advance(&mapping->wb_err, &sbi->s_bdev_wb_err);
+               spin_unlock(&sbi->s_bdev_wb_lock);
+               if (err)
+                       ext4_error_err(sb, -err,
+                                      "Error while async write back metadata");
+       }
+}
+
 int __ext4_journal_get_write_access(const char *where, unsigned int line,
                                    handle_t *handle, struct buffer_head *bh)
 {
@@ -202,6 +224,9 @@ int __ext4_journal_get_write_access(const char *where, unsigned int line,
 
        might_sleep();
 
+       if (bh->b_bdev->bd_super)
+               ext4_check_bdev_write_error(bh->b_bdev->bd_super);
+
        if (ext4_handle_valid(handle)) {
                err = jbd2_journal_get_write_access(handle, bh);
                if (err)
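
The new helper follows the usual errseq shape: a cheap unlocked errseq_check() against a cached cookie, then, only on a mismatch, errseq_check_and_advance() under a spinlock so each error is reported once. The user-space model below imitates that check-then-advance pattern with a plain counter and a pthread mutex; wb_err_model, seen_cookie and report_writeback_error() are invented names and are not the kernel errseq API.

    #include <stdio.h>
    #include <pthread.h>

    /* toy stand-in for the mapping's wb_err sequence */
    static unsigned int wb_err_model;   /* bumped on each writeback error */
    static unsigned int seen_cookie;    /* last value this "filesystem" reported */
    static pthread_mutex_t seen_lock = PTHREAD_MUTEX_INITIALIZER;

    static void report_writeback_error(void)
    {
        /* cheap unlocked comparison, like errseq_check() */
        if (wb_err_model == seen_cookie)
            return;

        /* slow path: advance the cookie under a lock, like errseq_check_and_advance() */
        pthread_mutex_lock(&seen_lock);
        if (wb_err_model != seen_cookie) {
            seen_cookie = wb_err_model;
            printf("reporting async writeback error once\n");
        }
        pthread_mutex_unlock(&seen_lock);
    }

    int main(void)
    {
        report_writeback_error();   /* nothing to report yet */
        wb_err_model++;             /* pretend the block device hit a write error */
        report_writeback_error();   /* reported */
        report_writeback_error();   /* already seen, stays silent */
        return 0;
    }
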
index 221f240..a048158 100644 (file)
@@ -100,7 +100,7 @@ static int ext4_ext_trunc_restart_fn(struct inode *inode, int *dropped)
         * i_mutex. So we can safely drop the i_data_sem here.
         */
        BUG_ON(EXT4_JOURNAL(inode) == NULL);
-       ext4_discard_preallocations(inode);
+       ext4_discard_preallocations(inode, 0);
        up_write(&EXT4_I(inode)->i_data_sem);
        *dropped = 1;
        return 0;
@@ -340,7 +340,7 @@ static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
         */
        if (lblock + len <= lblock)
                return 0;
-       return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
+       return ext4_inode_block_valid(inode, block, len);
 }
 
 static int ext4_valid_extent_idx(struct inode *inode,
@@ -348,7 +348,7 @@ static int ext4_valid_extent_idx(struct inode *inode,
 {
        ext4_fsblk_t block = ext4_idx_pblock(ext_idx);
 
-       return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, 1);
+       return ext4_inode_block_valid(inode, block, 1);
 }
 
 static int ext4_valid_extent_entries(struct inode *inode,
@@ -507,14 +507,10 @@ __read_extent_tree_block(const char *function, unsigned int line,
        }
        if (buffer_verified(bh) && !(flags & EXT4_EX_FORCE_CACHE))
                return bh;
-       if (!ext4_has_feature_journal(inode->i_sb) ||
-           (inode->i_ino !=
-            le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum))) {
-               err = __ext4_ext_check(function, line, inode,
-                                      ext_block_hdr(bh), depth, pblk);
-               if (err)
-                       goto errout;
-       }
+       err = __ext4_ext_check(function, line, inode,
+                              ext_block_hdr(bh), depth, pblk);
+       if (err)
+               goto errout;
        set_buffer_verified(bh);
        /*
         * If this is a leaf block, cache all of its entries
@@ -693,10 +689,8 @@ void ext4_ext_drop_refs(struct ext4_ext_path *path)
                return;
        depth = path->p_depth;
        for (i = 0; i <= depth; i++, path++) {
-               if (path->p_bh) {
-                       brelse(path->p_bh);
-                       path->p_bh = NULL;
-               }
+               brelse(path->p_bh);
+               path->p_bh = NULL;
        }
 }
 
@@ -1915,7 +1909,7 @@ out:
 
 /*
  * ext4_ext_insert_extent:
- * tries to merge requsted extent into the existing extent or
+ * tries to merge requested extent into the existing extent or
  * inserts requested extent as new one into the tree,
  * creating new leaf in the no-space case.
  */
@@ -3125,7 +3119,7 @@ static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
  *
  *
  * Splits extent [a, b] into two extents [a, @split) and [@split, b], states
- * of which are deterimined by split_flag.
+ * of which are determined by split_flag.
  *
  * There are two cases:
  *  a> the extent are splitted into two extent.
@@ -3650,7 +3644,7 @@ static int ext4_split_convert_extents(handle_t *handle,
                eof_block = map->m_lblk + map->m_len;
        /*
         * It is safe to convert extent to initialized via explicit
-        * zeroout only if extent is fully insde i_size or new_size.
+        * zeroout only if extent is fully inside i_size or new_size.
         */
        depth = ext_depth(inode);
        ex = path[depth].p_ext;
@@ -4272,7 +4266,7 @@ got_allocated_blocks:
                         * not a good idea to call discard here directly,
                         * but otherwise we'd need to call it every free().
                         */
-                       ext4_discard_preallocations(inode);
+                       ext4_discard_preallocations(inode, 0);
                        if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
                                fb_flags = EXT4_FREE_BLOCKS_NO_QUOT_UPDATE;
                        ext4_free_blocks(handle, inode, NULL, newblock,
@@ -4495,7 +4489,7 @@ static long ext4_zero_range(struct file *file, loff_t offset,
        }
 
        /*
-        * Round up offset. This is not fallocate, we neet to zero out
+        * Round up offset. This is not fallocate, we need to zero out
         * blocks, so convert interior block aligned part of the range to
         * unwritten and possibly manually zero out unaligned parts of the
         * range.
@@ -5299,7 +5293,7 @@ static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
        }
 
        down_write(&EXT4_I(inode)->i_data_sem);
-       ext4_discard_preallocations(inode);
+       ext4_discard_preallocations(inode, 0);
 
        ret = ext4_es_remove_extent(inode, punch_start,
                                    EXT_MAX_BLOCKS - punch_start);
@@ -5313,7 +5307,7 @@ static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
                up_write(&EXT4_I(inode)->i_data_sem);
                goto out_stop;
        }
-       ext4_discard_preallocations(inode);
+       ext4_discard_preallocations(inode, 0);
 
        ret = ext4_ext_shift_extents(inode, handle, punch_stop,
                                     punch_stop - punch_start, SHIFT_LEFT);
@@ -5445,7 +5439,7 @@ static int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
                goto out_stop;
 
        down_write(&EXT4_I(inode)->i_data_sem);
-       ext4_discard_preallocations(inode);
+       ext4_discard_preallocations(inode, 0);
 
        path = ext4_find_extent(inode, offset_lblk, NULL, 0);
        if (IS_ERR(path)) {
@@ -5579,7 +5573,7 @@ ext4_swap_extents(handle_t *handle, struct inode *inode1,
                }
                ex1 = path1[path1->p_depth].p_ext;
                ex2 = path2[path2->p_depth].p_ext;
-               /* Do we have somthing to swap ? */
+               /* Do we have something to swap ? */
                if (unlikely(!ex2 || !ex1))
                        goto finish;
 
index 129cc1d..7d61069 100644 (file)
@@ -145,10 +145,9 @@ static int ext4_release_file(struct inode *inode, struct file *filp)
        /* if we are the last writer on the inode, drop the block reservation */
        if ((filp->f_mode & FMODE_WRITE) &&
                        (atomic_read(&inode->i_writecount) == 1) &&
-                       !EXT4_I(inode)->i_reserved_data_blocks)
-       {
+                       !EXT4_I(inode)->i_reserved_data_blocks) {
                down_write(&EXT4_I(inode)->i_data_sem);
-               ext4_discard_preallocations(inode);
+               ext4_discard_preallocations(inode, 0);
                up_write(&EXT4_I(inode)->i_data_sem);
        }
        if (is_dx(inode) && filp->private_data)
@@ -428,6 +427,10 @@ restart:
         */
        if (*ilock_shared && (!IS_NOSEC(inode) || *extend ||
             !ext4_overwrite_io(inode, offset, count))) {
+               if (iocb->ki_flags & IOCB_NOWAIT) {
+                       ret = -EAGAIN;
+                       goto out;
+               }
                inode_unlock_shared(inode);
                *ilock_shared = false;
                inode_lock(inode);
@@ -812,7 +815,7 @@ out:
        return err;
 }
 
-static int ext4_file_open(struct inode * inode, struct file * filp)
+static int ext4_file_open(struct inode *inode, struct file *filp)
 {
        int ret;
 
index 3e13379..2924261 100644 (file)
@@ -233,7 +233,7 @@ static int __ext4fs_dirhash(const char *name, int len,
                break;
        case DX_HASH_HALF_MD4_UNSIGNED:
                str2hashbuf = str2hashbuf_unsigned;
-               /* fall through */
+               fallthrough;
        case DX_HASH_HALF_MD4:
                p = name;
                while (len > 0) {
@@ -247,7 +247,7 @@ static int __ext4fs_dirhash(const char *name, int len,
                break;
        case DX_HASH_TEA_UNSIGNED:
                str2hashbuf = str2hashbuf_unsigned;
-               /* fall through */
+               fallthrough;
        case DX_HASH_TEA:
                p = name;
                while (len > 0) {
index be2b66e..80c9f33 100644 (file)
@@ -696,7 +696,7 @@ static int ext4_ind_trunc_restart_fn(handle_t *handle, struct inode *inode,
         * i_mutex. So we can safely drop the i_data_sem here.
         */
        BUG_ON(EXT4_JOURNAL(inode) == NULL);
-       ext4_discard_preallocations(inode);
+       ext4_discard_preallocations(inode, 0);
        up_write(&EXT4_I(inode)->i_data_sem);
        *dropped = 1;
        return 0;
@@ -858,8 +858,7 @@ static int ext4_clear_blocks(handle_t *handle, struct inode *inode,
        else if (ext4_should_journal_data(inode))
                flags |= EXT4_FREE_BLOCKS_FORGET;
 
-       if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), block_to_free,
-                                  count)) {
+       if (!ext4_inode_block_valid(inode, block_to_free, count)) {
                EXT4_ERROR_INODE(inode, "attempt to clear invalid "
                                 "blocks %llu len %lu",
                                 (unsigned long long) block_to_free, count);
@@ -1004,8 +1003,7 @@ static void ext4_free_branches(handle_t *handle, struct inode *inode,
                        if (!nr)
                                continue;               /* A hole */
 
-                       if (!ext4_data_block_valid(EXT4_SB(inode->i_sb),
-                                                  nr, 1)) {
+                       if (!ext4_inode_block_valid(inode, nr, 1)) {
                                EXT4_ERROR_INODE(inode,
                                                 "invalid indirect mapped "
                                                 "block %lu (level %d)",
@@ -1182,21 +1180,21 @@ do_indirects:
                        ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
                        i_data[EXT4_IND_BLOCK] = 0;
                }
-               /* fall through */
+               fallthrough;
        case EXT4_IND_BLOCK:
                nr = i_data[EXT4_DIND_BLOCK];
                if (nr) {
                        ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
                        i_data[EXT4_DIND_BLOCK] = 0;
                }
-               /* fall through */
+               fallthrough;
        case EXT4_DIND_BLOCK:
                nr = i_data[EXT4_TIND_BLOCK];
                if (nr) {
                        ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
                        i_data[EXT4_TIND_BLOCK] = 0;
                }
-               /* fall through */
+               fallthrough;
        case EXT4_TIND_BLOCK:
                ;
        }
@@ -1436,7 +1434,7 @@ do_indirects:
                        ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
                        i_data[EXT4_IND_BLOCK] = 0;
                }
-               /* fall through */
+               fallthrough;
        case EXT4_IND_BLOCK:
                if (++n >= n2)
                        break;
@@ -1445,7 +1443,7 @@ do_indirects:
                        ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
                        i_data[EXT4_DIND_BLOCK] = 0;
                }
-               /* fall through */
+               fallthrough;
        case EXT4_DIND_BLOCK:
                if (++n >= n2)
                        break;
@@ -1454,7 +1452,7 @@ do_indirects:
                        ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
                        i_data[EXT4_TIND_BLOCK] = 0;
                }
-               /* fall through */
+               fallthrough;
        case EXT4_TIND_BLOCK:
                ;
        }
index c3a1ad2..75c97bc 100644 (file)
@@ -276,7 +276,7 @@ static int ext4_create_inline_data(handle_t *handle,
                len = 0;
        }
 
-       /* Insert the the xttr entry. */
+       /* Insert the xttr entry. */
        i.value = value;
        i.value_len = len;
 
@@ -1706,7 +1706,7 @@ int ext4_delete_inline_entry(handle_t *handle,
        if (err)
                goto out;
 
-       err = ext4_generic_delete_entry(handle, dir, de_del, bh,
+       err = ext4_generic_delete_entry(dir, de_del, bh,
                                        inline_start, inline_size, 0);
        if (err)
                goto out;
index 44bad4b..3a196d8 100644 (file)
@@ -383,7 +383,7 @@ void ext4_da_update_reserve_space(struct inode *inode,
         */
        if ((ei->i_reserved_data_blocks == 0) &&
            !inode_is_open_for_write(inode))
-               ext4_discard_preallocations(inode);
+               ext4_discard_preallocations(inode, 0);
 }
 
 static int __check_block_validity(struct inode *inode, const char *func,
@@ -394,8 +394,7 @@ static int __check_block_validity(struct inode *inode, const char *func,
            (inode->i_ino ==
             le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum)))
                return 0;
-       if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk,
-                                  map->m_len)) {
+       if (!ext4_inode_block_valid(inode, map->m_pblk, map->m_len)) {
                ext4_error_inode(inode, func, line, map->m_pblk,
                                 "lblock %lu mapped to illegal pblock %llu "
                                 "(length %d)", (unsigned long) map->m_lblk,
@@ -3288,7 +3287,7 @@ static int ext4_releasepage(struct page *page, gfp_t wait)
        if (PageChecked(page))
                return 0;
        if (journal)
-               return jbd2_journal_try_to_free_buffers(journal, page, wait);
+               return jbd2_journal_try_to_free_buffers(journal, page);
        else
                return try_to_free_buffers(page);
 }
@@ -4056,7 +4055,7 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
        if (stop_block > first_block) {
 
                down_write(&EXT4_I(inode)->i_data_sem);
-               ext4_discard_preallocations(inode);
+               ext4_discard_preallocations(inode, 0);
 
                ret = ext4_es_remove_extent(inode, first_block,
                                            stop_block - first_block);
@@ -4163,7 +4162,7 @@ int ext4_truncate(struct inode *inode)
        trace_ext4_truncate_enter(inode);
 
        if (!ext4_can_truncate(inode))
-               return 0;
+               goto out_trace;
 
        if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC))
                ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
@@ -4172,16 +4171,14 @@ int ext4_truncate(struct inode *inode)
                int has_inline = 1;
 
                err = ext4_inline_data_truncate(inode, &has_inline);
-               if (err)
-                       return err;
-               if (has_inline)
-                       return 0;
+               if (err || has_inline)
+                       goto out_trace;
        }
 
        /* If we zero-out tail of the page, we have to create jinode for jbd2 */
        if (inode->i_size & (inode->i_sb->s_blocksize - 1)) {
                if (ext4_inode_attach_jinode(inode) < 0)
-                       return 0;
+                       goto out_trace;
        }
 
        if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
@@ -4190,8 +4187,10 @@ int ext4_truncate(struct inode *inode)
                credits = ext4_blocks_for_truncate(inode);
 
        handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
-       if (IS_ERR(handle))
-               return PTR_ERR(handle);
+       if (IS_ERR(handle)) {
+               err = PTR_ERR(handle);
+               goto out_trace;
+       }
 
        if (inode->i_size & (inode->i_sb->s_blocksize - 1))
                ext4_block_truncate_page(handle, mapping, inode->i_size);
@@ -4211,7 +4210,7 @@ int ext4_truncate(struct inode *inode)
 
        down_write(&EXT4_I(inode)->i_data_sem);
 
-       ext4_discard_preallocations(inode);
+       ext4_discard_preallocations(inode, 0);
 
        if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
                err = ext4_ext_truncate(handle, inode);
@@ -4242,6 +4241,7 @@ out_stop:
                err = err2;
        ext4_journal_stop(handle);
 
+out_trace:
        trace_ext4_truncate_exit(inode);
        return err;
 }
@@ -4760,7 +4760,7 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
 
        ret = 0;
        if (ei->i_file_acl &&
-           !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) {
+           !ext4_inode_block_valid(inode, ei->i_file_acl, 1)) {
                ext4_error_inode(inode, function, line, 0,
                                 "iget: bad extended attribute block %llu",
                                 ei->i_file_acl);
index 999cf6a..36eca3b 100644 (file)
@@ -202,7 +202,7 @@ static long swap_inode_boot_loader(struct super_block *sb,
        reset_inode_seed(inode);
        reset_inode_seed(inode_bl);
 
-       ext4_discard_preallocations(inode);
+       ext4_discard_preallocations(inode, 0);
 
        err = ext4_mark_inode_dirty(handle, inode);
        if (err < 0) {
@@ -819,12 +819,12 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
        switch (cmd) {
        case FS_IOC_GETFSMAP:
                return ext4_ioc_getfsmap(sb, (void __user *)arg);
-       case EXT4_IOC_GETFLAGS:
+       case FS_IOC_GETFLAGS:
                flags = ei->i_flags & EXT4_FL_USER_VISIBLE;
                if (S_ISREG(inode->i_mode))
                        flags &= ~EXT4_PROJINHERIT_FL;
                return put_user(flags, (int __user *) arg);
-       case EXT4_IOC_SETFLAGS: {
+       case FS_IOC_SETFLAGS: {
                int err;
 
                if (!inode_owner_or_capable(inode))
@@ -1129,12 +1129,12 @@ resizefs_out:
        case EXT4_IOC_PRECACHE_EXTENTS:
                return ext4_ext_precache(inode);
 
-       case EXT4_IOC_SET_ENCRYPTION_POLICY:
+       case FS_IOC_SET_ENCRYPTION_POLICY:
                if (!ext4_has_feature_encrypt(sb))
                        return -EOPNOTSUPP;
                return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
 
-       case EXT4_IOC_GET_ENCRYPTION_PWSALT: {
+       case FS_IOC_GET_ENCRYPTION_PWSALT: {
 #ifdef CONFIG_FS_ENCRYPTION
                int err, err2;
                struct ext4_sb_info *sbi = EXT4_SB(sb);
@@ -1174,7 +1174,7 @@ resizefs_out:
                return -EOPNOTSUPP;
 #endif
        }
-       case EXT4_IOC_GET_ENCRYPTION_POLICY:
+       case FS_IOC_GET_ENCRYPTION_POLICY:
                if (!ext4_has_feature_encrypt(sb))
                        return -EOPNOTSUPP;
                return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
@@ -1236,7 +1236,7 @@ resizefs_out:
        case EXT4_IOC_GET_ES_CACHE:
                return ext4_ioctl_get_es_cache(filp, arg);
 
-       case EXT4_IOC_FSGETXATTR:
+       case FS_IOC_FSGETXATTR:
        {
                struct fsxattr fa;
 
@@ -1247,7 +1247,7 @@ resizefs_out:
                        return -EFAULT;
                return 0;
        }
-       case EXT4_IOC_FSSETXATTR:
+       case FS_IOC_FSSETXATTR:
        {
                struct fsxattr fa, old_fa;
                int err;
@@ -1313,11 +1313,11 @@ long ext4_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
        /* These are just misnamed, they actually get/put from/to user an int */
        switch (cmd) {
-       case EXT4_IOC32_GETFLAGS:
-               cmd = EXT4_IOC_GETFLAGS;
+       case FS_IOC32_GETFLAGS:
+               cmd = FS_IOC_GETFLAGS;
                break;
-       case EXT4_IOC32_SETFLAGS:
-               cmd = EXT4_IOC_SETFLAGS;
+       case FS_IOC32_SETFLAGS:
+               cmd = FS_IOC_SETFLAGS;
                break;
        case EXT4_IOC32_GETVERSION:
                cmd = EXT4_IOC_GETVERSION;
@@ -1361,9 +1361,9 @@ long ext4_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
        case EXT4_IOC_RESIZE_FS:
        case FITRIM:
        case EXT4_IOC_PRECACHE_EXTENTS:
-       case EXT4_IOC_SET_ENCRYPTION_POLICY:
-       case EXT4_IOC_GET_ENCRYPTION_PWSALT:
-       case EXT4_IOC_GET_ENCRYPTION_POLICY:
+       case FS_IOC_SET_ENCRYPTION_POLICY:
+       case FS_IOC_GET_ENCRYPTION_PWSALT:
+       case FS_IOC_GET_ENCRYPTION_POLICY:
        case FS_IOC_GET_ENCRYPTION_POLICY_EX:
        case FS_IOC_ADD_ENCRYPTION_KEY:
        case FS_IOC_REMOVE_ENCRYPTION_KEY:
@@ -1377,8 +1377,8 @@ long ext4_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
        case EXT4_IOC_CLEAR_ES_CACHE:
        case EXT4_IOC_GETSTATE:
        case EXT4_IOC_GET_ES_CACHE:
-       case EXT4_IOC_FSGETXATTR:
-       case EXT4_IOC_FSSETXATTR:
+       case FS_IOC_FSGETXATTR:
+       case FS_IOC_FSSETXATTR:
                break;
        default:
                return -ENOIOCTLCMD;
index c0a331e..132c118 100644 (file)
@@ -922,7 +922,7 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
                        bh[i] = NULL;
                        continue;
                }
-               bh[i] = ext4_read_block_bitmap_nowait(sb, group);
+               bh[i] = ext4_read_block_bitmap_nowait(sb, group, false);
                if (IS_ERR(bh[i])) {
                        err = PTR_ERR(bh[i]);
                        bh[i] = NULL;
@@ -1279,9 +1279,6 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
        e4b->bd_buddy_page = page;
        e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize);
 
-       BUG_ON(e4b->bd_bitmap_page == NULL);
-       BUG_ON(e4b->bd_buddy_page == NULL);
-
        return 0;
 
 err:
@@ -1743,10 +1740,6 @@ static void ext4_mb_use_best_found(struct ext4_allocation_context *ac,
 
 }
 
-/*
- * regular allocator, for general purposes allocation
- */
-
 static void ext4_mb_check_limits(struct ext4_allocation_context *ac,
                                        struct ext4_buddy *e4b,
                                        int finish_group)
@@ -2119,13 +2112,11 @@ static bool ext4_mb_good_group(struct ext4_allocation_context *ac,
 
        BUG_ON(cr < 0 || cr >= 4);
 
-       free = grp->bb_free;
-       if (free == 0)
-               return false;
-       if (cr <= 2 && free < ac->ac_g_ex.fe_len)
+       if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
                return false;
 
-       if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
+       free = grp->bb_free;
+       if (free == 0)
                return false;
 
        fragments = grp->bb_fragments;
@@ -2142,8 +2133,10 @@ static bool ext4_mb_good_group(struct ext4_allocation_context *ac,
                    ((group % flex_size) == 0))
                        return false;
 
-               if ((ac->ac_2order > ac->ac_sb->s_blocksize_bits+1) ||
-                   (free / fragments) >= ac->ac_g_ex.fe_len)
+               if (free < ac->ac_g_ex.fe_len)
+                       return false;
+
+               if (ac->ac_2order > ac->ac_sb->s_blocksize_bits+1)
                        return true;
 
                if (grp->bb_largest_free_order < ac->ac_2order)
@@ -2177,6 +2170,7 @@ static int ext4_mb_good_group_nolock(struct ext4_allocation_context *ac,
 {
        struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
        struct super_block *sb = ac->ac_sb;
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
        bool should_lock = ac->ac_flags & EXT4_MB_STRICT_CHECK;
        ext4_grpblk_t free;
        int ret = 0;
@@ -2195,7 +2189,25 @@ static int ext4_mb_good_group_nolock(struct ext4_allocation_context *ac,
 
        /* We only do this if the grp has never been initialized */
        if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
-               ret = ext4_mb_init_group(ac->ac_sb, group, GFP_NOFS);
+               struct ext4_group_desc *gdp =
+                       ext4_get_group_desc(sb, group, NULL);
+               int ret;
+
+               /* cr=0/1 is a very optimistic search to find large
+                * good chunks almost for free.  If buddy data is not
+                * ready, then this optimization makes no sense.  But
+                * we never skip the first block group in a flex_bg,
+                * since this gets used for metadata block allocation,
+                * and we want to make sure we locate metadata blocks
+                * in the first block group in the flex_bg if possible.
+                */
+               if (cr < 2 &&
+                   (!sbi->s_log_groups_per_flex ||
+                    ((group & ((1 << sbi->s_log_groups_per_flex) - 1)) != 0)) &&
+                   !(ext4_has_group_desc_csum(sb) &&
+                     (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))))
+                       return 0;
+               ret = ext4_mb_init_group(sb, group, GFP_NOFS);
                if (ret)
                        return ret;
        }
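
To make the new early-out concrete with assumed numbers: with s_log_groups_per_flex = 4 a group is the first of its flex_bg exactly when its low four bits are zero, so at cr=0/1 an uninitialized group 64 is still examined (metadata allocation wants to land there) while an uninitialized group 65 is skipped. The helper below only illustrates that mask test and ignores the BLOCK_UNINIT exception; it is a hypothetical sketch, not the kernel function.

    #include <stdio.h>

    /* would cr=0/1 skip this not-yet-initialized group? (illustrative only) */
    static int skip_uninit_group(int cr, unsigned int group,
                                 unsigned int log_groups_per_flex)
    {
        unsigned int first_in_flex =
            (group & ((1u << log_groups_per_flex) - 1)) == 0;

        return cr < 2 && log_groups_per_flex && !first_in_flex;
    }

    int main(void)
    {
        printf("group 64 at cr=0: skip=%d\n", skip_uninit_group(0, 64, 4)); /* 0 */
        printf("group 65 at cr=0: skip=%d\n", skip_uninit_group(0, 65, 4)); /* 1 */
        printf("group 65 at cr=2: skip=%d\n", skip_uninit_group(2, 65, 4)); /* 0 */
        return 0;
    }
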
@@ -2209,15 +2221,95 @@ out:
        return ret;
 }
 
+/*
+ * Start prefetching @nr block bitmaps starting at @group.
+ * Return the next group which needs to be prefetched.
+ */
+ext4_group_t ext4_mb_prefetch(struct super_block *sb, ext4_group_t group,
+                             unsigned int nr, int *cnt)
+{
+       ext4_group_t ngroups = ext4_get_groups_count(sb);
+       struct buffer_head *bh;
+       struct blk_plug plug;
+
+       blk_start_plug(&plug);
+       while (nr-- > 0) {
+               struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group,
+                                                                 NULL);
+               struct ext4_group_info *grp = ext4_get_group_info(sb, group);
+
+               /*
+                * Prefetch block groups with free blocks; but don't
+                * bother if it is marked uninitialized on disk, since
+                * it won't require I/O to read.  Also only try to
+                * prefetch once, so we avoid getblk() call, which can
+                * be expensive.
+                */
+               if (!EXT4_MB_GRP_TEST_AND_SET_READ(grp) &&
+                   EXT4_MB_GRP_NEED_INIT(grp) &&
+                   ext4_free_group_clusters(sb, gdp) > 0 &&
+                   !(ext4_has_group_desc_csum(sb) &&
+                     (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))) {
+                       bh = ext4_read_block_bitmap_nowait(sb, group, true);
+                       if (bh && !IS_ERR(bh)) {
+                               if (!buffer_uptodate(bh) && cnt)
+                                       (*cnt)++;
+                               brelse(bh);
+                       }
+               }
+               if (++group >= ngroups)
+                       group = 0;
+       }
+       blk_finish_plug(&plug);
+       return group;
+}
+
+/*
+ * Prefetching reads the block bitmap into the buffer cache; but we
+ * need to make sure that the buddy bitmap in the page cache has been
+ * initialized.  Note that ext4_mb_init_group() will block if the I/O
+ * is not yet completed, or indeed if the I/O was not initiated by
+ * ext4_mb_prefetch at all.
+ *
+ * TODO: We should actually kick off the buddy bitmap setup in a work
+ * queue when the buffer I/O is completed, so that we don't block
+ * waiting for the block allocation bitmap read to finish when
+ * ext4_mb_prefetch_fini is called from ext4_mb_regular_allocator().
+ */
+void ext4_mb_prefetch_fini(struct super_block *sb, ext4_group_t group,
+                          unsigned int nr)
+{
+       while (nr-- > 0) {
+               struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group,
+                                                                 NULL);
+               struct ext4_group_info *grp = ext4_get_group_info(sb, group);
+
+               if (!group)
+                       group = ext4_get_groups_count(sb);
+               group--;
+               grp = ext4_get_group_info(sb, group);
+
+               if (EXT4_MB_GRP_NEED_INIT(grp) &&
+                   ext4_free_group_clusters(sb, gdp) > 0 &&
+                   !(ext4_has_group_desc_csum(sb) &&
+                     (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))) {
+                       if (ext4_mb_init_group(sb, group, GFP_NOFS))
+                               break;
+               }
+       }
+}
+
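
A minimal sketch of the gating in ext4_mb_prefetch() above: each group's bitmap read is submitted at most once, groups that are empty or marked uninitialized on disk are skipped since they need no I/O, and the walk wraps at the last group. The struct group array and start_bitmap_read() below are invented stand-ins; the real code additionally batches the submissions under a blk_plug and uses the group-info bit helpers.

    #include <stdio.h>
    #include <stdbool.h>

    struct group {
        bool read_started;   /* models EXT4_MB_GRP_TEST_AND_SET_READ() */
        bool block_uninit;   /* models the on-disk BLOCK_UNINIT flag */
        int  free_clusters;
    };

    static void start_bitmap_read(int g)
    {
        printf("prefetching bitmap of group %d\n", g);
    }

    /* prefetch up to nr groups starting at group, return where the walk stopped */
    static int prefetch(struct group *grps, int ngroups, int group, int nr, int *ios)
    {
        while (nr-- > 0) {
            struct group *grp = &grps[group];

            if (!grp->read_started && !grp->block_uninit &&
                grp->free_clusters > 0) {
                grp->read_started = true;   /* never submit this read twice */
                start_bitmap_read(group);
                (*ios)++;
            }
            if (++group >= ngroups)
                group = 0;                  /* wrap around like the kernel loop */
        }
        return group;
    }

    int main(void)
    {
        struct group grps[4] = {
            { false, false, 10 },
            { false, true,  50 },   /* uninit on disk: skipped */
            { false, false,  0 },   /* no free clusters: skipped */
            { false, false,  7 },
        };
        int ios = 0;
        int next = prefetch(grps, 4, 2, 4, &ios);

        printf("next group %d, %d reads submitted\n", next, ios);
        return 0;
    }
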
 static noinline_for_stack int
 ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
 {
-       ext4_group_t ngroups, group, i;
+       ext4_group_t prefetch_grp = 0, ngroups, group, i;
        int cr = -1;
        int err = 0, first_err = 0;
+       unsigned int nr = 0, prefetch_ios = 0;
        struct ext4_sb_info *sbi;
        struct super_block *sb;
        struct ext4_buddy e4b;
+       int lost;
 
        sb = ac->ac_sb;
        sbi = EXT4_SB(sb);
@@ -2237,8 +2329,8 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
                goto out;
 
        /*
-        * ac->ac2_order is set only if the fe_len is a power of 2
-        * if ac2_order is set we also set criteria to 0 so that we
+        * ac->ac_2order is set only if the fe_len is a power of 2
+        * if ac->ac_2order is set we also set criteria to 0 so that we
         * try exact allocation using buddy.
         */
        i = fls(ac->ac_g_ex.fe_len);
@@ -2282,6 +2374,7 @@ repeat:
                 * from the goal value specified
                 */
                group = ac->ac_g_ex.fe_group;
+               prefetch_grp = group;
 
                for (i = 0; i < ngroups; group++, i++) {
                        int ret = 0;
@@ -2293,6 +2386,29 @@ repeat:
                        if (group >= ngroups)
                                group = 0;
 
+                       /*
+                        * Batch reads of the block allocation bitmaps
+                        * to get multiple READs in flight; limit
+                        * prefetching at cr=0/1, otherwise mballoc can
+                        * spend a lot of time loading imperfect groups
+                        */
+                       if ((prefetch_grp == group) &&
+                           (cr > 1 ||
+                            prefetch_ios < sbi->s_mb_prefetch_limit)) {
+                               unsigned int curr_ios = prefetch_ios;
+
+                               nr = sbi->s_mb_prefetch;
+                               if (ext4_has_feature_flex_bg(sb)) {
+                                       nr = (group / sbi->s_mb_prefetch) *
+                                               sbi->s_mb_prefetch;
+                                       nr = nr + sbi->s_mb_prefetch - group;
+                               }
+                               prefetch_grp = ext4_mb_prefetch(sb, group,
+                                                       nr, &prefetch_ios);
+                               if (prefetch_ios == curr_ios)
+                                       nr = 0;
+                       }
+
                        /* This now checks without needing the buddy page */
                        ret = ext4_mb_good_group_nolock(ac, group, cr);
                        if (ret <= 0) {
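
To see what the flex_bg branch above does to nr, take the assumed values s_mb_prefetch = 128 and group = 200: nr is first rounded down to the start of the current prefetch window (128) and then extended to its end, 128 + 128 - 200 = 56 groups, so the next batch starts exactly at group 256. The snippet below just evaluates that arithmetic with those made-up numbers.

    #include <stdio.h>

    int main(void)
    {
        unsigned int s_mb_prefetch = 128;   /* assumed: flex size 16 * 8 IOs */
        unsigned int group = 200;           /* assumed current group */
        unsigned int nr;

        nr = (group / s_mb_prefetch) * s_mb_prefetch;   /* window start: 128 */
        nr = nr + s_mb_prefetch - group;                /* groups left in window: 56 */

        printf("prefetch %u groups, next window starts at %u\n", nr, group + nr);
        return 0;
    }
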
@@ -2341,22 +2457,24 @@ repeat:
                 * We've been searching too long. Let's try to allocate
                 * the best chunk we've found so far
                 */
-
                ext4_mb_try_best_found(ac, &e4b);
                if (ac->ac_status != AC_STATUS_FOUND) {
                        /*
                         * Someone more lucky has already allocated it.
                         * The only thing we can do is just take first
                         * found block(s)
-                       printk(KERN_DEBUG "EXT4-fs: someone won our chunk\n");
                         */
+                       lost = atomic_inc_return(&sbi->s_mb_lost_chunks);
+                       mb_debug(sb, "lost chunk, group: %u, start: %d, len: %d, lost: %d\n",
+                                ac->ac_b_ex.fe_group, ac->ac_b_ex.fe_start,
+                                ac->ac_b_ex.fe_len, lost);
+
                        ac->ac_b_ex.fe_group = 0;
                        ac->ac_b_ex.fe_start = 0;
                        ac->ac_b_ex.fe_len = 0;
                        ac->ac_status = AC_STATUS_CONTINUE;
                        ac->ac_flags |= EXT4_MB_HINT_FIRST;
                        cr = 3;
-                       atomic_inc(&sbi->s_mb_lost_chunks);
                        goto repeat;
                }
        }
@@ -2367,6 +2485,10 @@ out:
        mb_debug(sb, "Best len %d, origin len %d, ac_status %u, ac_flags 0x%x, cr %d ret %d\n",
                 ac->ac_b_ex.fe_len, ac->ac_o_ex.fe_len, ac->ac_status,
                 ac->ac_flags, cr, err);
+
+       if (nr)
+               ext4_mb_prefetch_fini(sb, prefetch_grp, nr);
+
        return err;
 }
 
@@ -2439,7 +2561,7 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
        for (i = 0; i <= 13; i++)
                seq_printf(seq, " %-5u", i <= blocksize_bits + 1 ?
                                sg.info.bb_counters[i] : 0);
-       seq_printf(seq, " ]\n");
+       seq_puts(seq, " ]\n");
 
        return 0;
 }
@@ -2613,6 +2735,26 @@ static int ext4_mb_init_backend(struct super_block *sb)
                        goto err_freebuddy;
        }
 
+       if (ext4_has_feature_flex_bg(sb)) {
+               /* a single flex group is supposed to be read by a single IO */
+               sbi->s_mb_prefetch = 1 << sbi->s_es->s_log_groups_per_flex;
+               sbi->s_mb_prefetch *= 8; /* 8 prefetch IOs in flight at most */
+       } else {
+               sbi->s_mb_prefetch = 32;
+       }
+       if (sbi->s_mb_prefetch > ext4_get_groups_count(sb))
+               sbi->s_mb_prefetch = ext4_get_groups_count(sb);
+       /* How many real I/Os to prefetch within a single allocation at cr=0.
+        * Given that cr=0 is a CPU-related optimization we shouldn't try to
+        * load too many groups; at some point we should start to use what
+        * we've got in memory.
+        * With an average random access time of 5ms, it'd take a second to
+        * read 200 groups (* N with flex_bg), so limit this to 4 prefetch
+        * batches per allocation.
+        */
+       sbi->s_mb_prefetch_limit = sbi->s_mb_prefetch * 4;
+       if (sbi->s_mb_prefetch_limit > ext4_get_groups_count(sb))
+               sbi->s_mb_prefetch_limit = ext4_get_groups_count(sb);
+
        return 0;
 
 err_freebuddy:
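
Plugging an assumed s_log_groups_per_flex of 4 into the sizing above: a flex group of 16 block groups gives s_mb_prefetch = 16 * 8 = 128 bitmaps per batch and s_mb_prefetch_limit = 128 * 4 = 512 reads per allocation, both clamped to the group count on small filesystems. The snippet only reproduces that arithmetic; the filesystem size is made up.

    #include <stdio.h>

    int main(void)
    {
        unsigned int log_groups_per_flex = 4;   /* assumed on-disk value */
        unsigned int groups_count = 4096;       /* assumed filesystem size */
        unsigned int prefetch, limit;

        prefetch = (1u << log_groups_per_flex) * 8;   /* 16 groups/flex * 8 IOs = 128 */
        if (prefetch > groups_count)
            prefetch = groups_count;

        limit = prefetch * 4;                          /* ~4 prefetch batches */
        if (limit > groups_count)
            limit = groups_count;

        printf("s_mb_prefetch=%u s_mb_prefetch_limit=%u\n", prefetch, limit);
        return 0;
    }
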
@@ -2736,6 +2878,7 @@ int ext4_mb_init(struct super_block *sb)
        sbi->s_mb_stats = MB_DEFAULT_STATS;
        sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
        sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
+       sbi->s_mb_max_inode_prealloc = MB_DEFAULT_MAX_INODE_PREALLOC;
        /*
         * The default group preallocation is 512, which for 4k block
         * sizes translates to 2 megabytes.  However for bigalloc file
@@ -3090,7 +3233,7 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
        block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
 
        len = EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
-       if (!ext4_data_block_valid(sbi, block, len)) {
+       if (!ext4_inode_block_valid(ac->ac_inode, block, len)) {
                ext4_error(sb, "Allocating blocks %llu-%llu which overlap "
                           "fs metadata", block, block+len);
                /* File system mounted not to panic on error
@@ -3674,6 +3817,26 @@ void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
        mb_debug(sb, "preallocated %d for group %u\n", preallocated, group);
 }
 
+static void ext4_mb_mark_pa_deleted(struct super_block *sb,
+                                   struct ext4_prealloc_space *pa)
+{
+       struct ext4_inode_info *ei;
+
+       if (pa->pa_deleted) {
+               ext4_warning(sb, "deleted pa, type:%d, pblk:%llu, lblk:%u, len:%d\n",
+                            pa->pa_type, pa->pa_pstart, pa->pa_lstart,
+                            pa->pa_len);
+               return;
+       }
+
+       pa->pa_deleted = 1;
+
+       if (pa->pa_type == MB_INODE_PA) {
+               ei = EXT4_I(pa->pa_inode);
+               atomic_dec(&ei->i_prealloc_active);
+       }
+}
+
 static void ext4_mb_pa_callback(struct rcu_head *head)
 {
        struct ext4_prealloc_space *pa;
@@ -3706,7 +3869,7 @@ static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
                return;
        }
 
-       pa->pa_deleted = 1;
+       ext4_mb_mark_pa_deleted(sb, pa);
        spin_unlock(&pa->pa_lock);
 
        grp_blk = pa->pa_pstart;
@@ -3830,6 +3993,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
        spin_lock(pa->pa_obj_lock);
        list_add_rcu(&pa->pa_inode_list, &ei->i_prealloc_list);
        spin_unlock(pa->pa_obj_lock);
+       atomic_inc(&ei->i_prealloc_active);
 }
 
 /*
@@ -4040,7 +4204,7 @@ repeat:
                }
 
                /* seems this one can be freed ... */
-               pa->pa_deleted = 1;
+               ext4_mb_mark_pa_deleted(sb, pa);
 
                /* we can trust pa_free ... */
                free += pa->pa_free;
@@ -4103,7 +4267,7 @@ out_dbg:
  *
  * FIXME!! Make sure it is valid at all the call sites
  */
-void ext4_discard_preallocations(struct inode *inode)
+void ext4_discard_preallocations(struct inode *inode, unsigned int needed)
 {
        struct ext4_inode_info *ei = EXT4_I(inode);
        struct super_block *sb = inode->i_sb;
@@ -4121,15 +4285,19 @@ void ext4_discard_preallocations(struct inode *inode)
 
        mb_debug(sb, "discard preallocation for inode %lu\n",
                 inode->i_ino);
-       trace_ext4_discard_preallocations(inode);
+       trace_ext4_discard_preallocations(inode,
+                       atomic_read(&ei->i_prealloc_active), needed);
 
        INIT_LIST_HEAD(&list);
 
+       if (needed == 0)
+               needed = UINT_MAX;
+
 repeat:
        /* first, collect all pa's in the inode */
        spin_lock(&ei->i_prealloc_lock);
-       while (!list_empty(&ei->i_prealloc_list)) {
-               pa = list_entry(ei->i_prealloc_list.next,
+       while (!list_empty(&ei->i_prealloc_list) && needed) {
+               pa = list_entry(ei->i_prealloc_list.prev,
                                struct ext4_prealloc_space, pa_inode_list);
                BUG_ON(pa->pa_obj_lock != &ei->i_prealloc_lock);
                spin_lock(&pa->pa_lock);
@@ -4146,10 +4314,11 @@ repeat:
 
                }
                if (pa->pa_deleted == 0) {
-                       pa->pa_deleted = 1;
+                       ext4_mb_mark_pa_deleted(sb, pa);
                        spin_unlock(&pa->pa_lock);
                        list_del_rcu(&pa->pa_inode_list);
                        list_add(&pa->u.pa_tmp_list, &list);
+                       needed--;
                        continue;
                }
 
@@ -4399,7 +4568,7 @@ ext4_mb_initialize_context(struct ext4_allocation_context *ac,
        ac->ac_g_ex = ac->ac_o_ex;
        ac->ac_flags = ar->flags;
 
-       /* we have to define context: we'll we work with a file or
+       /* we have to define context: we'll work with a file or
         * locality group. this is a policy, actually */
        ext4_mb_group_or_file(ac);
 
@@ -4450,7 +4619,7 @@ ext4_mb_discard_lg_preallocations(struct super_block *sb,
                BUG_ON(pa->pa_type != MB_GROUP_PA);
 
                /* seems this one can be freed ... */
-               pa->pa_deleted = 1;
+               ext4_mb_mark_pa_deleted(sb, pa);
                spin_unlock(&pa->pa_lock);
 
                list_del_rcu(&pa->pa_inode_list);
@@ -4549,10 +4718,29 @@ static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
 }
 
 /*
+ * if per-inode prealloc list is too long, trim some PA
+ */
+static void ext4_mb_trim_inode_pa(struct inode *inode)
+{
+       struct ext4_inode_info *ei = EXT4_I(inode);
+       struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+       int count, delta;
+
+       count = atomic_read(&ei->i_prealloc_active);
+       delta = (sbi->s_mb_max_inode_prealloc >> 2) + 1;
+       if (count > sbi->s_mb_max_inode_prealloc + delta) {
+               count -= sbi->s_mb_max_inode_prealloc;
+               ext4_discard_preallocations(inode, count);
+       }
+}
+
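
With the default s_mb_max_inode_prealloc of 512, the hysteresis above works out to delta = (512 >> 2) + 1 = 129, so trimming only kicks in once an inode holds more than 641 active preallocations, at which point count - 512 of them are discarded from the tail of the list. The check below reproduces that arithmetic; the count of 700 is a made-up example.

    #include <stdio.h>

    int main(void)
    {
        int max_inode_prealloc = 512;                /* MB_DEFAULT_MAX_INODE_PREALLOC */
        int count = 700;                             /* assumed active PAs on one inode */
        int delta = (max_inode_prealloc >> 2) + 1;   /* 129: trim hysteresis */

        if (count > max_inode_prealloc + delta)      /* 700 > 641: trim */
            printf("discard %d oldest preallocations\n",
                   count - max_inode_prealloc);      /* 188 */
        else
            printf("below threshold, keep all %d\n", count);
        return 0;
    }
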
+/*
  * release all resource we used in allocation
  */
 static int ext4_mb_release_context(struct ext4_allocation_context *ac)
 {
+       struct inode *inode = ac->ac_inode;
+       struct ext4_inode_info *ei = EXT4_I(inode);
        struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
        struct ext4_prealloc_space *pa = ac->ac_pa;
        if (pa) {
@@ -4564,21 +4752,31 @@ static int ext4_mb_release_context(struct ext4_allocation_context *ac)
                        pa->pa_free -= ac->ac_b_ex.fe_len;
                        pa->pa_len -= ac->ac_b_ex.fe_len;
                        spin_unlock(&pa->pa_lock);
+
+                       /*
+                        * We want to add the pa to the right bucket.
+                        * Remove it from the list and while adding
+                        * make sure the list to which we are adding
+                        * doesn't grow big.
+                        */
+                       if (likely(pa->pa_free)) {
+                               spin_lock(pa->pa_obj_lock);
+                               list_del_rcu(&pa->pa_inode_list);
+                               spin_unlock(pa->pa_obj_lock);
+                               ext4_mb_add_n_trim(ac);
+                       }
                }
-       }
-       if (pa) {
-               /*
-                * We want to add the pa to the right bucket.
-                * Remove it from the list and while adding
-                * make sure the list to which we are adding
-                * doesn't grow big.
-                */
-               if ((pa->pa_type == MB_GROUP_PA) && likely(pa->pa_free)) {
+
+               if (pa->pa_type == MB_INODE_PA) {
+                       /*
+                        * treat per-inode prealloc list as a lru list, then try
+                        * to trim the least recently used PA.
+                        */
                        spin_lock(pa->pa_obj_lock);
-                       list_del_rcu(&pa->pa_inode_list);
+                       list_move(&pa->pa_inode_list, &ei->i_prealloc_list);
                        spin_unlock(pa->pa_obj_lock);
-                       ext4_mb_add_n_trim(ac);
                }
+
                ext4_mb_put_pa(ac, ac->ac_sb, pa);
        }
        if (ac->ac_bitmap_page)
@@ -4588,6 +4786,7 @@ static int ext4_mb_release_context(struct ext4_allocation_context *ac)
        if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
                mutex_unlock(&ac->ac_lg->lg_mutex);
        ext4_mb_collect_stats(ac);
+       ext4_mb_trim_inode_pa(inode);
        return 0;
 }
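
The list handling above turns i_prealloc_list into an LRU: a preallocation that was just used is moved back to the head, and ext4_discard_preallocations() now trims from the tail (the list's .prev side). The toy user-space model below captures only that ordering with a plain array; the pa_add/pa_use/pa_trim names are invented, and the kernel of course uses list_head plus RCU instead.

    #include <stdio.h>
    #include <string.h>

    /* toy LRU: pa_list[0] is most recently used, pa_list[n_pa-1] least recently used */
    static int pa_list[8];
    static int n_pa;

    static void pa_add(int id)                 /* a new PA goes to the front */
    {
        memmove(&pa_list[1], &pa_list[0], n_pa * sizeof(int));
        pa_list[0] = id;
        n_pa++;
    }

    static void pa_use(int idx)                /* a used PA moves back to the front */
    {
        int id = pa_list[idx];

        memmove(&pa_list[1], &pa_list[0], idx * sizeof(int));
        pa_list[0] = id;
    }

    static void pa_trim(int needed)            /* discard from the tail, like .prev */
    {
        while (needed-- > 0 && n_pa > 0)
            printf("discarding least recently used pa %d\n", pa_list[--n_pa]);
    }

    int main(void)
    {
        int i;

        for (i = 1; i <= 5; i++)
            pa_add(i);          /* list: 5 4 3 2 1 */
        pa_use(4);              /* pa 1 used again: 1 5 4 3 2 */
        pa_trim(2);             /* drops pa 2, then pa 3 */
        return 0;
    }
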
 
@@ -4915,7 +5114,7 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
 
        sbi = EXT4_SB(sb);
        if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
-           !ext4_data_block_valid(sbi, block, count)) {
+           !ext4_inode_block_valid(inode, block, count)) {
                ext4_error(sb, "Freeing blocks not in datazone - "
                           "block = %llu, count = %lu", block, count);
                goto error_return;
index 6b4d17c..e75b474 100644 (file)
  */
 #define MB_DEFAULT_GROUP_PREALLOC      512
 
+/*
+ * maximum length of inode prealloc list
+ */
+#define MB_DEFAULT_MAX_INODE_PREALLOC  512
 
 struct ext4_free_data {
        /* this links the free block information from sb_info */
index 1ed86fb..0d601b8 100644 (file)
@@ -686,8 +686,8 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, __u64 orig_blk,
 
 out:
        if (*moved_len) {
-               ext4_discard_preallocations(orig_inode);
-               ext4_discard_preallocations(donor_inode);
+               ext4_discard_preallocations(orig_inode, 0);
+               ext4_discard_preallocations(donor_inode, 0);
        }
 
        ext4_ext_drop_refs(path);
index 56738b5..153a9fb 100644 (file)
@@ -1396,8 +1396,8 @@ int ext4_search_dir(struct buffer_head *bh, char *search_buf, int buf_size,
                    ext4_match(dir, fname, de)) {
                        /* found a match - just to be sure, do
                         * a full check */
-                       if (ext4_check_dir_entry(dir, NULL, de, bh, bh->b_data,
-                                                bh->b_size, offset))
+                       if (ext4_check_dir_entry(dir, NULL, de, bh, search_buf,
+                                                buf_size, offset))
                                return -1;
                        *res_dir = de;
                        return 1;
@@ -1858,7 +1858,7 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
                             blocksize, hinfo, map);
        map -= count;
        dx_sort_map(map, count);
-       /* Split the existing block in the middle, size-wise */
+       /* Ensure that neither split block is over half full */
        size = 0;
        move = 0;
        for (i = count-1; i >= 0; i--) {
@@ -1868,8 +1868,18 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
                size += map[i].size;
                move++;
        }
-       /* map index at which we will split */
-       split = count - move;
+       /*
+        * map index at which we will split
+        *
+        * If the sum of active entries didn't exceed half the block size, just
+        * split it in half by count; each resulting block will have at least
+        * half the space free.
+        */
+       if (i > 0)
+               split = count - move;
+       else
+               split = count/2;
+
        hash2 = map[split].hash;
        continued = hash2 == map[split - 1].hash;
        dxtrace(printk(KERN_INFO "Split block %lu at %x, %i/%i\n",
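
The hunk above changes how do_split() picks its split point: it walks the sorted map from the tail, accumulating entry sizes until one more entry would push the second block past half full, and only falls back to splitting purely by count when every entry fits in half a block. A minimal user-space sketch of that heuristic, assuming an invented pick_split_index() helper and a reduced map entry type (only the size field of struct dx_map_entry is kept):

    #include <stdio.h>

    struct map_entry { unsigned int size; };   /* reduced struct dx_map_entry */

    /*
     * Mirror of the split-point choice above: accumulate sizes from the tail
     * until the second block would be more than half full, then split before
     * the moved entries; if everything fits in half a block, split by count.
     */
    static int pick_split_index(const struct map_entry *map, int count,
                                unsigned int blocksize)
    {
            unsigned int size = 0;
            int move = 0, i;

            for (i = count - 1; i >= 0; i--) {
                    /* is more than half of this entry in the 2nd half? */
                    if (size + map[i].size / 2 > blocksize / 2)
                            break;
                    size += map[i].size;
                    move++;
            }
            return i > 0 ? count - move : count / 2;
    }

    int main(void)
    {
            struct map_entry big[4]   = { {300}, {300}, {300}, {300} };
            struct map_entry small[6] = { {40}, {40}, {40}, {40}, {40}, {40} };

            /* tail entries overflow half a 1024-byte block: split by size (2) */
            printf("split at %d\n", pick_split_index(big, 4, 1024));
            /* everything fits in half a block: split by count (3) */
            printf("split at %d\n", pick_split_index(small, 6, 1024));
            return 0;
    }
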
@@ -2455,8 +2465,7 @@ cleanup:
  * ext4_generic_delete_entry deletes a directory entry by merging it
  * with the previous entry
  */
-int ext4_generic_delete_entry(handle_t *handle,
-                             struct inode *dir,
+int ext4_generic_delete_entry(struct inode *dir,
                              struct ext4_dir_entry_2 *de_del,
                              struct buffer_head *bh,
                              void *entry_buf,
@@ -2472,7 +2481,7 @@ int ext4_generic_delete_entry(handle_t *handle,
        de = (struct ext4_dir_entry_2 *)entry_buf;
        while (i < buf_size - csum_size) {
                if (ext4_check_dir_entry(dir, NULL, de, bh,
-                                        bh->b_data, bh->b_size, i))
+                                        entry_buf, buf_size, i))
                        return -EFSCORRUPTED;
                if (de == de_del)  {
                        if (pde)
@@ -2517,8 +2526,7 @@ static int ext4_delete_entry(handle_t *handle,
        if (unlikely(err))
                goto out;
 
-       err = ext4_generic_delete_entry(handle, dir, de_del,
-                                       bh, bh->b_data,
+       err = ext4_generic_delete_entry(dir, de_del, bh, bh->b_data,
                                        dir->i_sb->s_blocksize, csum_size);
        if (err)
                goto out;
@@ -3193,30 +3201,33 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry)
         * in separate transaction */
        retval = dquot_initialize(dir);
        if (retval)
-               return retval;
+               goto out_trace;
        retval = dquot_initialize(d_inode(dentry));
        if (retval)
-               return retval;
+               goto out_trace;
 
-       retval = -ENOENT;
        bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL);
-       if (IS_ERR(bh))
-               return PTR_ERR(bh);
-       if (!bh)
-               goto end_unlink;
+       if (IS_ERR(bh)) {
+               retval = PTR_ERR(bh);
+               goto out_trace;
+       }
+       if (!bh) {
+               retval = -ENOENT;
+               goto out_trace;
+       }
 
        inode = d_inode(dentry);
 
-       retval = -EFSCORRUPTED;
-       if (le32_to_cpu(de->inode) != inode->i_ino)
-               goto end_unlink;
+       if (le32_to_cpu(de->inode) != inode->i_ino) {
+               retval = -EFSCORRUPTED;
+               goto out_bh;
+       }
 
        handle = ext4_journal_start(dir, EXT4_HT_DIR,
                                    EXT4_DATA_TRANS_BLOCKS(dir->i_sb));
        if (IS_ERR(handle)) {
                retval = PTR_ERR(handle);
-               handle = NULL;
-               goto end_unlink;
+               goto out_bh;
        }
 
        if (IS_DIRSYNC(dir))
@@ -3224,12 +3235,12 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry)
 
        retval = ext4_delete_entry(handle, dir, de, bh);
        if (retval)
-               goto end_unlink;
+               goto out_handle;
        dir->i_ctime = dir->i_mtime = current_time(dir);
        ext4_update_dx_flag(dir);
        retval = ext4_mark_inode_dirty(handle, dir);
        if (retval)
-               goto end_unlink;
+               goto out_handle;
        if (inode->i_nlink == 0)
                ext4_warning_inode(inode, "Deleting file '%.*s' with no links",
                                   dentry->d_name.len, dentry->d_name.name);
@@ -3251,10 +3262,11 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry)
                d_invalidate(dentry);
 #endif
 
-end_unlink:
+out_handle:
+       ext4_journal_stop(handle);
+out_bh:
        brelse(bh);
-       if (handle)
-               ext4_journal_stop(handle);
+out_trace:
        trace_ext4_unlink_exit(dentry, retval);
        return retval;
 }
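
The ext4_unlink() rework above replaces the single end_unlink label (and its handle != NULL test) with stacked exit labels, so every failure jumps to the point that releases exactly what has been acquired so far, and the success path unwinds through the same chain. A stand-alone sketch of that shape, with made-up acquire_a()/acquire_b() helpers instead of the real find-entry and journal-start calls:

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical resources standing in for the buffer head and the handle. */
    static void *acquire_a(void) { return malloc(1); }
    static void *acquire_b(void) { return malloc(1); }
    static int   do_work(void)   { return 0; }

    static int do_op(void)
    {
            void *a, *b;
            int ret;

            a = acquire_a();
            if (!a) {
                    ret = -1;
                    goto out_trace;          /* nothing acquired yet */
            }

            b = acquire_b();
            if (!b) {
                    ret = -2;
                    goto out_a;              /* only 'a' needs releasing */
            }

            ret = do_work();
            /* success and failure both unwind through the labels below */
            free(b);
    out_a:
            free(a);
    out_trace:
            printf("do_op() = %d\n", ret);   /* the trace_ext4_unlink_exit() slot */
            return ret;
    }

    int main(void) { return do_op() ? 1 : 0; }
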
index f2df2db..f014c5e 100644 (file)
@@ -140,7 +140,7 @@ static void bio_post_read_processing(struct bio_post_read_ctx *ctx)
                        return;
                }
                ctx->cur_step++;
-               /* fall-through */
+               fallthrough;
        case STEP_VERITY:
                if (ctx->enabled_steps & (1 << STEP_VERITY)) {
                        INIT_WORK(&ctx->work, verity_work);
@@ -148,7 +148,7 @@ static void bio_post_read_processing(struct bio_post_read_ctx *ctx)
                        return;
                }
                ctx->cur_step++;
-               /* fall-through */
+               fallthrough;
        default:
                __read_end_io(ctx->bio);
        }
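
The two hunks above swap /* fall-through */ comments for the kernel's fallthrough pseudo-keyword, which the compiler can actually check under -Wimplicit-fallthrough. A tiny user-space equivalent, assuming a GCC or Clang version that supports the statement attribute the kernel macro expands to (the step names just echo the surrounding code):

    #include <stdio.h>

    /* Roughly what include/linux/compiler_attributes.h does. */
    #ifndef __has_attribute
    #define __has_attribute(x) 0
    #endif
    #if __has_attribute(__fallthrough__)
    #define fallthrough __attribute__((__fallthrough__))
    #else
    #define fallthrough do {} while (0)      /* nothing to annotate */
    #endif

    static void run_steps_from(int step)
    {
            switch (step) {
            case 0:
                    printf("decrypt\n");
                    fallthrough;             /* deliberate: verity runs next */
            case 1:
                    printf("verity\n");
                    fallthrough;
            default:
                    printf("read end io\n");
            }
    }

    int main(void)
    {
            run_steps_from(0);
            return 0;
    }

Built with -Wimplicit-fallthrough, deleting one of the fallthrough; statements turns the silent fall-through into a warning, which a comment never could.
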
index 0907f90..ea425b4 100644 (file)
@@ -66,10 +66,10 @@ static int ext4_load_journal(struct super_block *, struct ext4_super_block *,
                             unsigned long journal_devnum);
 static int ext4_show_options(struct seq_file *seq, struct dentry *root);
 static int ext4_commit_super(struct super_block *sb, int sync);
-static void ext4_mark_recovery_complete(struct super_block *sb,
+static int ext4_mark_recovery_complete(struct super_block *sb,
                                        struct ext4_super_block *es);
-static void ext4_clear_journal_err(struct super_block *sb,
-                                  struct ext4_super_block *es);
+static int ext4_clear_journal_err(struct super_block *sb,
+                                 struct ext4_super_block *es);
 static int ext4_sync_fs(struct super_block *sb, int wait);
 static int ext4_remount(struct super_block *sb, int *flags, char *data);
 static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf);
@@ -744,6 +744,7 @@ void __ext4_msg(struct super_block *sb,
        struct va_format vaf;
        va_list args;
 
+       atomic_inc(&EXT4_SB(sb)->s_msg_count);
        if (!___ratelimit(&(EXT4_SB(sb)->s_msg_ratelimit_state), "EXT4-fs"))
                return;
 
@@ -754,9 +755,12 @@ void __ext4_msg(struct super_block *sb,
        va_end(args);
 }
 
-#define ext4_warning_ratelimit(sb)                                     \
-               ___ratelimit(&(EXT4_SB(sb)->s_warning_ratelimit_state), \
-                            "EXT4-fs warning")
+static int ext4_warning_ratelimit(struct super_block *sb)
+{
+       atomic_inc(&EXT4_SB(sb)->s_warning_count);
+       return ___ratelimit(&(EXT4_SB(sb)->s_warning_ratelimit_state),
+                           "EXT4-fs warning");
+}
 
 void __ext4_warning(struct super_block *sb, const char *function,
                    unsigned int line, const char *fmt, ...)
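
Both __ext4_msg() and the new ext4_warning_ratelimit() above bump an atomic counter before consulting ___ratelimit(), so the msg_count/warning_count files added to sysfs later in this series reflect every message, including the ones the rate limiter suppresses. A reduced sketch of that count-first ordering, with a toy burst-only limiter standing in for the kernel's ratelimit_state:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int msg_count;             /* like sbi->s_msg_count */

    /* Toy limiter: allow the first 'remaining' messages, drop the rest.
     * The real ___ratelimit() also refills its budget every interval. */
    static int toy_ratelimit(int *remaining)
    {
            if (*remaining <= 0)
                    return 0;
            (*remaining)--;
            return 1;
    }

    static void log_msg(int *remaining, const char *text)
    {
            atomic_fetch_add(&msg_count, 1); /* counted even when suppressed */
            if (!toy_ratelimit(remaining))
                    return;
            printf("EXT4-fs (toy): %s\n", text);
    }

    int main(void)
    {
            int burst = 2;

            for (int i = 0; i < 5; i++)
                    log_msg(&burst, "something happened");
            printf("counted %d, printed only the first 2\n",
                   atomic_load(&msg_count));
            return 0;
    }
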
@@ -1123,6 +1127,7 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
        inode_set_iversion(&ei->vfs_inode, 1);
        spin_lock_init(&ei->i_raw_lock);
        INIT_LIST_HEAD(&ei->i_prealloc_list);
+       atomic_set(&ei->i_prealloc_active, 0);
        spin_lock_init(&ei->i_prealloc_lock);
        ext4_es_init_tree(&ei->i_es_tree);
        rwlock_init(&ei->i_es_lock);
@@ -1216,7 +1221,7 @@ void ext4_clear_inode(struct inode *inode)
 {
        invalidate_inode_buffers(inode);
        clear_inode(inode);
-       ext4_discard_preallocations(inode);
+       ext4_discard_preallocations(inode, 0);
        ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS);
        dquot_drop(inode);
        if (EXT4_I(inode)->jinode) {
@@ -1288,8 +1293,8 @@ static int bdev_try_to_free_page(struct super_block *sb, struct page *page,
        if (!page_has_buffers(page))
                return 0;
        if (journal)
-               return jbd2_journal_try_to_free_buffers(journal, page,
-                                               wait & ~__GFP_DIRECT_RECLAIM);
+               return jbd2_journal_try_to_free_buffers(journal, page);
+
        return try_to_free_buffers(page);
 }
 
@@ -1522,6 +1527,7 @@ enum {
        Opt_dioread_nolock, Opt_dioread_lock,
        Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable,
        Opt_max_dir_size_kb, Opt_nojournal_checksum, Opt_nombcache,
+       Opt_prefetch_block_bitmaps,
 };
 
 static const match_table_t tokens = {
@@ -1614,6 +1620,7 @@ static const match_table_t tokens = {
        {Opt_inlinecrypt, "inlinecrypt"},
        {Opt_nombcache, "nombcache"},
        {Opt_nombcache, "no_mbcache"},  /* for backward compatibility */
+       {Opt_prefetch_block_bitmaps, "prefetch_block_bitmaps"},
        {Opt_removed, "check=none"},    /* mount option from ext2/3 */
        {Opt_removed, "nocheck"},       /* mount option from ext2/3 */
        {Opt_removed, "reservation"},   /* mount option from ext2/3 */
@@ -1831,6 +1838,8 @@ static const struct mount_opts {
        {Opt_max_dir_size_kb, 0, MOPT_GTE0},
        {Opt_test_dummy_encryption, 0, MOPT_STRING},
        {Opt_nombcache, EXT4_MOUNT_NO_MBCACHE, MOPT_SET},
+       {Opt_prefetch_block_bitmaps, EXT4_MOUNT_PREFETCH_BLOCK_BITMAPS,
+        MOPT_SET},
        {Opt_err, 0, 0}
 };
 
@@ -3213,15 +3222,34 @@ static void print_daily_error_info(struct timer_list *t)
 static int ext4_run_li_request(struct ext4_li_request *elr)
 {
        struct ext4_group_desc *gdp = NULL;
-       ext4_group_t group, ngroups;
-       struct super_block *sb;
+       struct super_block *sb = elr->lr_super;
+       ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count;
+       ext4_group_t group = elr->lr_next_group;
        unsigned long timeout = 0;
+       unsigned int prefetch_ios = 0;
        int ret = 0;
 
-       sb = elr->lr_super;
-       ngroups = EXT4_SB(sb)->s_groups_count;
+       if (elr->lr_mode == EXT4_LI_MODE_PREFETCH_BBITMAP) {
+               elr->lr_next_group = ext4_mb_prefetch(sb, group,
+                               EXT4_SB(sb)->s_mb_prefetch, &prefetch_ios);
+               if (prefetch_ios)
+                       ext4_mb_prefetch_fini(sb, elr->lr_next_group,
+                                             prefetch_ios);
+               trace_ext4_prefetch_bitmaps(sb, group, elr->lr_next_group,
+                                           prefetch_ios);
+               if (group >= elr->lr_next_group) {
+                       ret = 1;
+                       if (elr->lr_first_not_zeroed != ngroups &&
+                           !sb_rdonly(sb) && test_opt(sb, INIT_INODE_TABLE)) {
+                               elr->lr_next_group = elr->lr_first_not_zeroed;
+                               elr->lr_mode = EXT4_LI_MODE_ITABLE;
+                               ret = 0;
+                       }
+               }
+               return ret;
+       }
 
-       for (group = elr->lr_next_group; group < ngroups; group++) {
+       for (; group < ngroups; group++) {
                gdp = ext4_get_group_desc(sb, group, NULL);
                if (!gdp) {
                        ret = 1;
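
With the prefetch_block_bitmaps mount option, the hunk above gives the lazy-init request two phases: it first walks the groups issuing block-bitmap prefetch reads, and only once that pass wraps does it fall back to the old inode-table zeroing mode, and then only if some group still needs it. A reduced user-space sketch of that mode switch, with invented constants and a fake prefetch helper in place of ext4_mb_prefetch(); the read-only and INIT_INODE_TABLE checks are left out:

    #include <stdio.h>

    enum li_mode { LI_MODE_PREFETCH_BBITMAP, LI_MODE_ITABLE };

    struct li_request {
            enum li_mode mode;
            unsigned int next_group;
            unsigned int first_not_zeroed;
    };

    #define NGROUPS 8U

    /* Pretend to prefetch a batch of block bitmaps; return the next group. */
    static unsigned int fake_prefetch(unsigned int group, unsigned int batch)
    {
            printf("prefetch groups %u..%u\n", group, group + batch - 1);
            return group + batch;
    }

    /* Returns 1 when the request is done, 0 when it should be rescheduled. */
    static int run_li_request(struct li_request *elr)
    {
            if (elr->mode == LI_MODE_PREFETCH_BBITMAP) {
                    elr->next_group = fake_prefetch(elr->next_group, 4);
                    if (elr->next_group < NGROUPS)
                            return 0;        /* more bitmaps to prefetch */
                    if (elr->first_not_zeroed < NGROUPS) {
                            /* switch modes, like the hunk above */
                            elr->mode = LI_MODE_ITABLE;
                            elr->next_group = elr->first_not_zeroed;
                            return 0;
                    }
                    return 1;
            }
            printf("zero inode table of group %u\n", elr->next_group);
            elr->next_group++;
            return elr->next_group >= NGROUPS;
    }

    int main(void)
    {
            struct li_request elr = {
                    .mode = LI_MODE_PREFETCH_BBITMAP,
                    .next_group = 0,
                    .first_not_zeroed = 6,
            };

            while (!run_li_request(&elr))
                    ;
            return 0;
    }
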
@@ -3239,9 +3267,10 @@ static int ext4_run_li_request(struct ext4_li_request *elr)
                timeout = jiffies;
                ret = ext4_init_inode_table(sb, group,
                                            elr->lr_timeout ? 0 : 1);
+               trace_ext4_lazy_itable_init(sb, group);
                if (elr->lr_timeout == 0) {
                        timeout = (jiffies - timeout) *
-                                 elr->lr_sbi->s_li_wait_mult;
+                               EXT4_SB(elr->lr_super)->s_li_wait_mult;
                        elr->lr_timeout = timeout;
                }
                elr->lr_next_sched = jiffies + elr->lr_timeout;
@@ -3256,15 +3285,11 @@ static int ext4_run_li_request(struct ext4_li_request *elr)
  */
 static void ext4_remove_li_request(struct ext4_li_request *elr)
 {
-       struct ext4_sb_info *sbi;
-
        if (!elr)
                return;
 
-       sbi = elr->lr_sbi;
-
        list_del(&elr->lr_request);
-       sbi->s_li_request = NULL;
+       EXT4_SB(elr->lr_super)->s_li_request = NULL;
        kfree(elr);
 }
 
@@ -3473,7 +3498,6 @@ static int ext4_li_info_new(void)
 static struct ext4_li_request *ext4_li_request_new(struct super_block *sb,
                                            ext4_group_t start)
 {
-       struct ext4_sb_info *sbi = EXT4_SB(sb);
        struct ext4_li_request *elr;
 
        elr = kzalloc(sizeof(*elr), GFP_KERNEL);
@@ -3481,8 +3505,13 @@ static struct ext4_li_request *ext4_li_request_new(struct super_block *sb,
                return NULL;
 
        elr->lr_super = sb;
-       elr->lr_sbi = sbi;
-       elr->lr_next_group = start;
+       elr->lr_first_not_zeroed = start;
+       if (test_opt(sb, PREFETCH_BLOCK_BITMAPS))
+               elr->lr_mode = EXT4_LI_MODE_PREFETCH_BBITMAP;
+       else {
+               elr->lr_mode = EXT4_LI_MODE_ITABLE;
+               elr->lr_next_group = start;
+       }
 
        /*
         * Randomize first schedule time of the request to
@@ -3512,8 +3541,9 @@ int ext4_register_li_request(struct super_block *sb,
                goto out;
        }
 
-       if (first_not_zeroed == ngroups || sb_rdonly(sb) ||
-           !test_opt(sb, INIT_INODE_TABLE))
+       if (!test_opt(sb, PREFETCH_BLOCK_BITMAPS) &&
+           (first_not_zeroed == ngroups || sb_rdonly(sb) ||
+            !test_opt(sb, INIT_INODE_TABLE)))
                goto out;
 
        elr = ext4_li_request_new(sb, first_not_zeroed);
@@ -4710,11 +4740,13 @@ no_journal:
 
        ext4_set_resv_clusters(sb);
 
-       err = ext4_setup_system_zone(sb);
-       if (err) {
-               ext4_msg(sb, KERN_ERR, "failed to initialize system "
-                        "zone (%d)", err);
-               goto failed_mount4a;
+       if (test_opt(sb, BLOCK_VALIDITY)) {
+               err = ext4_setup_system_zone(sb);
+               if (err) {
+                       ext4_msg(sb, KERN_ERR, "failed to initialize system "
+                                "zone (%d)", err);
+                       goto failed_mount4a;
+               }
        }
 
        ext4_ext_init(sb);
@@ -4777,12 +4809,23 @@ no_journal:
        }
 #endif  /* CONFIG_QUOTA */
 
+       /*
+        * Save the original bdev mapping's wb_err value which could be
+        * used to detect the metadata async write error.
+        */
+       spin_lock_init(&sbi->s_bdev_wb_lock);
+       if (!sb_rdonly(sb))
+               errseq_check_and_advance(&sb->s_bdev->bd_inode->i_mapping->wb_err,
+                                        &sbi->s_bdev_wb_err);
+       sb->s_bdev->bd_super = sb;
        EXT4_SB(sb)->s_mount_state |= EXT4_ORPHAN_FS;
        ext4_orphan_cleanup(sb, es);
        EXT4_SB(sb)->s_mount_state &= ~EXT4_ORPHAN_FS;
        if (needs_recovery) {
                ext4_msg(sb, KERN_INFO, "recovery complete");
-               ext4_mark_recovery_complete(sb, es);
+               err = ext4_mark_recovery_complete(sb, es);
+               if (err)
+                       goto failed_mount8;
        }
        if (EXT4_SB(sb)->s_journal) {
                if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
@@ -4816,6 +4859,8 @@ no_journal:
        ratelimit_state_init(&sbi->s_err_ratelimit_state, 5 * HZ, 10);
        ratelimit_state_init(&sbi->s_warning_ratelimit_state, 5 * HZ, 10);
        ratelimit_state_init(&sbi->s_msg_ratelimit_state, 5 * HZ, 10);
+       atomic_set(&sbi->s_warning_count, 0);
+       atomic_set(&sbi->s_msg_count, 0);
 
        kfree(orig_data);
        return 0;
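
The wb_err snapshot added above records the block device mapping's error cursor at mount time, so that later metadata writeback failures can be detected as new errors since mount rather than being silently missed. A toy sketch of that sample-then-check idea; the plain counter below merely stands in for errseq_t (the real API also packs the errno and a seen flag into the value):

    #include <stdio.h>

    /* Toy error sequence: bumped every time a writeback error is recorded. */
    struct toy_errseq { unsigned long seq; };

    static void toy_set(struct toy_errseq *e) { e->seq++; }

    /* Like errseq_check_and_advance(): report whether anything happened since
     * the caller's cursor, and move the cursor forward either way. */
    static int toy_check_and_advance(const struct toy_errseq *e,
                                     unsigned long *since)
    {
            int new_err = e->seq != *since;

            *since = e->seq;
            return new_err;
    }

    int main(void)
    {
            struct toy_errseq bdev_wb = { 0 };
            unsigned long mount_cursor = 0;

            toy_check_and_advance(&bdev_wb, &mount_cursor); /* sample at mount */

            toy_set(&bdev_wb);                              /* async write fails */

            if (toy_check_and_advance(&bdev_wb, &mount_cursor))
                    printf("metadata writeback error since mount\n");
            return 0;
    }
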
@@ -4825,10 +4870,8 @@ cantfind_ext4:
                ext4_msg(sb, KERN_ERR, "VFS: Can't find ext4 filesystem");
        goto failed_mount;
 
-#ifdef CONFIG_QUOTA
 failed_mount8:
        ext4_unregister_sysfs(sb);
-#endif
 failed_mount7:
        ext4_unregister_li_request(sb);
 failed_mount6:
@@ -4968,7 +5011,8 @@ static journal_t *ext4_get_journal(struct super_block *sb,
        struct inode *journal_inode;
        journal_t *journal;
 
-       BUG_ON(!ext4_has_feature_journal(sb));
+       if (WARN_ON_ONCE(!ext4_has_feature_journal(sb)))
+               return NULL;
 
        journal_inode = ext4_get_journal_inode(sb, journal_inum);
        if (!journal_inode)
@@ -4998,7 +5042,8 @@ static journal_t *ext4_get_dev_journal(struct super_block *sb,
        struct ext4_super_block *es;
        struct block_device *bdev;
 
-       BUG_ON(!ext4_has_feature_journal(sb));
+       if (WARN_ON_ONCE(!ext4_has_feature_journal(sb)))
+               return NULL;
 
        bdev = ext4_blkdev_get(j_dev, sb);
        if (bdev == NULL)
@@ -5089,8 +5134,10 @@ static int ext4_load_journal(struct super_block *sb,
        dev_t journal_dev;
        int err = 0;
        int really_read_only;
+       int journal_dev_ro;
 
-       BUG_ON(!ext4_has_feature_journal(sb));
+       if (WARN_ON_ONCE(!ext4_has_feature_journal(sb)))
+               return -EFSCORRUPTED;
 
        if (journal_devnum &&
            journal_devnum != le32_to_cpu(es->s_journal_dev)) {
@@ -5100,7 +5147,31 @@ static int ext4_load_journal(struct super_block *sb,
        } else
                journal_dev = new_decode_dev(le32_to_cpu(es->s_journal_dev));
 
-       really_read_only = bdev_read_only(sb->s_bdev);
+       if (journal_inum && journal_dev) {
+               ext4_msg(sb, KERN_ERR,
+                        "filesystem has both journal inode and journal device!");
+               return -EINVAL;
+       }
+
+       if (journal_inum) {
+               journal = ext4_get_journal(sb, journal_inum);
+               if (!journal)
+                       return -EINVAL;
+       } else {
+               journal = ext4_get_dev_journal(sb, journal_dev);
+               if (!journal)
+                       return -EINVAL;
+       }
+
+       journal_dev_ro = bdev_read_only(journal->j_dev);
+       really_read_only = bdev_read_only(sb->s_bdev) | journal_dev_ro;
+
+       if (journal_dev_ro && !sb_rdonly(sb)) {
+               ext4_msg(sb, KERN_ERR,
+                        "journal device read-only, try mounting with '-o ro'");
+               err = -EROFS;
+               goto err_out;
+       }
 
        /*
         * Are we loading a blank journal or performing recovery after a
@@ -5115,27 +5186,14 @@ static int ext4_load_journal(struct super_block *sb,
                                ext4_msg(sb, KERN_ERR, "write access "
                                        "unavailable, cannot proceed "
                                        "(try mounting with noload)");
-                               return -EROFS;
+                               err = -EROFS;
+                               goto err_out;
                        }
                        ext4_msg(sb, KERN_INFO, "write access will "
                               "be enabled during recovery");
                }
        }
 
-       if (journal_inum && journal_dev) {
-               ext4_msg(sb, KERN_ERR, "filesystem has both journal "
-                      "and inode journals!");
-               return -EINVAL;
-       }
-
-       if (journal_inum) {
-               if (!(journal = ext4_get_journal(sb, journal_inum)))
-                       return -EINVAL;
-       } else {
-               if (!(journal = ext4_get_dev_journal(sb, journal_dev)))
-                       return -EINVAL;
-       }
-
        if (!(journal->j_flags & JBD2_BARRIER))
                ext4_msg(sb, KERN_INFO, "barriers disabled");
 
@@ -5155,12 +5213,16 @@ static int ext4_load_journal(struct super_block *sb,
 
        if (err) {
                ext4_msg(sb, KERN_ERR, "error loading journal");
-               jbd2_journal_destroy(journal);
-               return err;
+               goto err_out;
        }
 
        EXT4_SB(sb)->s_journal = journal;
-       ext4_clear_journal_err(sb, es);
+       err = ext4_clear_journal_err(sb, es);
+       if (err) {
+               EXT4_SB(sb)->s_journal = NULL;
+               jbd2_journal_destroy(journal);
+               return err;
+       }
 
        if (!really_read_only && journal_devnum &&
            journal_devnum != le32_to_cpu(es->s_journal_dev)) {
@@ -5171,6 +5233,10 @@ static int ext4_load_journal(struct super_block *sb,
        }
 
        return 0;
+
+err_out:
+       jbd2_journal_destroy(journal);
+       return err;
 }
 
 static int ext4_commit_super(struct super_block *sb, int sync)
@@ -5183,13 +5249,6 @@ static int ext4_commit_super(struct super_block *sb, int sync)
                return error;
 
        /*
-        * The superblock bh should be mapped, but it might not be if the
-        * device was hot-removed. Not much we can do but fail the I/O.
-        */
-       if (!buffer_mapped(sbh))
-               return error;
-
-       /*
         * If the file system is mounted read-only, don't update the
         * superblock write time.  This avoids updating the superblock
         * write time when we are mounting the root file system
@@ -5256,26 +5315,32 @@ static int ext4_commit_super(struct super_block *sb, int sync)
  * remounting) the filesystem readonly, then we will end up with a
  * consistent fs on disk.  Record that fact.
  */
-static void ext4_mark_recovery_complete(struct super_block *sb,
-                                       struct ext4_super_block *es)
+static int ext4_mark_recovery_complete(struct super_block *sb,
+                                      struct ext4_super_block *es)
 {
+       int err;
        journal_t *journal = EXT4_SB(sb)->s_journal;
 
        if (!ext4_has_feature_journal(sb)) {
-               BUG_ON(journal != NULL);
-               return;
+               if (journal != NULL) {
+                       ext4_error(sb, "Journal got removed while the fs was "
+                                  "mounted!");
+                       return -EFSCORRUPTED;
+               }
+               return 0;
        }
        jbd2_journal_lock_updates(journal);
-       if (jbd2_journal_flush(journal) < 0)
+       err = jbd2_journal_flush(journal);
+       if (err < 0)
                goto out;
 
        if (ext4_has_feature_journal_needs_recovery(sb) && sb_rdonly(sb)) {
                ext4_clear_feature_journal_needs_recovery(sb);
                ext4_commit_super(sb, 1);
        }
-
 out:
        jbd2_journal_unlock_updates(journal);
+       return err;
 }
 
 /*
@@ -5283,14 +5348,17 @@ out:
  * has recorded an error from a previous lifetime, move that error to the
  * main filesystem now.
  */
-static void ext4_clear_journal_err(struct super_block *sb,
+static int ext4_clear_journal_err(struct super_block *sb,
                                   struct ext4_super_block *es)
 {
        journal_t *journal;
        int j_errno;
        const char *errstr;
 
-       BUG_ON(!ext4_has_feature_journal(sb));
+       if (!ext4_has_feature_journal(sb)) {
+               ext4_error(sb, "Journal got removed while the fs was mounted!");
+               return -EFSCORRUPTED;
+       }
 
        journal = EXT4_SB(sb)->s_journal;
 
@@ -5315,6 +5383,7 @@ static void ext4_clear_journal_err(struct super_block *sb,
                jbd2_journal_clear_err(journal);
                jbd2_journal_update_sb_errno(journal);
        }
+       return 0;
 }
 
 /*
@@ -5457,7 +5526,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
 {
        struct ext4_super_block *es;
        struct ext4_sb_info *sbi = EXT4_SB(sb);
-       unsigned long old_sb_flags;
+       unsigned long old_sb_flags, vfs_flags;
        struct ext4_mount_options old_opts;
        int enable_quota = 0;
        ext4_group_t g;
@@ -5500,6 +5569,14 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
        if (sbi->s_journal && sbi->s_journal->j_task->io_context)
                journal_ioprio = sbi->s_journal->j_task->io_context->ioprio;
 
+       /*
+        * Some options can be enabled by ext4 and/or by VFS mount flag;
+        * either way, we need to make sure it matches in both *flags and
+        * s_flags. Copy those selected flags from *flags to s_flags.
+        */
+       vfs_flags = SB_LAZYTIME | SB_I_VERSION;
+       sb->s_flags = (sb->s_flags & ~vfs_flags) | (*flags & vfs_flags);
+
        if (!parse_options(data, sb, NULL, &journal_ioprio, 1)) {
                err = -EINVAL;
                goto restore_opts;
@@ -5553,9 +5630,6 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
                set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
        }
 
-       if (*flags & SB_LAZYTIME)
-               sb->s_flags |= SB_LAZYTIME;
-
        if ((bool)(*flags & SB_RDONLY) != sb_rdonly(sb)) {
                if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED) {
                        err = -EROFS;
@@ -5585,8 +5659,13 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
                            (sbi->s_mount_state & EXT4_VALID_FS))
                                es->s_state = cpu_to_le16(sbi->s_mount_state);
 
-                       if (sbi->s_journal)
+                       if (sbi->s_journal) {
+                               /*
+                                * We let remount-ro finish even if marking fs
+                                * as clean failed...
+                                */
                                ext4_mark_recovery_complete(sb, es);
+                       }
                        if (sbi->s_mmp_tsk)
                                kthread_stop(sbi->s_mmp_tsk);
                } else {
@@ -5629,13 +5708,24 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
                        }
 
                        /*
+                        * Update the original bdev mapping's wb_err value
+                        * which could be used to detect the metadata async
+                        * write error.
+                        */
+                       errseq_check_and_advance(&sb->s_bdev->bd_inode->i_mapping->wb_err,
+                                                &sbi->s_bdev_wb_err);
+
+                       /*
                         * Mounting a RDONLY partition read-write, so reread
                         * and store the current valid flag.  (It may have
                         * been changed by e2fsck since we originally mounted
                         * the partition.)
                         */
-                       if (sbi->s_journal)
-                               ext4_clear_journal_err(sb, es);
+                       if (sbi->s_journal) {
+                               err = ext4_clear_journal_err(sb, es);
+                               if (err)
+                                       goto restore_opts;
+                       }
                        sbi->s_mount_state = le16_to_cpu(es->s_state);
 
                        err = ext4_setup_super(sb, es, 0);
@@ -5665,7 +5755,17 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
                ext4_register_li_request(sb, first_not_zeroed);
        }
 
-       ext4_setup_system_zone(sb);
+       /*
+        * Handle creation of system zone data early because it can fail.
+        * Releasing of existing data is done when we are sure remount will
+        * succeed.
+        */
+       if (test_opt(sb, BLOCK_VALIDITY) && !sbi->system_blks) {
+               err = ext4_setup_system_zone(sb);
+               if (err)
+                       goto restore_opts;
+       }
+
        if (sbi->s_journal == NULL && !(old_sb_flags & SB_RDONLY)) {
                err = ext4_commit_super(sb, 1);
                if (err)
@@ -5686,8 +5786,16 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
                }
        }
 #endif
+       if (!test_opt(sb, BLOCK_VALIDITY) && sbi->system_blks)
+               ext4_release_system_zone(sb);
+
+       /*
+        * Some options can be enabled by ext4 and/or by VFS mount flag;
+        * either way, we need to make sure it matches in both *flags and
+        * s_flags. Copy those selected flags from s_flags to *flags.
+        */
+       *flags = (*flags & ~vfs_flags) | (sb->s_flags & vfs_flags);
 
-       *flags = (*flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME);
        ext4_msg(sb, KERN_INFO, "re-mounted. Opts: %s", orig_data);
        kfree(orig_data);
        return 0;
@@ -5701,6 +5809,8 @@ restore_opts:
        sbi->s_commit_interval = old_opts.s_commit_interval;
        sbi->s_min_batch_time = old_opts.s_min_batch_time;
        sbi->s_max_batch_time = old_opts.s_max_batch_time;
+       if (!test_opt(sb, BLOCK_VALIDITY) && sbi->system_blks)
+               ext4_release_system_zone(sb);
 #ifdef CONFIG_QUOTA
        sbi->s_jquota_fmt = old_opts.s_jquota_fmt;
        for (i = 0; i < EXT4_MAXQUOTAS; i++) {
index 6c9fc9e..bfabb79 100644 (file)
@@ -189,6 +189,9 @@ static struct ext4_attr ext4_attr_##_name = {                       \
 #define EXT4_RW_ATTR_SBI_UL(_name,_elname)     \
        EXT4_ATTR_OFFSET(_name, 0644, pointer_ul, ext4_sb_info, _elname)
 
+#define EXT4_RO_ATTR_SBI_ATOMIC(_name,_elname) \
+       EXT4_ATTR_OFFSET(_name, 0444, pointer_atomic, ext4_sb_info, _elname)
+
 #define EXT4_ATTR_PTR(_name,_mode,_id,_ptr) \
 static struct ext4_attr ext4_attr_##_name = {                  \
        .attr = {.name = __stringify(_name), .mode = _mode },   \
@@ -215,6 +218,7 @@ EXT4_RW_ATTR_SBI_UI(mb_min_to_scan, s_mb_min_to_scan);
 EXT4_RW_ATTR_SBI_UI(mb_order2_req, s_mb_order2_reqs);
 EXT4_RW_ATTR_SBI_UI(mb_stream_req, s_mb_stream_request);
 EXT4_RW_ATTR_SBI_UI(mb_group_prealloc, s_mb_group_prealloc);
+EXT4_RW_ATTR_SBI_UI(mb_max_inode_prealloc, s_mb_max_inode_prealloc);
 EXT4_RW_ATTR_SBI_UI(extent_max_zeroout_kb, s_extent_max_zeroout_kb);
 EXT4_ATTR(trigger_fs_error, 0200, trigger_test_error);
 EXT4_RW_ATTR_SBI_UI(err_ratelimit_interval_ms, s_err_ratelimit_state.interval);
@@ -226,6 +230,8 @@ EXT4_RW_ATTR_SBI_UI(msg_ratelimit_burst, s_msg_ratelimit_state.burst);
 #ifdef CONFIG_EXT4_DEBUG
 EXT4_RW_ATTR_SBI_UL(simulate_fail, s_simulate_fail);
 #endif
+EXT4_RO_ATTR_SBI_ATOMIC(warning_count, s_warning_count);
+EXT4_RO_ATTR_SBI_ATOMIC(msg_count, s_msg_count);
 EXT4_RO_ATTR_ES_UI(errors_count, s_error_count);
 EXT4_RO_ATTR_ES_U8(first_error_errcode, s_first_error_errcode);
 EXT4_RO_ATTR_ES_U8(last_error_errcode, s_last_error_errcode);
@@ -240,6 +246,8 @@ EXT4_RO_ATTR_ES_STRING(last_error_func, s_last_error_func, 32);
 EXT4_ATTR(first_error_time, 0444, first_error_time);
 EXT4_ATTR(last_error_time, 0444, last_error_time);
 EXT4_ATTR(journal_task, 0444, journal_task);
+EXT4_RW_ATTR_SBI_UI(mb_prefetch, s_mb_prefetch);
+EXT4_RW_ATTR_SBI_UI(mb_prefetch_limit, s_mb_prefetch_limit);
 
 static unsigned int old_bump_val = 128;
 EXT4_ATTR_PTR(max_writeback_mb_bump, 0444, pointer_ui, &old_bump_val);
@@ -257,6 +265,7 @@ static struct attribute *ext4_attrs[] = {
        ATTR_LIST(mb_order2_req),
        ATTR_LIST(mb_stream_req),
        ATTR_LIST(mb_group_prealloc),
+       ATTR_LIST(mb_max_inode_prealloc),
        ATTR_LIST(max_writeback_mb_bump),
        ATTR_LIST(extent_max_zeroout_kb),
        ATTR_LIST(trigger_fs_error),
@@ -267,6 +276,8 @@ static struct attribute *ext4_attrs[] = {
        ATTR_LIST(msg_ratelimit_interval_ms),
        ATTR_LIST(msg_ratelimit_burst),
        ATTR_LIST(errors_count),
+       ATTR_LIST(warning_count),
+       ATTR_LIST(msg_count),
        ATTR_LIST(first_error_ino),
        ATTR_LIST(last_error_ino),
        ATTR_LIST(first_error_block),
@@ -283,6 +294,8 @@ static struct attribute *ext4_attrs[] = {
 #ifdef CONFIG_EXT4_DEBUG
        ATTR_LIST(simulate_fail),
 #endif
+       ATTR_LIST(mb_prefetch),
+       ATTR_LIST(mb_prefetch_limit),
        NULL,
 };
 ATTRIBUTE_GROUPS(ext4);
index 7d2f657..cba4b87 100644 (file)
@@ -1356,8 +1356,7 @@ retry:
 
        block = 0;
        while (wsize < bufsize) {
-               if (bh != NULL)
-                       brelse(bh);
+               brelse(bh);
                csize = (bufsize - wsize) > blocksize ? blocksize :
                                                                bufsize - wsize;
                bh = ext4_getblk(handle, ea_inode, block, 0);
index 2360649..ff807e1 100644 (file)
@@ -523,7 +523,7 @@ void f2fs_remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
        __remove_ino_entry(sbi, ino, type);
 }
 
-/* mode should be APPEND_INO or UPDATE_INO */
+/* mode should be APPEND_INO, UPDATE_INO or TRANS_DIR_INO */
 bool f2fs_exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode)
 {
        struct inode_management *im = &sbi->im[mode];
@@ -1258,8 +1258,6 @@ void f2fs_wait_on_all_pages(struct f2fs_sb_info *sbi, int type)
        DEFINE_WAIT(wait);
 
        for (;;) {
-               prepare_to_wait(&sbi->cp_wait, &wait, TASK_UNINTERRUPTIBLE);
-
                if (!get_pages(sbi, type))
                        break;
 
@@ -1269,6 +1267,10 @@ void f2fs_wait_on_all_pages(struct f2fs_sb_info *sbi, int type)
                if (type == F2FS_DIRTY_META)
                        f2fs_sync_meta_pages(sbi, META, LONG_MAX,
                                                        FS_CP_META_IO);
+               else if (type == F2FS_WB_CP_DATA)
+                       f2fs_submit_merged_write(sbi, DATA);
+
+               prepare_to_wait(&sbi->cp_wait, &wait, TASK_UNINTERRUPTIBLE);
                io_schedule_timeout(DEFAULT_IO_TIMEOUT);
        }
        finish_wait(&sbi->cp_wait, &wait);
@@ -1415,7 +1417,7 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
                                curseg_alloc_type(sbi, i + CURSEG_HOT_DATA);
        }
 
-       /* 2 cp  + n data seg summary + orphan inode blocks */
+       /* 2 cp + n data seg summary + orphan inode blocks */
        data_sum_blocks = f2fs_npages_for_summary_flush(sbi, false);
        spin_lock_irqsave(&sbi->cp_lock, flags);
        if (data_sum_blocks < NR_CURSEG_DATA_TYPE)
@@ -1515,9 +1517,10 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 
        /*
         * invalidate intermediate page cache borrowed from meta inode which are
-        * used for migration of encrypted or verity inode's blocks.
+        * used for migration of encrypted, verity or compressed inode's blocks.
         */
-       if (f2fs_sb_has_encrypt(sbi) || f2fs_sb_has_verity(sbi))
+       if (f2fs_sb_has_encrypt(sbi) || f2fs_sb_has_verity(sbi) ||
+               f2fs_sb_has_compression(sbi))
                invalidate_mapping_pages(META_MAPPING(sbi),
                                MAIN_BLKADDR(sbi), MAX_BLKADDR(sbi) - 1);
 
index 29e50fb..1dfb126 100644 (file)
@@ -49,6 +49,13 @@ bool f2fs_is_compressed_page(struct page *page)
                return false;
        if (IS_ATOMIC_WRITTEN_PAGE(page) || IS_DUMMY_WRITTEN_PAGE(page))
                return false;
+       /*
+        * page->private may be set with pid.
+        * pid_max is enough to check if it is traced.
+        */
+       if (IS_IO_TRACED_PAGE(page))
+               return false;
+
        f2fs_bug_on(F2FS_M_SB(page->mapping),
                *((u32 *)page_private(page)) != F2FS_COMPRESSED_PAGE_MAGIC);
        return true;
@@ -506,7 +513,7 @@ bool f2fs_is_compress_backend_ready(struct inode *inode)
        return f2fs_cops[F2FS_I(inode)->i_compress_algorithm];
 }
 
-static mempool_t *compress_page_pool = NULL;
+static mempool_t *compress_page_pool;
 static int num_compress_pages = 512;
 module_param(num_compress_pages, uint, 0444);
 MODULE_PARM_DESC(num_compress_pages,
@@ -663,6 +670,7 @@ void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity)
        const struct f2fs_compress_ops *cops =
                        f2fs_cops[fi->i_compress_algorithm];
        int ret;
+       int i;
 
        dec_page_count(sbi, F2FS_RD_DATA);
 
@@ -681,6 +689,26 @@ void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity)
                goto out_free_dic;
        }
 
+       dic->tpages = f2fs_kzalloc(sbi, sizeof(struct page *) *
+                                       dic->cluster_size, GFP_NOFS);
+       if (!dic->tpages) {
+               ret = -ENOMEM;
+               goto out_free_dic;
+       }
+
+       for (i = 0; i < dic->cluster_size; i++) {
+               if (dic->rpages[i]) {
+                       dic->tpages[i] = dic->rpages[i];
+                       continue;
+               }
+
+               dic->tpages[i] = f2fs_compress_alloc_page();
+               if (!dic->tpages[i]) {
+                       ret = -ENOMEM;
+                       goto out_free_dic;
+               }
+       }
+
        if (cops->init_decompress_ctx) {
                ret = cops->init_decompress_ctx(dic);
                if (ret)
@@ -821,7 +849,7 @@ static int f2fs_compressed_blocks(struct compress_ctx *cc)
 }
 
 /* return # of valid blocks in compressed cluster */
-static int f2fs_cluster_blocks(struct compress_ctx *cc, bool compr)
+static int f2fs_cluster_blocks(struct compress_ctx *cc)
 {
        return __f2fs_cluster_blocks(cc, false);
 }
@@ -835,7 +863,7 @@ int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index)
                .cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size,
        };
 
-       return f2fs_cluster_blocks(&cc, false);
+       return f2fs_cluster_blocks(&cc);
 }
 
 static bool cluster_may_compress(struct compress_ctx *cc)
@@ -886,7 +914,7 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
        bool prealloc;
 
 retry:
-       ret = f2fs_cluster_blocks(cc, false);
+       ret = f2fs_cluster_blocks(cc);
        if (ret <= 0)
                return ret;
 
@@ -949,7 +977,7 @@ retry:
        }
 
        if (prealloc) {
-               __do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
+               f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
 
                set_new_dnode(&dn, cc->inode, NULL, NULL, 0);
 
@@ -964,7 +992,7 @@ retry:
                                break;
                }
 
-               __do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
+               f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
        }
 
        if (likely(!ret)) {
@@ -1096,8 +1124,16 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
        loff_t psize;
        int i, err;
 
-       if (!IS_NOQUOTA(inode) && !f2fs_trylock_op(sbi))
+       if (IS_NOQUOTA(inode)) {
+               /*
+                * We need to wait for node_write to avoid block allocation during
+                * checkpoint. This can only happen to quota writes, which can
+                * cause the discard race condition below.
+                */
+               down_read(&sbi->node_write);
+       } else if (!f2fs_trylock_op(sbi)) {
                return -EAGAIN;
+       }
 
        set_new_dnode(&dn, cc->inode, NULL, NULL, 0);
 
@@ -1137,6 +1173,13 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
                f2fs_set_compressed_page(cc->cpages[i], inode,
                                        cc->rpages[i + 1]->index, cic);
                fio.compressed_page = cc->cpages[i];
+
+               fio.old_blkaddr = data_blkaddr(dn.inode, dn.node_page,
+                                               dn.ofs_in_node + i + 1);
+
+               /* wait for GCed page writeback via META_MAPPING */
+               f2fs_wait_on_block_writeback(inode, fio.old_blkaddr);
+
                if (fio.encrypted) {
                        fio.page = cc->rpages[i + 1];
                        err = f2fs_encrypt_one_page(&fio);
@@ -1203,7 +1246,9 @@ unlock_continue:
                set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
 
        f2fs_put_dnode(&dn);
-       if (!IS_NOQUOTA(inode))
+       if (IS_NOQUOTA(inode))
+               up_read(&sbi->node_write);
+       else
                f2fs_unlock_op(sbi);
 
        spin_lock(&fi->i_size_lock);
@@ -1230,7 +1275,9 @@ out_put_cic:
 out_put_dnode:
        f2fs_put_dnode(&dn);
 out_unlock_op:
-       if (!IS_NOQUOTA(inode))
+       if (IS_NOQUOTA(inode))
+               up_read(&sbi->node_write);
+       else
                f2fs_unlock_op(sbi);
        return -EAGAIN;
 }
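
The two hunks above make quota writes of compressed clusters hold sbi->node_write shared across the whole allocation, so a checkpoint, which takes the same semaphore exclusive, cannot run and later discard blocks that are still being handed out. A small pthread sketch of that reader/writer discipline (build with -pthread); the function names are invented and the real paths obviously do far more under the lock:

    #include <pthread.h>
    #include <stdio.h>

    /* Stand-in for sbi->node_write: checkpoint takes it exclusive, quota
     * writers take it shared so no blocks are allocated mid-checkpoint. */
    static pthread_rwlock_t node_write = PTHREAD_RWLOCK_INITIALIZER;

    static void *quota_write(void *arg)
    {
            (void)arg;
            pthread_rwlock_rdlock(&node_write);  /* like down_read() above */
            printf("quota write: allocating blocks\n");
            pthread_rwlock_unlock(&node_write);
            return NULL;
    }

    static void checkpoint(void)
    {
            pthread_rwlock_wrlock(&node_write);  /* waits for readers to drain */
            printf("checkpoint: no allocation can race with discard now\n");
            pthread_rwlock_unlock(&node_write);
    }

    int main(void)
    {
            pthread_t t;

            pthread_create(&t, NULL, quota_write, NULL);
            checkpoint();
            pthread_join(t, NULL);
            return 0;
    }
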
@@ -1310,6 +1357,12 @@ retry_write:
                                congestion_wait(BLK_RW_ASYNC,
                                                DEFAULT_IO_TIMEOUT);
                                lock_page(cc->rpages[i]);
+
+                               if (!PageDirty(cc->rpages[i])) {
+                                       unlock_page(cc->rpages[i]);
+                                       continue;
+                               }
+
                                clear_page_dirty_for_io(cc->rpages[i]);
                                goto retry_write;
                        }
@@ -1353,6 +1406,8 @@ int f2fs_write_multi_pages(struct compress_ctx *cc,
                err = f2fs_write_compressed_pages(cc, submitted,
                                                        wbc, io_type);
                cops->destroy_compress_ctx(cc);
+               kfree(cc->cpages);
+               cc->cpages = NULL;
                if (!err)
                        return 0;
                f2fs_bug_on(F2FS_I_SB(cc->inode), err != -EAGAIN);
@@ -1415,22 +1470,6 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
                dic->cpages[i] = page;
        }
 
-       dic->tpages = f2fs_kzalloc(sbi, sizeof(struct page *) *
-                                       dic->cluster_size, GFP_NOFS);
-       if (!dic->tpages)
-               goto out_free;
-
-       for (i = 0; i < dic->cluster_size; i++) {
-               if (cc->rpages[i]) {
-                       dic->tpages[i] = cc->rpages[i];
-                       continue;
-               }
-
-               dic->tpages[i] = f2fs_compress_alloc_page();
-               if (!dic->tpages[i])
-                       goto out_free;
-       }
-
        return dic;
 
 out_free:
index 5f52707..ed2bca0 100644 (file)
@@ -87,7 +87,7 @@ static bool __is_cp_guaranteed(struct page *page)
        sbi = F2FS_I_SB(inode);
 
        if (inode->i_ino == F2FS_META_INO(sbi) ||
-                       inode->i_ino ==  F2FS_NODE_INO(sbi) ||
+                       inode->i_ino == F2FS_NODE_INO(sbi) ||
                        S_ISDIR(inode->i_mode) ||
                        (S_ISREG(inode->i_mode) &&
                        (f2fs_is_atomic_file(inode) || IS_NOQUOTA(inode))) ||
@@ -1073,12 +1073,13 @@ static void f2fs_release_read_bio(struct bio *bio)
 
 /* This can handle encryption stuffs */
 static int f2fs_submit_page_read(struct inode *inode, struct page *page,
-                                               block_t blkaddr, bool for_write)
+                                block_t blkaddr, int op_flags, bool for_write)
 {
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct bio *bio;
 
-       bio = f2fs_grab_read_bio(inode, blkaddr, 1, 0, page->index, for_write);
+       bio = f2fs_grab_read_bio(inode, blkaddr, 1, op_flags,
+                                       page->index, for_write);
        if (IS_ERR(bio))
                return PTR_ERR(bio);
 
@@ -1193,7 +1194,7 @@ int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
 
 int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
 {
-       struct extent_info ei  = {0,0,0};
+       struct extent_info ei = {0, 0, 0};
        struct inode *inode = dn->inode;
 
        if (f2fs_lookup_extent_cache(inode, index, &ei)) {
@@ -1265,7 +1266,8 @@ got_it:
                return page;
        }
 
-       err = f2fs_submit_page_read(inode, page, dn.data_blkaddr, for_write);
+       err = f2fs_submit_page_read(inode, page, dn.data_blkaddr,
+                                               op_flags, for_write);
        if (err)
                goto put_err;
        return page;
@@ -1414,7 +1416,7 @@ alloc:
        set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
        old_blkaddr = dn->data_blkaddr;
        f2fs_allocate_data_block(sbi, NULL, old_blkaddr, &dn->data_blkaddr,
-                                       &sum, seg_type, NULL, false);
+                                       &sum, seg_type, NULL);
        if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
                invalidate_mapping_pages(META_MAPPING(sbi),
                                        old_blkaddr, old_blkaddr);
@@ -1474,7 +1476,7 @@ map_blocks:
        return err;
 }
 
-void __do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock)
+void f2fs_do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock)
 {
        if (flag == F2FS_GET_BLOCK_PRE_AIO) {
                if (lock)
@@ -1539,7 +1541,7 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
 
 next_dnode:
        if (map->m_may_create)
-               __do_map_lock(sbi, flag, true);
+               f2fs_do_map_lock(sbi, flag, true);
 
        /* When reading holes, we need its node page */
        set_new_dnode(&dn, inode, NULL, NULL, 0);
@@ -1688,7 +1690,7 @@ skip:
        f2fs_put_dnode(&dn);
 
        if (map->m_may_create) {
-               __do_map_lock(sbi, flag, false);
+               f2fs_do_map_lock(sbi, flag, false);
                f2fs_balance_fs(sbi, dn.node_changed);
        }
        goto next_dnode;
@@ -1714,7 +1716,7 @@ sync_out:
        f2fs_put_dnode(&dn);
 unlock_out:
        if (map->m_may_create) {
-               __do_map_lock(sbi, flag, false);
+               f2fs_do_map_lock(sbi, flag, false);
                f2fs_balance_fs(sbi, dn.node_changed);
        }
 out:
@@ -1861,6 +1863,7 @@ static int f2fs_xattr_fiemap(struct inode *inode,
                        flags |= FIEMAP_EXTENT_LAST;
 
                err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);
+               trace_f2fs_fiemap(inode, 0, phys, len, flags, err);
                if (err || err == 1)
                        return err;
        }
@@ -1884,8 +1887,10 @@ static int f2fs_xattr_fiemap(struct inode *inode,
                flags = FIEMAP_EXTENT_LAST;
        }
 
-       if (phys)
+       if (phys) {
                err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);
+               trace_f2fs_fiemap(inode, 0, phys, len, flags, err);
+       }
 
        return (err < 0 ? err : 0);
 }
@@ -1979,6 +1984,7 @@ next:
 
                ret = fiemap_fill_next_extent(fieinfo, logical,
                                phys, size, flags);
+               trace_f2fs_fiemap(inode, logical, phys, size, flags, ret);
                if (ret)
                        goto out;
                size = 0;
@@ -2213,9 +2219,7 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
        if (ret)
                goto out;
 
-       /* cluster was overwritten as normal cluster */
-       if (dn.data_blkaddr != COMPRESS_ADDR)
-               goto out;
+       f2fs_bug_on(sbi, dn.data_blkaddr != COMPRESS_ADDR);
 
        for (i = 1; i < cc->cluster_size; i++) {
                block_t blkaddr;
@@ -2342,6 +2346,7 @@ static int f2fs_mpage_readpages(struct inode *inode,
        unsigned nr_pages = rac ? readahead_count(rac) : 1;
        unsigned max_nr_pages = nr_pages;
        int ret = 0;
+       bool drop_ra = false;
 
        map.m_pblk = 0;
        map.m_lblk = 0;
@@ -2352,10 +2357,26 @@ static int f2fs_mpage_readpages(struct inode *inode,
        map.m_seg_type = NO_CHECK_TYPE;
        map.m_may_create = false;
 
+       /*
+        * Two readahead threads for same address range can cause race condition
+        * which fragments sequential read IOs. So let's avoid each other.
+        */
+       if (rac && readahead_count(rac)) {
+               if (READ_ONCE(F2FS_I(inode)->ra_offset) == readahead_index(rac))
+                       drop_ra = true;
+               else
+                       WRITE_ONCE(F2FS_I(inode)->ra_offset,
+                                               readahead_index(rac));
+       }
+
        for (; nr_pages; nr_pages--) {
                if (rac) {
                        page = readahead_page(rac);
                        prefetchw(&page->flags);
+                       if (drop_ra) {
+                               f2fs_put_page(page, 1);
+                               continue;
+                       }
                }
 
 #ifdef CONFIG_F2FS_FS_COMPRESSION
@@ -2418,6 +2439,9 @@ next_page:
        }
        if (bio)
                __submit_bio(F2FS_I_SB(inode), bio, DATA);
+
+       if (rac && readahead_count(rac) && !drop_ra)
+               WRITE_ONCE(F2FS_I(inode)->ra_offset, -1);
        return ret;
 }
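
The readahead hunks above add an advisory claim: a reader publishes the start index of its in-flight readahead in ra_offset, a second reader that finds its own start index already published drops its pages instead of issuing duplicate, interleaved I/O, and the claim is cleared once the first reader finishes. A user-space sketch of that claim/drop/clear pattern, with C11 relaxed atomics standing in for READ_ONCE/WRITE_ONCE (the single shared variable and the page counts are invented):

    #include <stdatomic.h>
    #include <stdio.h>

    /* One shared claim per "inode": the start index of an in-flight
     * readahead, or -1 when nobody is currently reading ahead. */
    static atomic_long ra_offset = -1;

    static void readahead(long index, long count)
    {
            /* Another reader already claimed this start index: back off. */
            if (atomic_load_explicit(&ra_offset, memory_order_relaxed) == index) {
                    printf("reader at %ld: dropping duplicate readahead\n", index);
                    return;
            }

            atomic_store_explicit(&ra_offset, index, memory_order_relaxed);
            printf("reader at %ld: issuing readahead of %ld pages\n", index, count);
            /* ... submit the read bios ... */
            atomic_store_explicit(&ra_offset, -1, memory_order_relaxed);
    }

    int main(void)
    {
            readahead(128, 32);      /* normal case: claim, do the I/O, clear */

            /* simulate a second reader racing on the same start index */
            atomic_store_explicit(&ra_offset, 128, memory_order_relaxed);
            readahead(128, 32);      /* backs off instead of duplicating I/O */
            return 0;
    }
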
 
@@ -2772,8 +2796,20 @@ write:
 
        /* Dentry/quota blocks are controlled by checkpoint */
        if (S_ISDIR(inode->i_mode) || IS_NOQUOTA(inode)) {
+               /*
+                * We need to wait for node_write to avoid block allocation during
+                * checkpoint. This can only happen to quota writes, which can
+                * cause the discard race condition below.
+                */
+               if (IS_NOQUOTA(inode))
+                       down_read(&sbi->node_write);
+
                fio.need_lock = LOCK_DONE;
                err = f2fs_do_write_data_page(&fio);
+
+               if (IS_NOQUOTA(inode))
+                       up_read(&sbi->node_write);
+
                goto done;
        }
 
@@ -3268,7 +3304,7 @@ static int prepare_write_begin(struct f2fs_sb_info *sbi,
 
        if (f2fs_has_inline_data(inode) ||
                        (pos & PAGE_MASK) >= i_size_read(inode)) {
-               __do_map_lock(sbi, flag, true);
+               f2fs_do_map_lock(sbi, flag, true);
                locked = true;
        }
 
@@ -3305,7 +3341,7 @@ restart:
                        err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
                        if (err || dn.data_blkaddr == NULL_ADDR) {
                                f2fs_put_dnode(&dn);
-                               __do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO,
+                               f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO,
                                                                true);
                                WARN_ON(flag != F2FS_GET_BLOCK_PRE_AIO);
                                locked = true;
@@ -3321,7 +3357,7 @@ out:
        f2fs_put_dnode(&dn);
 unlock_out:
        if (locked)
-               __do_map_lock(sbi, flag, false);
+               f2fs_do_map_lock(sbi, flag, false);
        return err;
 }
 
@@ -3433,7 +3469,7 @@ repeat:
                        err = -EFSCORRUPTED;
                        goto fail;
                }
-               err = f2fs_submit_page_read(inode, page, blkaddr, true);
+               err = f2fs_submit_page_read(inode, page, blkaddr, 0, true);
                if (err)
                        goto fail;
 
@@ -3483,6 +3519,10 @@ static int f2fs_write_end(struct file *file,
        if (f2fs_compressed_file(inode) && fsdata) {
                f2fs_compress_write_end(inode, fsdata, page->index, copied);
                f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
+
+               if (pos + copied > i_size_read(inode) &&
+                               !f2fs_verity_in_progress(inode))
+                       f2fs_i_size_write(inode, pos + copied);
                return copied;
        }
 #endif
@@ -3742,10 +3782,9 @@ static sector_t f2fs_bmap_compress(struct inode *inode, sector_t block)
        }
 
        f2fs_put_dnode(&dn);
-
        return blknr;
 #else
-       return -EOPNOTSUPP;
+       return 0;
 #endif
 }
 
@@ -3753,18 +3792,26 @@ static sector_t f2fs_bmap_compress(struct inode *inode, sector_t block)
 static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
 {
        struct inode *inode = mapping->host;
+       struct buffer_head tmp = {
+               .b_size = i_blocksize(inode),
+       };
+       sector_t blknr = 0;
 
        if (f2fs_has_inline_data(inode))
-               return 0;
+               goto out;
 
        /* make sure allocating whole blocks */
        if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
                filemap_write_and_wait(mapping);
 
        if (f2fs_compressed_file(inode))
-               return f2fs_bmap_compress(inode, block);
+               blknr = f2fs_bmap_compress(inode, block);
 
-       return generic_block_bmap(mapping, block, get_data_block_bmap);
+       if (!get_data_block_bmap(inode, block, &tmp, 0))
+               blknr = tmp.b_blocknr;
+out:
+       trace_f2fs_bmap(inode, block, blknr);
+       return blknr;
 }
 
 #ifdef CONFIG_MIGRATION
index 0dbcb0f..4276c0f 100644 (file)
@@ -174,6 +174,26 @@ static void update_general_status(struct f2fs_sb_info *sbi)
        for (i = META_CP; i < META_MAX; i++)
                si->meta_count[i] = atomic_read(&sbi->meta_count[i]);
 
+       for (i = 0; i < NO_CHECK_TYPE; i++) {
+               si->dirty_seg[i] = 0;
+               si->full_seg[i] = 0;
+               si->valid_blks[i] = 0;
+       }
+
+       for (i = 0; i < MAIN_SEGS(sbi); i++) {
+               int blks = get_seg_entry(sbi, i)->valid_blocks;
+               int type = get_seg_entry(sbi, i)->type;
+
+               if (!blks)
+                       continue;
+
+               if (blks == sbi->blocks_per_seg)
+                       si->full_seg[type]++;
+               else
+                       si->dirty_seg[type]++;
+               si->valid_blks[type] += blks;
+       }
+
        for (i = 0; i < 2; i++) {
                si->segment_count[i] = sbi->segment_count[i];
                si->block_count[i] = sbi->block_count[i];
@@ -329,30 +349,50 @@ static int stat_show(struct seq_file *s, void *v)
                seq_printf(s, "\nMain area: %d segs, %d secs %d zones\n",
                           si->main_area_segs, si->main_area_sections,
                           si->main_area_zones);
-               seq_printf(s, "  - COLD  data: %d, %d, %d\n",
+               seq_printf(s, "    TYPE         %8s %8s %8s %10s %10s %10s\n",
+                          "segno", "secno", "zoneno", "dirty_seg", "full_seg", "valid_blk");
+               seq_printf(s, "  - COLD   data: %8d %8d %8d %10u %10u %10u\n",
                           si->curseg[CURSEG_COLD_DATA],
                           si->cursec[CURSEG_COLD_DATA],
-                          si->curzone[CURSEG_COLD_DATA]);
-               seq_printf(s, "  - WARM  data: %d, %d, %d\n",
+                          si->curzone[CURSEG_COLD_DATA],
+                          si->dirty_seg[CURSEG_COLD_DATA],
+                          si->full_seg[CURSEG_COLD_DATA],
+                          si->valid_blks[CURSEG_COLD_DATA]);
+               seq_printf(s, "  - WARM   data: %8d %8d %8d %10u %10u %10u\n",
                           si->curseg[CURSEG_WARM_DATA],
                           si->cursec[CURSEG_WARM_DATA],
-                          si->curzone[CURSEG_WARM_DATA]);
-               seq_printf(s, "  - HOT   data: %d, %d, %d\n",
+                          si->curzone[CURSEG_WARM_DATA],
+                          si->dirty_seg[CURSEG_WARM_DATA],
+                          si->full_seg[CURSEG_WARM_DATA],
+                          si->valid_blks[CURSEG_WARM_DATA]);
+               seq_printf(s, "  - HOT    data: %8d %8d %8d %10u %10u %10u\n",
                           si->curseg[CURSEG_HOT_DATA],
                           si->cursec[CURSEG_HOT_DATA],
-                          si->curzone[CURSEG_HOT_DATA]);
-               seq_printf(s, "  - Dir   dnode: %d, %d, %d\n",
+                          si->curzone[CURSEG_HOT_DATA],
+                          si->dirty_seg[CURSEG_HOT_DATA],
+                          si->full_seg[CURSEG_HOT_DATA],
+                          si->valid_blks[CURSEG_HOT_DATA]);
+               seq_printf(s, "  - Dir   dnode: %8d %8d %8d %10u %10u %10u\n",
                           si->curseg[CURSEG_HOT_NODE],
                           si->cursec[CURSEG_HOT_NODE],
-                          si->curzone[CURSEG_HOT_NODE]);
-               seq_printf(s, "  - File   dnode: %d, %d, %d\n",
+                          si->curzone[CURSEG_HOT_NODE],
+                          si->dirty_seg[CURSEG_HOT_NODE],
+                          si->full_seg[CURSEG_HOT_NODE],
+                          si->valid_blks[CURSEG_HOT_NODE]);
+               seq_printf(s, "  - File  dnode: %8d %8d %8d %10u %10u %10u\n",
                           si->curseg[CURSEG_WARM_NODE],
                           si->cursec[CURSEG_WARM_NODE],
-                          si->curzone[CURSEG_WARM_NODE]);
-               seq_printf(s, "  - Indir nodes: %d, %d, %d\n",
+                          si->curzone[CURSEG_WARM_NODE],
+                          si->dirty_seg[CURSEG_WARM_NODE],
+                          si->full_seg[CURSEG_WARM_NODE],
+                          si->valid_blks[CURSEG_WARM_NODE]);
+               seq_printf(s, "  - Indir nodes: %8d %8d %8d %10u %10u %10u\n",
                           si->curseg[CURSEG_COLD_NODE],
                           si->cursec[CURSEG_COLD_NODE],
-                          si->curzone[CURSEG_COLD_NODE]);
+                          si->curzone[CURSEG_COLD_NODE],
+                          si->dirty_seg[CURSEG_COLD_NODE],
+                          si->full_seg[CURSEG_COLD_NODE],
+                          si->valid_blks[CURSEG_COLD_NODE]);
                seq_printf(s, "\n  - Valid: %d\n  - Dirty: %d\n",
                           si->main_area_segs - si->dirty_count -
                           si->prefree_count - si->free_segs,
index d359767..069f498 100644 (file)
@@ -779,7 +779,7 @@ int f2fs_do_add_link(struct inode *dir, const struct qstr *name,
                return err;
 
        /*
-        * An immature stakable filesystem shows a race condition between lookup
+        * An immature stackable filesystem shows a race condition between lookup
         * and create. If we have same task when doing lookup and create, it's
         * definitely fine as expected by VFS normally. Otherwise, let's just
         * verify on-disk dentry one more time, which guarantees filesystem
index e600784..686c68b 100644 (file)
@@ -325,9 +325,10 @@ static void __drop_largest_extent(struct extent_tree *et,
 }
 
 /* return true, if inode page is changed */
-static bool __f2fs_init_extent_tree(struct inode *inode, struct f2fs_extent *i_ext)
+static void __f2fs_init_extent_tree(struct inode *inode, struct page *ipage)
 {
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+       struct f2fs_extent *i_ext = ipage ? &F2FS_INODE(ipage)->i_ext : NULL;
        struct extent_tree *et;
        struct extent_node *en;
        struct extent_info ei;
@@ -335,16 +336,18 @@ static bool __f2fs_init_extent_tree(struct inode *inode, struct f2fs_extent *i_e
        if (!f2fs_may_extent_tree(inode)) {
                /* drop largest extent */
                if (i_ext && i_ext->len) {
+                       f2fs_wait_on_page_writeback(ipage, NODE, true, true);
                        i_ext->len = 0;
-                       return true;
+                       set_page_dirty(ipage);
+                       return;
                }
-               return false;
+               return;
        }
 
        et = __grab_extent_tree(inode);
 
        if (!i_ext || !i_ext->len)
-               return false;
+               return;
 
        get_extent_info(&ei, i_ext);
 
@@ -360,17 +363,14 @@ static bool __f2fs_init_extent_tree(struct inode *inode, struct f2fs_extent *i_e
        }
 out:
        write_unlock(&et->lock);
-       return false;
 }
 
-bool f2fs_init_extent_tree(struct inode *inode, struct f2fs_extent *i_ext)
+void f2fs_init_extent_tree(struct inode *inode, struct page *ipage)
 {
-       bool ret =  __f2fs_init_extent_tree(inode, i_ext);
+       __f2fs_init_extent_tree(inode, ipage);
 
        if (!F2FS_I(inode)->extent_tree)
                set_inode_flag(inode, FI_NO_EXTENT);
-
-       return ret;
 }
 
 static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
index b35a50f..16322ea 100644 (file)
@@ -402,12 +402,8 @@ static inline bool __has_cursum_space(struct f2fs_journal *journal,
 }
 
 /*
- * ioctl commands
+ * f2fs-specific ioctl commands
  */
-#define F2FS_IOC_GETFLAGS              FS_IOC_GETFLAGS
-#define F2FS_IOC_SETFLAGS              FS_IOC_SETFLAGS
-#define F2FS_IOC_GETVERSION            FS_IOC_GETVERSION
-
 #define F2FS_IOCTL_MAGIC               0xf5
 #define F2FS_IOC_START_ATOMIC_WRITE    _IO(F2FS_IOCTL_MAGIC, 1)
 #define F2FS_IOC_COMMIT_ATOMIC_WRITE   _IO(F2FS_IOCTL_MAGIC, 2)
@@ -434,13 +430,8 @@ static inline bool __has_cursum_space(struct f2fs_journal *journal,
                                        _IOR(F2FS_IOCTL_MAGIC, 18, __u64)
 #define F2FS_IOC_RESERVE_COMPRESS_BLOCKS                               \
                                        _IOR(F2FS_IOCTL_MAGIC, 19, __u64)
-
-#define F2FS_IOC_GET_VOLUME_NAME       FS_IOC_GETFSLABEL
-#define F2FS_IOC_SET_VOLUME_NAME       FS_IOC_SETFSLABEL
-
-#define F2FS_IOC_SET_ENCRYPTION_POLICY FS_IOC_SET_ENCRYPTION_POLICY
-#define F2FS_IOC_GET_ENCRYPTION_POLICY FS_IOC_GET_ENCRYPTION_POLICY
-#define F2FS_IOC_GET_ENCRYPTION_PWSALT FS_IOC_GET_ENCRYPTION_PWSALT
+#define F2FS_IOC_SEC_TRIM_FILE         _IOW(F2FS_IOCTL_MAGIC, 20,      \
+                                               struct f2fs_sectrim_range)
 
 /*
  * should be same as XFS_IOC_GOINGDOWN.
@@ -453,17 +444,12 @@ static inline bool __has_cursum_space(struct f2fs_journal *journal,
 #define F2FS_GOING_DOWN_METAFLUSH      0x3     /* going down with meta flush */
 #define F2FS_GOING_DOWN_NEED_FSCK      0x4     /* going down to trigger fsck */
 
-#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
 /*
- * ioctl commands in 32 bit emulation
+ * Flags used by F2FS_IOC_SEC_TRIM_FILE
  */
-#define F2FS_IOC32_GETFLAGS            FS_IOC32_GETFLAGS
-#define F2FS_IOC32_SETFLAGS            FS_IOC32_SETFLAGS
-#define F2FS_IOC32_GETVERSION          FS_IOC32_GETVERSION
-#endif
-
-#define F2FS_IOC_FSGETXATTR            FS_IOC_FSGETXATTR
-#define F2FS_IOC_FSSETXATTR            FS_IOC_FSSETXATTR
+#define F2FS_TRIM_FILE_DISCARD         0x1     /* send discard command */
+#define F2FS_TRIM_FILE_ZEROOUT         0x2     /* zero out */
+#define F2FS_TRIM_FILE_MASK            0x3
 
 struct f2fs_gc_range {
        u32 sync;
@@ -488,6 +474,12 @@ struct f2fs_flush_device {
        u32 segments;           /* # of segments to flush */
 };
 
+struct f2fs_sectrim_range {
+       u64 start;
+       u64 len;
+       u64 flags;
+};
+
 /* for inline stuff */
 #define DEF_INLINE_RESERVED_SIZE       1
 static inline int get_extra_isize(struct inode *inode);
@@ -794,6 +786,7 @@ struct f2fs_inode_info {
        struct list_head inmem_pages;   /* inmemory pages managed by f2fs */
        struct task_struct *inmem_task; /* store inmemory task */
        struct mutex inmem_lock;        /* lock for inmemory pages */
+       pgoff_t ra_offset;              /* ongoing readahead offset */
        struct extent_tree *extent_tree;        /* cached extent_tree entry */
 
        /* avoid racing between foreground op and gc */
@@ -1267,7 +1260,8 @@ enum {
        GC_NORMAL,
        GC_IDLE_CB,
        GC_IDLE_GREEDY,
-       GC_URGENT,
+       GC_URGENT_HIGH,
+       GC_URGENT_LOW,
 };
 
 enum {
@@ -1313,6 +1307,14 @@ enum fsync_mode {
 #define IS_DUMMY_WRITTEN_PAGE(page)                    \
                (page_private(page) == (unsigned long)DUMMY_WRITTEN_PAGE)
 
+#ifdef CONFIG_F2FS_IO_TRACE
+#define IS_IO_TRACED_PAGE(page)                        \
+               (page_private(page) > 0 &&              \
+                page_private(page) < (unsigned long)PID_MAX_LIMIT)
+#else
+#define IS_IO_TRACED_PAGE(page) (0)
+#endif
+
 #ifdef CONFIG_FS_ENCRYPTION
 #define DUMMY_ENCRYPTION_ENABLED(sbi) \
        (unlikely(F2FS_OPTION(sbi).dummy_enc_ctx.ctx != NULL))
@@ -1438,7 +1440,7 @@ struct f2fs_sb_info {
        unsigned long last_time[MAX_TIME];      /* to store time in jiffies */
        long interval_time[MAX_TIME];           /* to store thresholds */
 
-       struct inode_management im[MAX_INO_ENTRY];      /* manage inode cache */
+       struct inode_management im[MAX_INO_ENTRY];      /* manage inode cache */
 
        spinlock_t fsync_node_lock;             /* for node entry lock */
        struct list_head fsync_node_list;       /* node list head */
@@ -1516,8 +1518,9 @@ struct f2fs_sb_info {
        unsigned int cur_victim_sec;            /* current victim section num */
        unsigned int gc_mode;                   /* current GC state */
        unsigned int next_victim_seg[2];        /* next segment in victim section */
+
        /* for skip statistic */
-       unsigned int atomic_files;              /* # of opened atomic file */
+       unsigned int atomic_files;              /* # of opened atomic file */
        unsigned long long skipped_atomic_files[2];     /* FG_GC and BG_GC */
        unsigned long long skipped_gc_rwsem;            /* FG_GC only */
 
@@ -2456,7 +2459,7 @@ static inline void *f2fs_kmem_cache_alloc(struct kmem_cache *cachep,
 
 static inline bool is_idle(struct f2fs_sb_info *sbi, int type)
 {
-       if (sbi->gc_mode == GC_URGENT)
+       if (sbi->gc_mode == GC_URGENT_HIGH)
                return true;
 
        if (get_pages(sbi, F2FS_RD_DATA) || get_pages(sbi, F2FS_RD_NODE) ||
@@ -2474,6 +2477,10 @@ static inline bool is_idle(struct f2fs_sb_info *sbi, int type)
                        atomic_read(&SM_I(sbi)->fcc_info->queued_flush))
                return false;
 
+       if (sbi->gc_mode == GC_URGENT_LOW &&
+                       (type == DISCARD_TIME || type == GC_TIME))
+               return true;
+
        return f2fs_time_over(sbi, type);
 }
 
@@ -2649,7 +2656,7 @@ static inline void __mark_inode_dirty_flag(struct inode *inode,
 
 static inline void set_inode_flag(struct inode *inode, int flag)
 {
-       test_and_set_bit(flag, F2FS_I(inode)->flags);
+       set_bit(flag, F2FS_I(inode)->flags);
        __mark_inode_dirty_flag(inode, flag, true);
 }
 
@@ -2660,7 +2667,7 @@ static inline int is_inode_flag_set(struct inode *inode, int flag)
 
 static inline void clear_inode_flag(struct inode *inode, int flag)
 {
-       test_and_clear_bit(flag, F2FS_I(inode)->flags);
+       clear_bit(flag, F2FS_I(inode)->flags);
        __mark_inode_dirty_flag(inode, flag, false);
 }
 
@@ -3275,7 +3282,7 @@ void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid);
 struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid);
 struct page *f2fs_get_node_page_ra(struct page *parent, int start);
 int f2fs_move_node_page(struct page *node_page, int gc_type);
-int f2fs_flush_inline_data(struct f2fs_sb_info *sbi);
+void f2fs_flush_inline_data(struct f2fs_sb_info *sbi);
 int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
                        struct writeback_control *wbc, bool atomic,
                        unsigned int *seq_id);
@@ -3287,7 +3294,7 @@ bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid);
 void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid);
 void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid);
 int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink);
-void f2fs_recover_inline_xattr(struct inode *inode, struct page *page);
+int f2fs_recover_inline_xattr(struct inode *inode, struct page *page);
 int f2fs_recover_xattr_data(struct inode *inode, struct page *page);
 int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page);
 int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
@@ -3325,9 +3332,10 @@ block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi);
 int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable);
 void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi);
 int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra);
-void allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
+void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
                                        unsigned int start, unsigned int end);
-void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi, int type);
+void f2fs_allocate_new_segment(struct f2fs_sb_info *sbi, int type);
+void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi);
 int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range);
 bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi,
                                        struct cp_control *cpc);
@@ -3350,7 +3358,7 @@ void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
 void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
                        block_t old_blkaddr, block_t *new_blkaddr,
                        struct f2fs_summary *sum, int type,
-                       struct f2fs_io_info *fio, bool add_list);
+                       struct f2fs_io_info *fio);
 void f2fs_wait_on_page_writeback(struct page *page,
                        enum page_type type, bool ordered, bool locked);
 void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr);
@@ -3448,7 +3456,7 @@ struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
 struct page *f2fs_get_new_data_page(struct inode *inode,
                        struct page *ipage, pgoff_t index, bool new_i_size);
 int f2fs_do_write_data_page(struct f2fs_io_info *fio);
-void __do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock);
+void f2fs_do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock);
 int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
                        int create, int flag);
 int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
@@ -3536,6 +3544,9 @@ struct f2fs_stat_info {
        int curseg[NR_CURSEG_TYPE];
        int cursec[NR_CURSEG_TYPE];
        int curzone[NR_CURSEG_TYPE];
+       unsigned int dirty_seg[NR_CURSEG_TYPE];
+       unsigned int full_seg[NR_CURSEG_TYPE];
+       unsigned int valid_blks[NR_CURSEG_TYPE];
 
        unsigned int meta_count[META_MAX];
        unsigned int segment_count[2];
@@ -3750,7 +3761,7 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page);
 int f2fs_convert_inline_inode(struct inode *inode);
 int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry);
 int f2fs_write_inline_data(struct inode *inode, struct page *page);
-bool f2fs_recover_inline_data(struct inode *inode, struct page *npage);
+int f2fs_recover_inline_data(struct inode *inode, struct page *npage);
 struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir,
                                        const struct f2fs_filename *fname,
                                        struct page **res_page);
@@ -3795,7 +3806,7 @@ struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root_cached *root,
 bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi,
                                                struct rb_root_cached *root);
 unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink);
-bool f2fs_init_extent_tree(struct inode *inode, struct f2fs_extent *i_ext);
+void f2fs_init_extent_tree(struct inode *inode, struct page *ipage);
 void f2fs_drop_extent_tree(struct inode *inode);
 unsigned int f2fs_destroy_extent_node(struct inode *inode);
 void f2fs_destroy_extent_tree(struct inode *inode);
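
Illustrative note (not part of the patch): the hunks above define the new F2FS_IOC_SEC_TRIM_FILE interface — _IOW(0xf5, 20, struct f2fs_sectrim_range) plus the F2FS_TRIM_FILE_DISCARD/ZEROOUT flags. Below is a minimal userspace sketch of invoking it; the ioctl declarations are copied from the header changes above, while the target path, the 4KiB-alignment comment and uint64_t in place of __u64 are assumptions for illustration only.

/* sec_trim_example.c — userspace sketch, not part of the patch. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

struct f2fs_sectrim_range {
	uint64_t start;
	uint64_t len;
	uint64_t flags;
};

#define F2FS_IOCTL_MAGIC	0xf5
#define F2FS_IOC_SEC_TRIM_FILE	_IOW(F2FS_IOCTL_MAGIC, 20, struct f2fs_sectrim_range)
#define F2FS_TRIM_FILE_DISCARD	0x1	/* needs a device with discard support */
#define F2FS_TRIM_FILE_ZEROOUT	0x2

int main(int argc, char **argv)
{
	struct f2fs_sectrim_range range;
	int fd, ret;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <file-on-f2fs>\n", argv[0]);
		return 1;
	}

	/* The handler rejects fds without FMODE_WRITE. */
	fd = open(argv[1], O_WRONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&range, 0, sizeof(range));
	range.start = 0;		/* must be block (4KiB) aligned */
	range.len = (uint64_t)-1;	/* -1 means "to the end of the file" */
	range.flags = F2FS_TRIM_FILE_DISCARD | F2FS_TRIM_FILE_ZEROOUT;

	ret = ioctl(fd, F2FS_IOC_SEC_TRIM_FILE, &range);
	if (ret)
		perror("F2FS_IOC_SEC_TRIM_FILE");

	close(fd);
	return ret ? 1 : 0;
}

As the f2fs_sec_trim_file() hunk further below enforces, flags must be non-zero and within F2FS_TRIM_FILE_MASK, the start offset must be block-aligned, and the file must be a plain (non-atomic, non-compressed) regular file.
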
index 3268f8d..8a42240 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/uuid.h>
 #include <linux/file.h>
 #include <linux/nls.h>
+#include <linux/sched/signal.h>
 
 #include "f2fs.h"
 #include "node.h"
@@ -105,11 +106,11 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
 
        if (need_alloc) {
                /* block allocation */
-               __do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
+               f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
                set_new_dnode(&dn, inode, NULL, NULL, 0);
                err = f2fs_get_block(&dn, page->index);
                f2fs_put_dnode(&dn);
-               __do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
+               f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
        }
 
 #ifdef CONFIG_F2FS_FS_COMPRESSION
@@ -1373,8 +1374,6 @@ static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
        truncate_pagecache(inode, offset);
 
        new_size = i_size_read(inode) - len;
-       truncate_pagecache(inode, new_size);
-
        ret = f2fs_truncate_blocks(inode, new_size, true);
        up_write(&F2FS_I(inode)->i_mmap_sem);
        if (!ret)
@@ -1660,7 +1659,7 @@ next_alloc:
                map.m_seg_type = CURSEG_COLD_DATA_PINNED;
 
                f2fs_lock_op(sbi);
-               f2fs_allocate_new_segments(sbi, CURSEG_COLD_DATA);
+               f2fs_allocate_new_segment(sbi, CURSEG_COLD_DATA);
                f2fs_unlock_op(sbi);
 
                err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);
@@ -2527,6 +2526,11 @@ do_more:
        }
 
        ret = f2fs_gc(sbi, range.sync, true, GET_SEGNO(sbi, range.start));
+       if (ret) {
+               if (ret == -EBUSY)
+                       ret = -EAGAIN;
+               goto out;
+       }
        range.start += BLKS_PER_SEC(sbi);
        if (range.start <= end)
                goto do_more;
@@ -3359,7 +3363,7 @@ static int f2fs_ioc_measure_verity(struct file *filp, unsigned long arg)
        return fsverity_ioctl_measure(filp, (void __user *)arg);
 }
 
-static int f2fs_get_volume_name(struct file *filp, unsigned long arg)
+static int f2fs_ioc_getfslabel(struct file *filp, unsigned long arg)
 {
        struct inode *inode = file_inode(filp);
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
@@ -3385,7 +3389,7 @@ static int f2fs_get_volume_name(struct file *filp, unsigned long arg)
        return err;
 }
 
-static int f2fs_set_volume_name(struct file *filp, unsigned long arg)
+static int f2fs_ioc_setfslabel(struct file *filp, unsigned long arg)
 {
        struct inode *inode = file_inode(filp);
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
@@ -3531,14 +3535,14 @@ static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
        if (ret)
                goto out;
 
-       if (!F2FS_I(inode)->i_compr_blocks)
-               goto out;
-
        F2FS_I(inode)->i_flags |= F2FS_IMMUTABLE_FL;
        f2fs_set_inode_flags(inode);
        inode->i_ctime = current_time(inode);
        f2fs_mark_inode_dirty_sync(inode, true);
 
+       if (!F2FS_I(inode)->i_compr_blocks)
+               goto out;
+
        down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
        down_write(&F2FS_I(inode)->i_mmap_sem);
 
@@ -3756,6 +3760,193 @@ out:
        return ret;
 }
 
+static int f2fs_secure_erase(struct block_device *bdev, struct inode *inode,
+               pgoff_t off, block_t block, block_t len, u32 flags)
+{
+       struct request_queue *q = bdev_get_queue(bdev);
+       sector_t sector = SECTOR_FROM_BLOCK(block);
+       sector_t nr_sects = SECTOR_FROM_BLOCK(len);
+       int ret = 0;
+
+       if (!q)
+               return -ENXIO;
+
+       if (flags & F2FS_TRIM_FILE_DISCARD)
+               ret = blkdev_issue_discard(bdev, sector, nr_sects, GFP_NOFS,
+                                               blk_queue_secure_erase(q) ?
+                                               BLKDEV_DISCARD_SECURE : 0);
+
+       if (!ret && (flags & F2FS_TRIM_FILE_ZEROOUT)) {
+               if (IS_ENCRYPTED(inode))
+                       ret = fscrypt_zeroout_range(inode, off, block, len);
+               else
+                       ret = blkdev_issue_zeroout(bdev, sector, nr_sects,
+                                       GFP_NOFS, 0);
+       }
+
+       return ret;
+}
+
+static int f2fs_sec_trim_file(struct file *filp, unsigned long arg)
+{
+       struct inode *inode = file_inode(filp);
+       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+       struct address_space *mapping = inode->i_mapping;
+       struct block_device *prev_bdev = NULL;
+       struct f2fs_sectrim_range range;
+       pgoff_t index, pg_end, prev_index = 0;
+       block_t prev_block = 0, len = 0;
+       loff_t end_addr;
+       bool to_end = false;
+       int ret = 0;
+
+       if (!(filp->f_mode & FMODE_WRITE))
+               return -EBADF;
+
+       if (copy_from_user(&range, (struct f2fs_sectrim_range __user *)arg,
+                               sizeof(range)))
+               return -EFAULT;
+
+       if (range.flags == 0 || (range.flags & ~F2FS_TRIM_FILE_MASK) ||
+                       !S_ISREG(inode->i_mode))
+               return -EINVAL;
+
+       if (((range.flags & F2FS_TRIM_FILE_DISCARD) &&
+                       !f2fs_hw_support_discard(sbi)) ||
+                       ((range.flags & F2FS_TRIM_FILE_ZEROOUT) &&
+                        IS_ENCRYPTED(inode) && f2fs_is_multi_device(sbi)))
+               return -EOPNOTSUPP;
+
+       file_start_write(filp);
+       inode_lock(inode);
+
+       if (f2fs_is_atomic_file(inode) || f2fs_compressed_file(inode) ||
+                       range.start >= inode->i_size) {
+               ret = -EINVAL;
+               goto err;
+       }
+
+       if (range.len == 0)
+               goto err;
+
+       if (inode->i_size - range.start > range.len) {
+               end_addr = range.start + range.len;
+       } else {
+               end_addr = range.len == (u64)-1 ?
+                       sbi->sb->s_maxbytes : inode->i_size;
+               to_end = true;
+       }
+
+       if (!IS_ALIGNED(range.start, F2FS_BLKSIZE) ||
+                       (!to_end && !IS_ALIGNED(end_addr, F2FS_BLKSIZE))) {
+               ret = -EINVAL;
+               goto err;
+       }
+
+       index = F2FS_BYTES_TO_BLK(range.start);
+       pg_end = DIV_ROUND_UP(end_addr, F2FS_BLKSIZE);
+
+       ret = f2fs_convert_inline_inode(inode);
+       if (ret)
+               goto err;
+
+       down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+       down_write(&F2FS_I(inode)->i_mmap_sem);
+
+       ret = filemap_write_and_wait_range(mapping, range.start,
+                       to_end ? LLONG_MAX : end_addr - 1);
+       if (ret)
+               goto out;
+
+       truncate_inode_pages_range(mapping, range.start,
+                       to_end ? -1 : end_addr - 1);
+
+       while (index < pg_end) {
+               struct dnode_of_data dn;
+               pgoff_t end_offset, count;
+               int i;
+
+               set_new_dnode(&dn, inode, NULL, NULL, 0);
+               ret = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
+               if (ret) {
+                       if (ret == -ENOENT) {
+                               index = f2fs_get_next_page_offset(&dn, index);
+                               continue;
+                       }
+                       goto out;
+               }
+
+               end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
+               count = min(end_offset - dn.ofs_in_node, pg_end - index);
+               for (i = 0; i < count; i++, index++, dn.ofs_in_node++) {
+                       struct block_device *cur_bdev;
+                       block_t blkaddr = f2fs_data_blkaddr(&dn);
+
+                       if (!__is_valid_data_blkaddr(blkaddr))
+                               continue;
+
+                       if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
+                                               DATA_GENERIC_ENHANCE)) {
+                               ret = -EFSCORRUPTED;
+                               f2fs_put_dnode(&dn);
+                               goto out;
+                       }
+
+                       cur_bdev = f2fs_target_device(sbi, blkaddr, NULL);
+                       if (f2fs_is_multi_device(sbi)) {
+                               int di = f2fs_target_device_index(sbi, blkaddr);
+
+                               blkaddr -= FDEV(di).start_blk;
+                       }
+
+                       if (len) {
+                               if (prev_bdev == cur_bdev &&
+                                               index == prev_index + len &&
+                                               blkaddr == prev_block + len) {
+                                       len++;
+                               } else {
+                                       ret = f2fs_secure_erase(prev_bdev,
+                                               inode, prev_index, prev_block,
+                                               len, range.flags);
+                                       if (ret) {
+                                               f2fs_put_dnode(&dn);
+                                               goto out;
+                                       }
+
+                                       len = 0;
+                               }
+                       }
+
+                       if (!len) {
+                               prev_bdev = cur_bdev;
+                               prev_index = index;
+                               prev_block = blkaddr;
+                               len = 1;
+                       }
+               }
+
+               f2fs_put_dnode(&dn);
+
+               if (fatal_signal_pending(current)) {
+                       ret = -EINTR;
+                       goto out;
+               }
+               cond_resched();
+       }
+
+       if (len)
+               ret = f2fs_secure_erase(prev_bdev, inode, prev_index,
+                               prev_block, len, range.flags);
+out:
+       up_write(&F2FS_I(inode)->i_mmap_sem);
+       up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+err:
+       inode_unlock(inode);
+       file_end_write(filp);
+
+       return ret;
+}
+
 long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
        if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
@@ -3764,11 +3955,11 @@ long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
                return -ENOSPC;
 
        switch (cmd) {
-       case F2FS_IOC_GETFLAGS:
+       case FS_IOC_GETFLAGS:
                return f2fs_ioc_getflags(filp, arg);
-       case F2FS_IOC_SETFLAGS:
+       case FS_IOC_SETFLAGS:
                return f2fs_ioc_setflags(filp, arg);
-       case F2FS_IOC_GETVERSION:
+       case FS_IOC_GETVERSION:
                return f2fs_ioc_getversion(filp, arg);
        case F2FS_IOC_START_ATOMIC_WRITE:
                return f2fs_ioc_start_atomic_write(filp);
@@ -3784,11 +3975,11 @@ long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
                return f2fs_ioc_shutdown(filp, arg);
        case FITRIM:
                return f2fs_ioc_fitrim(filp, arg);
-       case F2FS_IOC_SET_ENCRYPTION_POLICY:
+       case FS_IOC_SET_ENCRYPTION_POLICY:
                return f2fs_ioc_set_encryption_policy(filp, arg);
-       case F2FS_IOC_GET_ENCRYPTION_POLICY:
+       case FS_IOC_GET_ENCRYPTION_POLICY:
                return f2fs_ioc_get_encryption_policy(filp, arg);
-       case F2FS_IOC_GET_ENCRYPTION_PWSALT:
+       case FS_IOC_GET_ENCRYPTION_PWSALT:
                return f2fs_ioc_get_encryption_pwsalt(filp, arg);
        case FS_IOC_GET_ENCRYPTION_POLICY_EX:
                return f2fs_ioc_get_encryption_policy_ex(filp, arg);
@@ -3816,9 +4007,9 @@ long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
                return f2fs_ioc_flush_device(filp, arg);
        case F2FS_IOC_GET_FEATURES:
                return f2fs_ioc_get_features(filp, arg);
-       case F2FS_IOC_FSGETXATTR:
+       case FS_IOC_FSGETXATTR:
                return f2fs_ioc_fsgetxattr(filp, arg);
-       case F2FS_IOC_FSSETXATTR:
+       case FS_IOC_FSSETXATTR:
                return f2fs_ioc_fssetxattr(filp, arg);
        case F2FS_IOC_GET_PIN_FILE:
                return f2fs_ioc_get_pin_file(filp, arg);
@@ -3832,16 +4023,18 @@ long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
                return f2fs_ioc_enable_verity(filp, arg);
        case FS_IOC_MEASURE_VERITY:
                return f2fs_ioc_measure_verity(filp, arg);
-       case F2FS_IOC_GET_VOLUME_NAME:
-               return f2fs_get_volume_name(filp, arg);
-       case F2FS_IOC_SET_VOLUME_NAME:
-               return f2fs_set_volume_name(filp, arg);
+       case FS_IOC_GETFSLABEL:
+               return f2fs_ioc_getfslabel(filp, arg);
+       case FS_IOC_SETFSLABEL:
+               return f2fs_ioc_setfslabel(filp, arg);
        case F2FS_IOC_GET_COMPRESS_BLOCKS:
                return f2fs_get_compress_blocks(filp, arg);
        case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
                return f2fs_release_compress_blocks(filp, arg);
        case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
                return f2fs_reserve_compress_blocks(filp, arg);
+       case F2FS_IOC_SEC_TRIM_FILE:
+               return f2fs_sec_trim_file(filp, arg);
        default:
                return -ENOTTY;
        }
@@ -3966,14 +4159,14 @@ out:
 long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
        switch (cmd) {
-       case F2FS_IOC32_GETFLAGS:
-               cmd = F2FS_IOC_GETFLAGS;
+       case FS_IOC32_GETFLAGS:
+               cmd = FS_IOC_GETFLAGS;
                break;
-       case F2FS_IOC32_SETFLAGS:
-               cmd = F2FS_IOC_SETFLAGS;
+       case FS_IOC32_SETFLAGS:
+               cmd = FS_IOC_SETFLAGS;
                break;
-       case F2FS_IOC32_GETVERSION:
-               cmd = F2FS_IOC_GETVERSION;
+       case FS_IOC32_GETVERSION:
+               cmd = FS_IOC_GETVERSION;
                break;
        case F2FS_IOC_START_ATOMIC_WRITE:
        case F2FS_IOC_COMMIT_ATOMIC_WRITE:
@@ -3982,9 +4175,9 @@ long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
        case F2FS_IOC_ABORT_VOLATILE_WRITE:
        case F2FS_IOC_SHUTDOWN:
        case FITRIM:
-       case F2FS_IOC_SET_ENCRYPTION_POLICY:
-       case F2FS_IOC_GET_ENCRYPTION_PWSALT:
-       case F2FS_IOC_GET_ENCRYPTION_POLICY:
+       case FS_IOC_SET_ENCRYPTION_POLICY:
+       case FS_IOC_GET_ENCRYPTION_PWSALT:
+       case FS_IOC_GET_ENCRYPTION_POLICY:
        case FS_IOC_GET_ENCRYPTION_POLICY_EX:
        case FS_IOC_ADD_ENCRYPTION_KEY:
        case FS_IOC_REMOVE_ENCRYPTION_KEY:
@@ -3998,19 +4191,20 @@ long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
        case F2FS_IOC_MOVE_RANGE:
        case F2FS_IOC_FLUSH_DEVICE:
        case F2FS_IOC_GET_FEATURES:
-       case F2FS_IOC_FSGETXATTR:
-       case F2FS_IOC_FSSETXATTR:
+       case FS_IOC_FSGETXATTR:
+       case FS_IOC_FSSETXATTR:
        case F2FS_IOC_GET_PIN_FILE:
        case F2FS_IOC_SET_PIN_FILE:
        case F2FS_IOC_PRECACHE_EXTENTS:
        case F2FS_IOC_RESIZE_FS:
        case FS_IOC_ENABLE_VERITY:
        case FS_IOC_MEASURE_VERITY:
-       case F2FS_IOC_GET_VOLUME_NAME:
-       case F2FS_IOC_SET_VOLUME_NAME:
+       case FS_IOC_GETFSLABEL:
+       case FS_IOC_SETFSLABEL:
        case F2FS_IOC_GET_COMPRESS_BLOCKS:
        case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
        case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
+       case F2FS_IOC_SEC_TRIM_FILE:
                break;
        default:
                return -ENOIOCTLCMD;
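
Illustrative note (not part of the patch): the loop in f2fs_sec_trim_file() above coalesces per-block mappings into maximal runs — a run keeps growing while the block device, the file offset and the physical block all advance in lockstep, and it is flushed to f2fs_secure_erase() on any break and once more at the end. A standalone sketch of that coalescing logic follows; emit_erase() and the sample mappings are invented stand-ins.

/* run_merge_sketch.c — standalone illustration of the coalescing loop. */
#include <stdint.h>
#include <stdio.h>

typedef uint32_t block_t;

struct extent { int dev; uint64_t index; block_t blkaddr; };

/* Stand-in for f2fs_secure_erase(): just report the merged run. */
static int emit_erase(int dev, uint64_t index, block_t block, block_t len)
{
	printf("erase dev=%d file_off=%llu blk=%u len=%u\n",
	       dev, (unsigned long long)index, block, len);
	return 0;
}

int main(void)
{
	/* Sample per-block mapping: mostly contiguous, with one hole
	 * and one device switch. */
	struct extent map[] = {
		{0, 10, 100}, {0, 11, 101}, {0, 12, 102},
		{0, 20, 300}, {0, 21, 301},
		{1, 22, 40},
	};
	int prev_dev = -1;
	uint64_t prev_index = 0;
	block_t prev_block = 0, len = 0;
	size_t i;

	for (i = 0; i < sizeof(map) / sizeof(map[0]); i++) {
		struct extent *e = &map[i];

		if (len && e->dev == prev_dev &&
		    e->index == prev_index + len &&
		    e->blkaddr == prev_block + len) {
			len++;			/* extend the current run */
			continue;
		}
		if (len)			/* run broken: flush it */
			emit_erase(prev_dev, prev_index, prev_block, len);
		prev_dev = e->dev;		/* start a new run */
		prev_index = e->index;
		prev_block = e->blkaddr;
		len = 1;
	}
	if (len)				/* flush the trailing run */
		emit_erase(prev_dev, prev_index, prev_block, len);
	return 0;
}
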
index 5b95d5a..11b4add 100644 (file)
@@ -21,6 +21,9 @@
 #include "gc.h"
 #include <trace/events/f2fs.h>
 
+static unsigned int count_bits(const unsigned long *addr,
+                               unsigned int offset, unsigned int len);
+
 static int gc_thread_func(void *data)
 {
        struct f2fs_sb_info *sbi = data;
@@ -79,7 +82,7 @@ static int gc_thread_func(void *data)
                 * invalidated soon after by user update or deletion.
                 * So, I'd like to wait some time to collect dirty segments.
                 */
-               if (sbi->gc_mode == GC_URGENT) {
+               if (sbi->gc_mode == GC_URGENT_HIGH) {
                        wait_ms = gc_th->urgent_sleep_time;
                        down_write(&sbi->gc_lock);
                        goto do_gc;
@@ -173,7 +176,7 @@ static int select_gc_type(struct f2fs_sb_info *sbi, int gc_type)
                gc_mode = GC_CB;
                break;
        case GC_IDLE_GREEDY:
-       case GC_URGENT:
+       case GC_URGENT_HIGH:
                gc_mode = GC_GREEDY;
                break;
        }
@@ -187,14 +190,20 @@ static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
 
        if (p->alloc_mode == SSR) {
                p->gc_mode = GC_GREEDY;
-               p->dirty_segmap = dirty_i->dirty_segmap[type];
+               p->dirty_bitmap = dirty_i->dirty_segmap[type];
                p->max_search = dirty_i->nr_dirty[type];
                p->ofs_unit = 1;
        } else {
                p->gc_mode = select_gc_type(sbi, gc_type);
-               p->dirty_segmap = dirty_i->dirty_segmap[DIRTY];
-               p->max_search = dirty_i->nr_dirty[DIRTY];
                p->ofs_unit = sbi->segs_per_sec;
+               if (__is_large_section(sbi)) {
+                       p->dirty_bitmap = dirty_i->dirty_secmap;
+                       p->max_search = count_bits(p->dirty_bitmap,
+                                               0, MAIN_SECS(sbi));
+               } else {
+                       p->dirty_bitmap = dirty_i->dirty_segmap[DIRTY];
+                       p->max_search = dirty_i->nr_dirty[DIRTY];
+               }
        }
 
        /*
@@ -202,7 +211,7 @@ static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
         * foreground GC and urgent GC cases.
         */
        if (gc_type != FG_GC &&
-                       (sbi->gc_mode != GC_URGENT) &&
+                       (sbi->gc_mode != GC_URGENT_HIGH) &&
                        p->max_search > sbi->max_victim_search)
                p->max_search = sbi->max_victim_search;
 
@@ -321,6 +330,7 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi,
        unsigned int secno, last_victim;
        unsigned int last_segment;
        unsigned int nsearched = 0;
+       int ret = 0;
 
        mutex_lock(&dirty_i->seglist_lock);
        last_segment = MAIN_SECS(sbi) * sbi->segs_per_sec;
@@ -332,12 +342,19 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi,
        p.min_cost = get_max_cost(sbi, &p);
 
        if (*result != NULL_SEGNO) {
-               if (get_valid_blocks(sbi, *result, false) &&
-                       !sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result)))
+               if (!get_valid_blocks(sbi, *result, false)) {
+                       ret = -ENODATA;
+                       goto out;
+               }
+
+               if (sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result)))
+                       ret = -EBUSY;
+               else
                        p.min_segno = *result;
                goto out;
        }
 
+       ret = -ENODATA;
        if (p.max_search == 0)
                goto out;
 
@@ -365,10 +382,14 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi,
        }
 
        while (1) {
-               unsigned long cost;
-               unsigned int segno;
-
-               segno = find_next_bit(p.dirty_segmap, last_segment, p.offset);
+               unsigned long cost, *dirty_bitmap;
+               unsigned int unit_no, segno;
+
+               dirty_bitmap = p.dirty_bitmap;
+               unit_no = find_next_bit(dirty_bitmap,
+                               last_segment / p.ofs_unit,
+                               p.offset / p.ofs_unit);
+               segno = unit_no * p.ofs_unit;
                if (segno >= last_segment) {
                        if (sm->last_victim[p.gc_mode]) {
                                last_segment =
@@ -381,14 +402,7 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi,
                }
 
                p.offset = segno + p.ofs_unit;
-               if (p.ofs_unit > 1) {
-                       p.offset -= segno % p.ofs_unit;
-                       nsearched += count_bits(p.dirty_segmap,
-                                               p.offset - p.ofs_unit,
-                                               p.ofs_unit);
-               } else {
-                       nsearched++;
-               }
+               nsearched++;
 
 #ifdef CONFIG_F2FS_CHECK_FS
                /*
@@ -421,9 +435,10 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi,
 next:
                if (nsearched >= p.max_search) {
                        if (!sm->last_victim[p.gc_mode] && segno <= last_victim)
-                               sm->last_victim[p.gc_mode] = last_victim + 1;
+                               sm->last_victim[p.gc_mode] =
+                                       last_victim + p.ofs_unit;
                        else
-                               sm->last_victim[p.gc_mode] = segno + 1;
+                               sm->last_victim[p.gc_mode] = segno + p.ofs_unit;
                        sm->last_victim[p.gc_mode] %=
                                (MAIN_SECS(sbi) * sbi->segs_per_sec);
                        break;
@@ -440,6 +455,7 @@ got_result:
                        else
                                set_bit(secno, dirty_i->victim_secmap);
                }
+               ret = 0;
 
        }
 out:
@@ -449,7 +465,7 @@ out:
                                prefree_segments(sbi), free_segments(sbi));
        mutex_unlock(&dirty_i->seglist_lock);
 
-       return (p.min_segno == NULL_SEGNO) ? 0 : 1;
+       return ret;
 }
 
 static const struct victim_selection default_v_ops = {
@@ -833,8 +849,10 @@ static int move_data_block(struct inode *inode, block_t bidx,
 
        mpage = f2fs_grab_cache_page(META_MAPPING(fio.sbi),
                                        fio.old_blkaddr, false);
-       if (!mpage)
+       if (!mpage) {
+               err = -ENOMEM;
                goto up_out;
+       }
 
        fio.encrypted_page = mpage;
 
@@ -859,7 +877,7 @@ static int move_data_block(struct inode *inode, block_t bidx,
        }
 
        f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
-                                       &sum, CURSEG_COLD_DATA, NULL, false);
+                                       &sum, CURSEG_COLD_DATA, NULL);
 
        fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
                                newaddr, FGP_LOCK | FGP_CREAT, GFP_NOFS);
@@ -1333,10 +1351,9 @@ gc_more:
                ret = -EINVAL;
                goto stop;
        }
-       if (!__get_victim(sbi, &segno, gc_type)) {
-               ret = -ENODATA;
+       ret = __get_victim(sbi, &segno, gc_type);
+       if (ret)
                goto stop;
-       }
 
        seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type);
        if (gc_type == FG_GC && seg_freed == sbi->segs_per_sec)
@@ -1434,7 +1451,7 @@ static int free_segment_range(struct f2fs_sb_info *sbi,
 
        /* Move out cursegs from the target range */
        for (type = CURSEG_HOT_DATA; type < NR_CURSEG_TYPE; type++)
-               allocate_segment_for_resize(sbi, type, start, end);
+               f2fs_allocate_segment_for_resize(sbi, type, start, end);
 
        /* do GC to move out valid blocks in the range */
        for (segno = start; segno <= end; segno += sbi->segs_per_sec) {
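
Illustrative note (not part of the patch): with the dirty_bitmap change above, large-section victim search scans one bit per section instead of one per segment — find_next_bit() runs over last_segment / ofs_unit bits starting at offset / ofs_unit, and the hit is converted back with segno = unit_no * ofs_unit. A standalone sketch of that index arithmetic; find_next_bit() here is a simplified stand-in for the kernel helper and segs_per_sec = 4 is an arbitrary example value.

/* victim_scan_sketch.c — section-granularity scan illustration. */
#include <stdint.h>
#include <stdio.h>

static unsigned int find_next_bit(uint64_t bitmap, unsigned int size,
				  unsigned int start)
{
	unsigned int i;

	for (i = start; i < size; i++)
		if (bitmap & (1ULL << i))
			return i;
	return size;	/* no bit set: return "size", like the kernel helper */
}

int main(void)
{
	const unsigned int segs_per_sec = 4;	/* plays the role of p.ofs_unit */
	const unsigned int main_segs = 32;	/* plays the role of last_segment */
	/* dirty_secmap: one bit per *section*; sections 2 and 5 are dirty. */
	uint64_t dirty_secmap = (1ULL << 2) | (1ULL << 5);
	unsigned int offset = 0;

	while (1) {
		unsigned int unit_no = find_next_bit(dirty_secmap,
						     main_segs / segs_per_sec,
						     offset / segs_per_sec);
		unsigned int segno = unit_no * segs_per_sec;

		if (segno >= main_segs)
			break;
		printf("candidate section %u -> first segment %u\n",
		       unit_no, segno);
		/* advance a whole section, as p.offset = segno + p.ofs_unit */
		offset = segno + segs_per_sec;
	}
	return 0;
}
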
index dbade31..102df44 100644 (file)
@@ -12,6 +12,7 @@
 
 #include "f2fs.h"
 #include "node.h"
+#include <trace/events/f2fs.h>
 
 bool f2fs_may_inline_data(struct inode *inode)
 {
@@ -253,7 +254,7 @@ int f2fs_write_inline_data(struct inode *inode, struct page *page)
        return 0;
 }
 
-bool f2fs_recover_inline_data(struct inode *inode, struct page *npage)
+int f2fs_recover_inline_data(struct inode *inode, struct page *npage)
 {
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct f2fs_inode *ri = NULL;
@@ -275,7 +276,8 @@ bool f2fs_recover_inline_data(struct inode *inode, struct page *npage)
                        ri && (ri->i_inline & F2FS_INLINE_DATA)) {
 process_inline:
                ipage = f2fs_get_node_page(sbi, inode->i_ino);
-               f2fs_bug_on(sbi, IS_ERR(ipage));
+               if (IS_ERR(ipage))
+                       return PTR_ERR(ipage);
 
                f2fs_wait_on_page_writeback(ipage, NODE, true, true);
 
@@ -288,21 +290,25 @@ process_inline:
 
                set_page_dirty(ipage);
                f2fs_put_page(ipage, 1);
-               return true;
+               return 1;
        }
 
        if (f2fs_has_inline_data(inode)) {
                ipage = f2fs_get_node_page(sbi, inode->i_ino);
-               f2fs_bug_on(sbi, IS_ERR(ipage));
+               if (IS_ERR(ipage))
+                       return PTR_ERR(ipage);
                f2fs_truncate_inline_inode(inode, ipage, 0);
                clear_inode_flag(inode, FI_INLINE_DATA);
                f2fs_put_page(ipage, 1);
        } else if (ri && (ri->i_inline & F2FS_INLINE_DATA)) {
-               if (f2fs_truncate_blocks(inode, 0, false))
-                       return false;
+               int ret;
+
+               ret = f2fs_truncate_blocks(inode, 0, false);
+               if (ret)
+                       return ret;
                goto process_inline;
        }
-       return false;
+       return 0;
 }
 
 struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir,
@@ -776,6 +782,7 @@ int f2fs_inline_data_fiemap(struct inode *inode,
        byteaddr += (char *)inline_data_addr(inode, ipage) -
                                        (char *)F2FS_INODE(ipage);
        err = fiemap_fill_next_extent(fieinfo, start, byteaddr, ilen, flags);
+       trace_f2fs_fiemap(inode, start, byteaddr, ilen, flags, err);
 out:
        f2fs_put_page(ipage, 1);
        return err;
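
Illustrative note (not part of the patch): after the bool-to-int conversion above, f2fs_recover_inline_data() reports a negative errno on failure, 1 when inline data was recovered (so index recovery can be skipped), and 0 to continue — the convention the recovery hunk further below consumes by mapping 1 back to 0. A standalone sketch of that tri-state handling, with hypothetical stand-in helpers.

/* tristate_sketch.c — illustration of the <0 / 1 / 0 return convention. */
#include <errno.h>
#include <stdio.h>

static int recover_inline(int have_inline)
{
	if (have_inline < 0)
		return -EIO;		/* propagate a real error */
	return have_inline ? 1 : 0;	/* 1: recovered, stop; 0: continue */
}

static int recover_indices(void)
{
	puts("recovering data indices");
	return 0;
}

static int do_recover(int have_inline)
{
	int err = recover_inline(have_inline);

	if (err) {
		if (err == 1)		/* inline data handled everything */
			err = 0;
		return err;		/* done, or a genuine error */
	}
	return recover_indices();	/* fall through to index recovery */
}

int main(void)
{
	printf("inline case  -> %d\n", do_recover(1));
	printf("regular case -> %d\n", do_recover(0));
	printf("error case   -> %d\n", do_recover(-1));
	return 0;
}
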
index 44582a4..66969ae 100644 (file)
@@ -367,8 +367,7 @@ static int do_read_inode(struct inode *inode)
        fi->i_pino = le32_to_cpu(ri->i_pino);
        fi->i_dir_level = ri->i_dir_level;
 
-       if (f2fs_init_extent_tree(inode, &ri->i_ext))
-               set_page_dirty(node_page);
+       f2fs_init_extent_tree(inode, node_page);
 
        get_inline_info(inode, ri);
 
@@ -402,6 +401,7 @@ static int do_read_inode(struct inode *inode)
 
        /* try to recover cold bit for non-dir inode */
        if (!S_ISDIR(inode->i_mode) && !is_cold_node(node_page)) {
+               f2fs_wait_on_page_writeback(node_page, NODE, true, true);
                set_cold_node(node_page, false);
                set_page_dirty(node_page);
        }
index e94e02c..84e4bbc 100644 (file)
@@ -569,15 +569,17 @@ static int f2fs_unlink(struct inode *dir, struct dentry *dentry)
 
        trace_f2fs_unlink_enter(dir, dentry);
 
-       if (unlikely(f2fs_cp_error(sbi)))
-               return -EIO;
+       if (unlikely(f2fs_cp_error(sbi))) {
+               err = -EIO;
+               goto fail;
+       }
 
        err = dquot_initialize(dir);
        if (err)
-               return err;
+               goto fail;
        err = dquot_initialize(inode);
        if (err)
-               return err;
+               goto fail;
 
        de = f2fs_find_entry(dir, &dentry->d_name, &page);
        if (!de) {
@@ -600,7 +602,7 @@ static int f2fs_unlink(struct inode *dir, struct dentry *dentry)
        /* VFS negative dentries are incompatible with Encoding and
         * Case-insensitiveness. Eventually we'll want avoid
         * invalidating the dentries here, alongside with returning the
-        * negative dentries at f2fs_lookup(), when it is  better
+        * negative dentries at f2fs_lookup(), when it is better
         * supported by the VFS for the CI case.
         */
        if (IS_CASEFOLDED(dir))
@@ -1285,7 +1287,7 @@ static const char *f2fs_encrypted_get_link(struct dentry *dentry,
 }
 
 const struct inode_operations f2fs_encrypted_symlink_inode_operations = {
-       .get_link       = f2fs_encrypted_get_link,
+       .get_link       = f2fs_encrypted_get_link,
        .getattr        = f2fs_getattr,
        .setattr        = f2fs_setattr,
        .listxattr      = f2fs_listxattr,
@@ -1311,7 +1313,7 @@ const struct inode_operations f2fs_dir_inode_operations = {
 };
 
 const struct inode_operations f2fs_symlink_inode_operations = {
-       .get_link       = f2fs_get_link,
+       .get_link       = f2fs_get_link,
        .getattr        = f2fs_getattr,
        .setattr        = f2fs_setattr,
        .listxattr      = f2fs_listxattr,
@@ -1319,7 +1321,7 @@ const struct inode_operations f2fs_symlink_inode_operations = {
 
 const struct inode_operations f2fs_special_inode_operations = {
        .getattr        = f2fs_getattr,
-       .setattr        = f2fs_setattr,
+       .setattr        = f2fs_setattr,
        .get_acl        = f2fs_get_acl,
        .set_acl        = f2fs_set_acl,
        .listxattr      = f2fs_listxattr,
index 03e24df..9bbaa26 100644 (file)
@@ -1041,8 +1041,10 @@ int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from)
        trace_f2fs_truncate_inode_blocks_enter(inode, from);
 
        level = get_node_path(inode, from, offset, noffset);
-       if (level < 0)
+       if (level < 0) {
+               trace_f2fs_truncate_inode_blocks_exit(inode, level);
                return level;
+       }
 
        page = f2fs_get_node_page(sbi, inode->i_ino);
        if (IS_ERR(page)) {
@@ -1726,7 +1728,7 @@ continue_unlock:
                                        set_dentry_mark(page,
                                                f2fs_need_dentry_mark(sbi, ino));
                                }
-                               /*  may be written by other thread */
+                               /* may be written by other thread */
                                if (!PageDirty(page))
                                        set_page_dirty(page);
                        }
@@ -1814,12 +1816,11 @@ static bool flush_dirty_inode(struct page *page)
        return true;
 }
 
-int f2fs_flush_inline_data(struct f2fs_sb_info *sbi)
+void f2fs_flush_inline_data(struct f2fs_sb_info *sbi)
 {
        pgoff_t index = 0;
        struct pagevec pvec;
        int nr_pages;
-       int ret = 0;
 
        pagevec_init(&pvec);
 
@@ -1858,7 +1859,6 @@ continue_unlock:
                pagevec_release(&pvec);
                cond_resched();
        }
-       return ret;
 }
 
 int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
@@ -1924,8 +1924,12 @@ continue_unlock:
                                goto continue_unlock;
                        }
 
-                       /* flush inline_data, if it's async context. */
-                       if (do_balance && is_inline_node(page)) {
+                       /* flush inline_data/inode, if it's async context. */
+                       if (!do_balance)
+                               goto write_node;
+
+                       /* flush inline_data */
+                       if (is_inline_node(page)) {
                                clear_inline_node(page);
                                unlock_page(page);
                                flush_inline_data(sbi, ino_of_node(page));
@@ -1938,7 +1942,7 @@ continue_unlock:
                                if (flush_dirty_inode(page))
                                        goto lock_node;
                        }
-
+write_node:
                        f2fs_wait_on_page_writeback(page, NODE, true, true);
 
                        if (!clear_page_dirty_for_io(page))
@@ -2097,7 +2101,7 @@ const struct address_space_operations f2fs_node_aops = {
        .invalidatepage = f2fs_invalidate_page,
        .releasepage    = f2fs_release_page,
 #ifdef CONFIG_MIGRATION
-       .migratepage    = f2fs_migrate_page,
+       .migratepage    = f2fs_migrate_page,
 #endif
 };
 
@@ -2108,7 +2112,7 @@ static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
 }
 
 static int __insert_free_nid(struct f2fs_sb_info *sbi,
-                       struct free_nid *i, enum nid_state state)
+                               struct free_nid *i)
 {
        struct f2fs_nm_info *nm_i = NM_I(sbi);
 
@@ -2116,10 +2120,8 @@ static int __insert_free_nid(struct f2fs_sb_info *sbi,
        if (err)
                return err;
 
-       f2fs_bug_on(sbi, state != i->state);
-       nm_i->nid_cnt[state]++;
-       if (state == FREE_NID)
-               list_add_tail(&i->list, &nm_i->free_nid_list);
+       nm_i->nid_cnt[FREE_NID]++;
+       list_add_tail(&i->list, &nm_i->free_nid_list);
        return 0;
 }
 
@@ -2241,7 +2243,7 @@ static bool add_free_nid(struct f2fs_sb_info *sbi,
                }
        }
        ret = true;
-       err = __insert_free_nid(sbi, i, FREE_NID);
+       err = __insert_free_nid(sbi, i);
 err_out:
        if (update) {
                update_free_nid_bitmap(sbi, nid, ret, build);
@@ -2572,7 +2574,7 @@ int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
        return nr - nr_shrink;
 }
 
-void f2fs_recover_inline_xattr(struct inode *inode, struct page *page)
+int f2fs_recover_inline_xattr(struct inode *inode, struct page *page)
 {
        void *src_addr, *dst_addr;
        size_t inline_size;
@@ -2580,7 +2582,8 @@ void f2fs_recover_inline_xattr(struct inode *inode, struct page *page)
        struct f2fs_inode *ri;
 
        ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
-       f2fs_bug_on(F2FS_I_SB(inode), IS_ERR(ipage));
+       if (IS_ERR(ipage))
+               return PTR_ERR(ipage);
 
        ri = F2FS_INODE(page);
        if (ri->i_inline & F2FS_INLINE_XATTR) {
@@ -2599,6 +2602,7 @@ void f2fs_recover_inline_xattr(struct inode *inode, struct page *page)
 update_inode:
        f2fs_update_inode(inode, ipage);
        f2fs_put_page(ipage, 1);
+       return 0;
 }
 
 int f2fs_recover_xattr_data(struct inode *inode, struct page *page)
index ae5310f..4f12ade 100644 (file)
@@ -544,7 +544,9 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
 
        /* step 1: recover xattr */
        if (IS_INODE(page)) {
-               f2fs_recover_inline_xattr(inode, page);
+               err = f2fs_recover_inline_xattr(inode, page);
+               if (err)
+                       goto out;
        } else if (f2fs_has_xattr_block(ofs_of_node(page))) {
                err = f2fs_recover_xattr_data(inode, page);
                if (!err)
@@ -553,8 +555,12 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
        }
 
        /* step 2: recover inline data */
-       if (f2fs_recover_inline_data(inode, page))
+       err = f2fs_recover_inline_data(inode, page);
+       if (err) {
+               if (err == 1)
+                       err = 0;
                goto out;
+       }
 
        /* step 3: recover data indices */
        start = f2fs_start_bidx_of_node(ofs_of_node(page), inode);
@@ -742,7 +748,7 @@ next:
                f2fs_put_page(page, 1);
        }
        if (!err)
-               f2fs_allocate_new_segments(sbi, NO_CHECK_TYPE);
+               f2fs_allocate_new_segments(sbi);
        return err;
 }
 
index 196f315..a65d357 100644 (file)
@@ -174,7 +174,7 @@ bool f2fs_need_SSR(struct f2fs_sb_info *sbi)
 
        if (f2fs_lfs_mode(sbi))
                return false;
-       if (sbi->gc_mode == GC_URGENT)
+       if (sbi->gc_mode == GC_URGENT_HIGH)
                return true;
        if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
                return true;
@@ -796,6 +796,18 @@ static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
                }
                if (!test_and_set_bit(segno, dirty_i->dirty_segmap[t]))
                        dirty_i->nr_dirty[t]++;
+
+               if (__is_large_section(sbi)) {
+                       unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
+                       unsigned short valid_blocks =
+                               get_valid_blocks(sbi, segno, true);
+
+                       f2fs_bug_on(sbi, unlikely(!valid_blocks ||
+                                       valid_blocks == BLKS_PER_SEC(sbi)));
+
+                       if (!IS_CURSEC(sbi, secno))
+                               set_bit(secno, dirty_i->dirty_secmap);
+               }
        }
 }
 
@@ -803,6 +815,7 @@ static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
                enum dirty_type dirty_type)
 {
        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
+       unsigned short valid_blocks;
 
        if (test_and_clear_bit(segno, dirty_i->dirty_segmap[dirty_type]))
                dirty_i->nr_dirty[dirty_type]--;
@@ -814,13 +827,26 @@ static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
                if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
                        dirty_i->nr_dirty[t]--;
 
-               if (get_valid_blocks(sbi, segno, true) == 0) {
+               valid_blocks = get_valid_blocks(sbi, segno, true);
+               if (valid_blocks == 0) {
                        clear_bit(GET_SEC_FROM_SEG(sbi, segno),
                                                dirty_i->victim_secmap);
 #ifdef CONFIG_F2FS_CHECK_FS
                        clear_bit(segno, SIT_I(sbi)->invalid_segmap);
 #endif
                }
+               if (__is_large_section(sbi)) {
+                       unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
+
+                       if (!valid_blocks ||
+                                       valid_blocks == BLKS_PER_SEC(sbi)) {
+                               clear_bit(secno, dirty_i->dirty_secmap);
+                               return;
+                       }
+
+                       if (!IS_CURSEC(sbi, secno))
+                               set_bit(secno, dirty_i->dirty_secmap);
+               }
        }
 }
 
@@ -1733,7 +1759,7 @@ static int issue_discard_thread(void *data)
                        continue;
                }
 
-               if (sbi->gc_mode == GC_URGENT)
+               if (sbi->gc_mode == GC_URGENT_HIGH)
                        __init_discard_policy(sbi, &dpolicy, DPOLICY_FORCE, 1);
 
                sb_start_intwrite(sbi->sb);
@@ -2140,7 +2166,7 @@ static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
        new_vblocks = se->valid_blocks + del;
        offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
 
-       f2fs_bug_on(sbi, (new_vblocks >> (sizeof(unsigned short) << 3) ||
+       f2fs_bug_on(sbi, (new_vblocks < 0 ||
                                (new_vblocks > sbi->blocks_per_seg)));
 
        se->valid_blocks = new_vblocks;
@@ -2605,7 +2631,7 @@ static int get_ssr_segment(struct f2fs_sb_info *sbi, int type)
        bool reversed = false;
 
        /* f2fs_need_SSR() already forces to do this */
-       if (v_ops->get_victim(sbi, &segno, BG_GC, type, SSR)) {
+       if (!v_ops->get_victim(sbi, &segno, BG_GC, type, SSR)) {
                curseg->next_segno = segno;
                return 1;
        }
@@ -2632,7 +2658,7 @@ static int get_ssr_segment(struct f2fs_sb_info *sbi, int type)
        for (; cnt-- > 0; reversed ? i-- : i++) {
                if (i == type)
                        continue;
-               if (v_ops->get_victim(sbi, &segno, BG_GC, i, SSR)) {
+               if (!v_ops->get_victim(sbi, &segno, BG_GC, i, SSR)) {
                        curseg->next_segno = segno;
                        return 1;
                }
@@ -2674,7 +2700,7 @@ static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
        stat_inc_seg_type(sbi, curseg);
 }
 
-void allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
+void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
                                        unsigned int start, unsigned int end)
 {
        struct curseg_info *curseg = CURSEG_I(sbi, type);
@@ -2707,28 +2733,35 @@ unlock:
        up_read(&SM_I(sbi)->curseg_lock);
 }
 
-void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi, int type)
+static void __allocate_new_segment(struct f2fs_sb_info *sbi, int type)
 {
-       struct curseg_info *curseg;
+       struct curseg_info *curseg = CURSEG_I(sbi, type);
        unsigned int old_segno;
-       int i;
 
-       down_write(&SIT_I(sbi)->sentry_lock);
+       if (!curseg->next_blkoff &&
+               !get_valid_blocks(sbi, curseg->segno, false) &&
+               !get_ckpt_valid_blocks(sbi, curseg->segno))
+               return;
 
-       for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
-               if (type != NO_CHECK_TYPE && i != type)
-                       continue;
+       old_segno = curseg->segno;
+       SIT_I(sbi)->s_ops->allocate_segment(sbi, type, true);
+       locate_dirty_segment(sbi, old_segno);
+}
 
-               curseg = CURSEG_I(sbi, i);
-               if (type == NO_CHECK_TYPE || curseg->next_blkoff ||
-                               get_valid_blocks(sbi, curseg->segno, false) ||
-                               get_ckpt_valid_blocks(sbi, curseg->segno)) {
-                       old_segno = curseg->segno;
-                       SIT_I(sbi)->s_ops->allocate_segment(sbi, i, true);
-                       locate_dirty_segment(sbi, old_segno);
-               }
-       }
+void f2fs_allocate_new_segment(struct f2fs_sb_info *sbi, int type)
+{
+       down_write(&SIT_I(sbi)->sentry_lock);
+       __allocate_new_segment(sbi, type);
+       up_write(&SIT_I(sbi)->sentry_lock);
+}
 
+void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi)
+{
+       int i;
+
+       down_write(&SIT_I(sbi)->sentry_lock);
+       for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++)
+               __allocate_new_segment(sbi, i);
        up_write(&SIT_I(sbi)->sentry_lock);
 }
 
@@ -3089,7 +3122,7 @@ static int __get_segment_type(struct f2fs_io_info *fio)
 void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
                block_t old_blkaddr, block_t *new_blkaddr,
                struct f2fs_summary *sum, int type,
-               struct f2fs_io_info *fio, bool add_list)
+               struct f2fs_io_info *fio)
 {
        struct sit_info *sit_i = SIT_I(sbi);
        struct curseg_info *curseg = CURSEG_I(sbi, type);
@@ -3107,14 +3140,6 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
                type = CURSEG_COLD_DATA;
        }
 
-       /*
-        * We need to wait for node_write to avoid block allocation during
-        * checkpoint. This can only happen to quota writes which can cause
-        * the below discard race condition.
-        */
-       if (IS_DATASEG(type))
-               down_write(&sbi->node_write);
-
        down_read(&SM_I(sbi)->curseg_lock);
 
        mutex_lock(&curseg->curseg_mutex);
@@ -3165,7 +3190,7 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
        if (F2FS_IO_ALIGNED(sbi))
                fio->retry = false;
 
-       if (add_list) {
+       if (fio) {
                struct f2fs_bio_info *io;
 
                INIT_LIST_HEAD(&fio->list);
@@ -3180,9 +3205,6 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
 
        up_read(&SM_I(sbi)->curseg_lock);
 
-       if (IS_DATASEG(type))
-               up_write(&sbi->node_write);
-
        if (put_pin_sem)
                up_read(&sbi->pin_sem);
 }
@@ -3217,7 +3239,7 @@ static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
                down_read(&fio->sbi->io_order_lock);
 reallocate:
        f2fs_allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
-                       &fio->new_blkaddr, sum, type, fio, true);
+                       &fio->new_blkaddr, sum, type, fio);
        if (GET_SEGNO(fio->sbi, fio->old_blkaddr) != NULL_SEGNO)
                invalidate_mapping_pages(META_MAPPING(fio->sbi),
                                        fio->old_blkaddr, fio->old_blkaddr);
@@ -4293,8 +4315,9 @@ static void init_dirty_segmap(struct f2fs_sb_info *sbi)
 {
        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
        struct free_segmap_info *free_i = FREE_I(sbi);
-       unsigned int segno = 0, offset = 0;
+       unsigned int segno = 0, offset = 0, secno;
        unsigned short valid_blocks;
+       unsigned short blks_per_sec = BLKS_PER_SEC(sbi);
 
        while (1) {
                /* find dirty segment based on free segmap */
@@ -4313,6 +4336,22 @@ static void init_dirty_segmap(struct f2fs_sb_info *sbi)
                __locate_dirty_segment(sbi, segno, DIRTY);
                mutex_unlock(&dirty_i->seglist_lock);
        }
+
+       if (!__is_large_section(sbi))
+               return;
+
+       mutex_lock(&dirty_i->seglist_lock);
+       for (segno = 0; segno < MAIN_SECS(sbi); segno += blks_per_sec) {
+               valid_blocks = get_valid_blocks(sbi, segno, true);
+               secno = GET_SEC_FROM_SEG(sbi, segno);
+
+               if (!valid_blocks || valid_blocks == blks_per_sec)
+                       continue;
+               if (IS_CURSEC(sbi, secno))
+                       continue;
+               set_bit(secno, dirty_i->dirty_secmap);
+       }
+       mutex_unlock(&dirty_i->seglist_lock);
 }
 
 static int init_victim_secmap(struct f2fs_sb_info *sbi)
@@ -4349,6 +4388,14 @@ static int build_dirty_segmap(struct f2fs_sb_info *sbi)
                        return -ENOMEM;
        }
 
+       if (__is_large_section(sbi)) {
+               bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
+               dirty_i->dirty_secmap = f2fs_kvzalloc(sbi,
+                                               bitmap_size, GFP_KERNEL);
+               if (!dirty_i->dirty_secmap)
+                       return -ENOMEM;
+       }
+
        init_dirty_segmap(sbi);
        return init_victim_secmap(sbi);
 }
@@ -4775,6 +4822,12 @@ static void destroy_dirty_segmap(struct f2fs_sb_info *sbi)
        for (i = 0; i < NR_DIRTY_TYPE; i++)
                discard_dirty_segmap(sbi, i);
 
+       if (__is_large_section(sbi)) {
+               mutex_lock(&dirty_i->seglist_lock);
+               kvfree(dirty_i->dirty_secmap);
+               mutex_unlock(&dirty_i->seglist_lock);
+       }
+
        destroy_victim_secmap(sbi);
        SM_I(sbi)->dirty_info = NULL;
        kvfree(dirty_i);
index cba16cc..752b177 100644 (file)
@@ -166,8 +166,11 @@ enum {
 struct victim_sel_policy {
        int alloc_mode;                 /* LFS or SSR */
        int gc_mode;                    /* GC_CB or GC_GREEDY */
-       unsigned long *dirty_segmap;    /* dirty segment bitmap */
-       unsigned int max_search;        /* maximum # of segments to search */
+       unsigned long *dirty_bitmap;    /* dirty segment/section bitmap */
+       unsigned int max_search;        /*
+                                        * maximum # of segments/sections
+                                        * to search
+                                        */
        unsigned int offset;            /* last scanned bitmap offset */
        unsigned int ofs_unit;          /* bitmap search unit */
        unsigned int min_cost;          /* minimum cost */
@@ -184,7 +187,7 @@ struct seg_entry {
        unsigned char *cur_valid_map_mir;       /* mirror of current valid bitmap */
 #endif
        /*
-        * # of valid blocks and the validity bitmap stored in the the last
+        * # of valid blocks and the validity bitmap stored in the last
         * checkpoint pack. This information is used by the SSR mode.
         */
        unsigned char *ckpt_valid_map;  /* validity bitmap of blocks last cp */
@@ -266,6 +269,7 @@ enum dirty_type {
 struct dirty_seglist_info {
        const struct victim_selection *v_ops;   /* victim selection operation */
        unsigned long *dirty_segmap[NR_DIRTY_TYPE];
+       unsigned long *dirty_secmap;
        struct mutex seglist_lock;              /* lock for segment bitmaps */
        int nr_dirty[NR_DIRTY_TYPE];            /* # of dirty segments */
        unsigned long *victim_secmap;           /* background GC victims */
index 23c49c3..dfa072f 100644 (file)
@@ -350,7 +350,7 @@ static int f2fs_set_qf_name(struct super_block *sb, int qtype,
        set_opt(sbi, QUOTA);
        return 0;
 errout:
-       kvfree(qname);
+       kfree(qname);
        return ret;
 }
 
@@ -362,7 +362,7 @@ static int f2fs_clear_qf_name(struct super_block *sb, int qtype)
                f2fs_err(sbi, "Cannot change journaled quota options when quota turned on");
                return -EINVAL;
        }
-       kvfree(F2FS_OPTION(sbi).s_qf_names[qtype]);
+       kfree(F2FS_OPTION(sbi).s_qf_names[qtype]);
        F2FS_OPTION(sbi).s_qf_names[qtype] = NULL;
        return 0;
 }
@@ -462,9 +462,12 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
 {
        struct f2fs_sb_info *sbi = F2FS_SB(sb);
        substring_t args[MAX_OPT_ARGS];
+#ifdef CONFIG_F2FS_FS_COMPRESSION
        unsigned char (*ext)[F2FS_EXTENSION_LEN];
+       int ext_cnt;
+#endif
        char *p, *name;
-       int arg = 0, ext_cnt;
+       int arg = 0;
        kuid_t uid;
        kgid_t gid;
        int ret;
@@ -496,10 +499,10 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
                        } else if (!strcmp(name, "sync")) {
                                F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_SYNC;
                        } else {
-                               kvfree(name);
+                               kfree(name);
                                return -EINVAL;
                        }
-                       kvfree(name);
+                       kfree(name);
                        break;
                case Opt_disable_roll_forward:
                        set_opt(sbi, DISABLE_ROLL_FORWARD);
@@ -656,17 +659,17 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
                        if (!strcmp(name, "adaptive")) {
                                if (f2fs_sb_has_blkzoned(sbi)) {
                                        f2fs_warn(sbi, "adaptive mode is not allowed with zoned block device feature");
-                                       kvfree(name);
+                                       kfree(name);
                                        return -EINVAL;
                                }
                                F2FS_OPTION(sbi).fs_mode = FS_MODE_ADAPTIVE;
                        } else if (!strcmp(name, "lfs")) {
                                F2FS_OPTION(sbi).fs_mode = FS_MODE_LFS;
                        } else {
-                               kvfree(name);
+                               kfree(name);
                                return -EINVAL;
                        }
-                       kvfree(name);
+                       kfree(name);
                        break;
                case Opt_io_size_bits:
                        if (args->from && match_int(args, &arg))
@@ -792,10 +795,10 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
                        } else if (!strcmp(name, "fs-based")) {
                                F2FS_OPTION(sbi).whint_mode = WHINT_MODE_FS;
                        } else {
-                               kvfree(name);
+                               kfree(name);
                                return -EINVAL;
                        }
-                       kvfree(name);
+                       kfree(name);
                        break;
                case Opt_alloc:
                        name = match_strdup(&args[0]);
@@ -807,10 +810,10 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
                        } else if (!strcmp(name, "reuse")) {
                                F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE;
                        } else {
-                               kvfree(name);
+                               kfree(name);
                                return -EINVAL;
                        }
-                       kvfree(name);
+                       kfree(name);
                        break;
                case Opt_fsync:
                        name = match_strdup(&args[0]);
@@ -824,10 +827,10 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
                                F2FS_OPTION(sbi).fsync_mode =
                                                        FSYNC_MODE_NOBARRIER;
                        } else {
-                               kvfree(name);
+                               kfree(name);
                                return -EINVAL;
                        }
-                       kvfree(name);
+                       kfree(name);
                        break;
                case Opt_test_dummy_encryption:
                        ret = f2fs_set_test_dummy_encryption(sb, p, &args[0],
@@ -862,6 +865,7 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
                case Opt_checkpoint_enable:
                        clear_opt(sbi, DISABLE_CHECKPOINT);
                        break;
+#ifdef CONFIG_F2FS_FS_COMPRESSION
                case Opt_compress_algorithm:
                        if (!f2fs_sb_has_compression(sbi)) {
                                f2fs_err(sbi, "Compression feature if off");
@@ -927,6 +931,13 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
                        F2FS_OPTION(sbi).compress_ext_cnt++;
                        kfree(name);
                        break;
+#else
+               case Opt_compress_algorithm:
+               case Opt_compress_log_size:
+               case Opt_compress_extension:
+                       f2fs_info(sbi, "compression options not supported");
+                       break;
+#endif
                default:
                        f2fs_err(sbi, "Unrecognized mount option \"%s\" or missing value",
                                 p);
@@ -1024,6 +1035,8 @@ static struct inode *f2fs_alloc_inode(struct super_block *sb)
        /* Will be used by directory only */
        fi->i_dir_level = F2FS_SB(sb)->dir_level;
 
+       fi->ra_offset = -1;
+
        return &fi->vfs_inode;
 }
 
@@ -1182,6 +1195,9 @@ static void f2fs_put_super(struct super_block *sb)
        int i;
        bool dropped;
 
+       /* unregister procfs/sysfs entries in advance to avoid a race condition */
+       f2fs_unregister_sysfs(sbi);
+
        f2fs_quota_off_umount(sb);
 
        /* prevent remaining shrinker jobs */
@@ -1247,19 +1263,17 @@ static void f2fs_put_super(struct super_block *sb)
 
        kvfree(sbi->ckpt);
 
-       f2fs_unregister_sysfs(sbi);
-
        sb->s_fs_info = NULL;
        if (sbi->s_chksum_driver)
                crypto_free_shash(sbi->s_chksum_driver);
-       kvfree(sbi->raw_super);
+       kfree(sbi->raw_super);
 
        destroy_device_list(sbi);
        f2fs_destroy_xattr_caches(sbi);
        mempool_destroy(sbi->write_io_dummy);
 #ifdef CONFIG_QUOTA
        for (i = 0; i < MAXQUOTAS; i++)
-               kvfree(F2FS_OPTION(sbi).s_qf_names[i]);
+               kfree(F2FS_OPTION(sbi).s_qf_names[i]);
 #endif
        fscrypt_free_dummy_context(&F2FS_OPTION(sbi).dummy_enc_ctx);
        destroy_percpu_info(sbi);
@@ -1268,7 +1282,7 @@ static void f2fs_put_super(struct super_block *sb)
 #ifdef CONFIG_UNICODE
        utf8_unload(sbi->s_encoding);
 #endif
-       kvfree(sbi);
+       kfree(sbi);
 }
 
 int f2fs_sync_fs(struct super_block *sb, int sync)
@@ -1617,7 +1631,9 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
        else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_NOBARRIER)
                seq_printf(seq, ",fsync_mode=%s", "nobarrier");
 
+#ifdef CONFIG_F2FS_FS_COMPRESSION
        f2fs_show_compress_options(seq, sbi->sb);
+#endif
        return 0;
 }
 
@@ -1768,7 +1784,7 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
                                GFP_KERNEL);
                        if (!org_mount_opt.s_qf_names[i]) {
                                for (j = 0; j < i; j++)
-                                       kvfree(org_mount_opt.s_qf_names[j]);
+                                       kfree(org_mount_opt.s_qf_names[j]);
                                return -ENOMEM;
                        }
                } else {
@@ -1893,7 +1909,7 @@ skip:
 #ifdef CONFIG_QUOTA
        /* Release old quota file names */
        for (i = 0; i < MAXQUOTAS; i++)
-               kvfree(org_mount_opt.s_qf_names[i]);
+               kfree(org_mount_opt.s_qf_names[i]);
 #endif
        /* Update the POSIXACL Flag */
        sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
@@ -1914,7 +1930,7 @@ restore_opts:
 #ifdef CONFIG_QUOTA
        F2FS_OPTION(sbi).s_jquota_fmt = org_mount_opt.s_jquota_fmt;
        for (i = 0; i < MAXQUOTAS; i++) {
-               kvfree(F2FS_OPTION(sbi).s_qf_names[i]);
+               kfree(F2FS_OPTION(sbi).s_qf_names[i]);
                F2FS_OPTION(sbi).s_qf_names[i] = org_mount_opt.s_qf_names[i];
        }
 #endif
@@ -3172,7 +3188,7 @@ static int read_raw_super_block(struct f2fs_sb_info *sbi,
 
        /* No valid superblock */
        if (!*raw_super)
-               kvfree(super);
+               kfree(super);
        else
                err = 0;
 
@@ -3846,16 +3862,16 @@ free_bio_info:
 free_options:
 #ifdef CONFIG_QUOTA
        for (i = 0; i < MAXQUOTAS; i++)
-               kvfree(F2FS_OPTION(sbi).s_qf_names[i]);
+               kfree(F2FS_OPTION(sbi).s_qf_names[i]);
 #endif
        fscrypt_free_dummy_context(&F2FS_OPTION(sbi).dummy_enc_ctx);
        kvfree(options);
 free_sb_buf:
-       kvfree(raw_super);
+       kfree(raw_super);
 free_sbi:
        if (sbi->s_chksum_driver)
                crypto_free_shash(sbi->s_chksum_driver);
-       kvfree(sbi);
+       kfree(sbi);
 
        /* give only one another chance */
        if (retry_cnt > 0 && skip_recovery) {
index e877c59..88ed996 100644 (file)
@@ -27,7 +27,7 @@ enum {
        NM_INFO,        /* struct f2fs_nm_info */
        F2FS_SBI,       /* struct f2fs_sb_info */
 #ifdef CONFIG_F2FS_STAT_FS
-       STAT_INFO,      /* struct f2fs_stat_info */
+       STAT_INFO,      /* struct f2fs_stat_info */
 #endif
 #ifdef CONFIG_F2FS_FAULT_INJECTION
        FAULT_INFO_RATE,        /* struct f2fs_fault_info */
@@ -223,6 +223,13 @@ static ssize_t avg_vblocks_show(struct f2fs_attr *a,
 }
 #endif
 
+static ssize_t main_blkaddr_show(struct f2fs_attr *a,
+                               struct f2fs_sb_info *sbi, char *buf)
+{
+       return snprintf(buf, PAGE_SIZE, "%llu\n",
+                       (unsigned long long)MAIN_BLKADDR(sbi));
+}
+
 static ssize_t f2fs_sbi_show(struct f2fs_attr *a,
                        struct f2fs_sb_info *sbi, char *buf)
 {
@@ -350,16 +357,20 @@ out:
                return -EINVAL;
 
        if (!strcmp(a->attr.name, "gc_urgent")) {
-               if (t >= 1) {
-                       sbi->gc_mode = GC_URGENT;
+               if (t == 0) {
+                       sbi->gc_mode = GC_NORMAL;
+               } else if (t == 1) {
+                       sbi->gc_mode = GC_URGENT_HIGH;
                        if (sbi->gc_thread) {
                                sbi->gc_thread->gc_wake = 1;
                                wake_up_interruptible_all(
                                        &sbi->gc_thread->gc_wait_queue_head);
                                wake_up_discard_thread(sbi, true);
                        }
+               } else if (t == 2) {
+                       sbi->gc_mode = GC_URGENT_LOW;
                } else {
-                       sbi->gc_mode = GC_NORMAL;
+                       return -EINVAL;
                }
                return count;
        }
@@ -522,7 +533,6 @@ F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_no_gc_sleep_time, no_gc_sleep_time);
 F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, gc_idle, gc_mode);
 F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, gc_urgent, gc_mode);
 F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, reclaim_segments, rec_prefree_segments);
-F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, main_blkaddr, main_blkaddr);
 F2FS_RW_ATTR(DCC_INFO, discard_cmd_control, max_small_discards, max_discards);
 F2FS_RW_ATTR(DCC_INFO, discard_cmd_control, discard_granularity, discard_granularity);
 F2FS_RW_ATTR(RESERVED_BLOCKS, f2fs_sb_info, reserved_blocks, reserved_blocks);
@@ -565,6 +575,7 @@ F2FS_GENERAL_RO_ATTR(current_reserved_blocks);
 F2FS_GENERAL_RO_ATTR(unusable);
 F2FS_GENERAL_RO_ATTR(encoding);
 F2FS_GENERAL_RO_ATTR(mounted_time_sec);
+F2FS_GENERAL_RO_ATTR(main_blkaddr);
 #ifdef CONFIG_F2FS_STAT_FS
 F2FS_STAT_ATTR(STAT_INFO, f2fs_stat_info, cp_foreground_calls, cp_count);
 F2FS_STAT_ATTR(STAT_INFO, f2fs_stat_info, cp_background_calls, bg_cp_count);
@@ -706,7 +717,7 @@ static struct kobj_type f2fs_ktype = {
 };
 
 static struct kset f2fs_kset = {
-       .kobj   = {.ktype = &f2fs_ktype},
+       .kobj   = {.ktype = &f2fs_ktype},
 };
 
 static struct kobj_type f2fs_feat_ktype = {
index 865c9fb..9eb0dba 100644 (file)
@@ -29,6 +29,8 @@
 #include "f2fs.h"
 #include "xattr.h"
 
+#define F2FS_VERIFY_VER        (1)
+
 static inline loff_t f2fs_verity_metadata_pos(const struct inode *inode)
 {
        return round_up(inode->i_size, 65536);
@@ -152,7 +154,7 @@ static int f2fs_end_enable_verity(struct file *filp, const void *desc,
        struct inode *inode = file_inode(filp);
        u64 desc_pos = f2fs_verity_metadata_pos(inode) + merkle_tree_size;
        struct fsverity_descriptor_location dloc = {
-               .version = cpu_to_le32(1),
+               .version = cpu_to_le32(F2FS_VERIFY_VER),
                .size = cpu_to_le32(desc_size),
                .pos = cpu_to_le64(desc_pos),
        };
@@ -199,7 +201,7 @@ static int f2fs_get_verity_descriptor(struct inode *inode, void *buf,
                            F2FS_XATTR_NAME_VERITY, &dloc, sizeof(dloc), NULL);
        if (res < 0 && res != -ERANGE)
                return res;
-       if (res != sizeof(dloc) || dloc.version != cpu_to_le32(1)) {
+       if (res != sizeof(dloc) || dloc.version != cpu_to_le32(F2FS_VERIFY_VER)) {
                f2fs_warn(F2FS_I_SB(inode), "unknown verity xattr format");
                return -EINVAL;
        }
index 4f6582e..1b0736c 100644 (file)
@@ -175,8 +175,8 @@ const struct xattr_handler f2fs_xattr_trusted_handler = {
 const struct xattr_handler f2fs_xattr_advise_handler = {
        .name   = F2FS_SYSTEM_ADVISE_NAME,
        .flags  = F2FS_XATTR_INDEX_ADVISE,
-       .get    = f2fs_xattr_advise_get,
-       .set    = f2fs_xattr_advise_set,
+       .get    = f2fs_xattr_advise_get,
+       .set    = f2fs_xattr_advise_set,
 };
 
 const struct xattr_handler f2fs_xattr_security_handler = {
index ca31993..66532a7 100644 (file)
@@ -41,7 +41,7 @@ config MSDOS_FS
          they are compressed; to access compressed MSDOS partitions under
          Linux, you can either use the DOS emulator DOSEMU, described in the
          DOSEMU-HOWTO, available from
-         <http://www.tldp.org/docs.html#howto>, or try dmsdosfs in
+         <https://www.tldp.org/docs.html#howto>, or try dmsdosfs in
          <ftp://ibiblio.org/pub/Linux/system/filesystems/dosfs/>. If you
          intend to use dosemu with a non-compressed MSDOS partition, say Y
          here) and MSDOS floppies. This means that file access becomes
index bbfe18c..f7e3304 100644 (file)
@@ -657,6 +657,9 @@ static void fat_ra_init(struct super_block *sb, struct fatent_ra *ra,
        unsigned long ra_pages = sb->s_bdi->ra_pages;
        unsigned int reada_blocks;
 
+       if (fatent->entry >= ent_limit)
+               return;
+
        if (ra_pages > sb->s_bdi->io_pages)
                ra_pages = rounddown(ra_pages, sb->s_bdi->io_pages);
        reada_blocks = ra_pages << (PAGE_SHIFT - sb->s_blocksize_bits + 1);
index 42134c5..f9ee27c 100644 (file)
@@ -25,9 +25,9 @@ static int fat_ioctl_get_attributes(struct inode *inode, u32 __user *user_attr)
 {
        u32 attr;
 
-       inode_lock(inode);
+       inode_lock_shared(inode);
        attr = fat_make_attrs(inode);
-       inode_unlock(inode);
+       inode_unlock_shared(inode);
 
        return put_user(attr, user_attr);
 }
index ca639ed..04b3f5b 100644 (file)
@@ -117,7 +117,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
                fs->users = 1;
                fs->in_exec = 0;
                spin_lock_init(&fs->lock);
-               seqcount_init(&fs->seq);
+               seqcount_spinlock_init(&fs->seq, &fs->lock);
                fs->umask = old->umask;
 
                spin_lock(&old->lock);
@@ -163,6 +163,6 @@ EXPORT_SYMBOL(current_umask);
 struct fs_struct init_fs = {
        .users          = 1,
        .lock           = __SPIN_LOCK_UNLOCKED(init_fs.lock),
-       .seq            = SEQCNT_ZERO(init_fs.seq),
+       .seq            = SEQCNT_SPINLOCK_ZERO(init_fs.seq, &init_fs.lock),
        .umask          = 0022,
 };
index 4c4ef5d..104f35d 100644 (file)
@@ -606,8 +606,8 @@ static int virtio_fs_setup_vqs(struct virtio_device *vdev,
        unsigned int i;
        int ret = 0;
 
-       virtio_cread(vdev, struct virtio_fs_config, num_request_queues,
-                    &fs->num_request_queues);
+       virtio_cread_le(vdev, struct virtio_fs_config, num_request_queues,
+                       &fs->num_request_queues);
        if (fs->num_request_queues == 0)
                return -EINVAL;
 
index 8dfe09f..770f3a7 100644 (file)
@@ -1351,9 +1351,15 @@ int gfs2_extent_map(struct inode *inode, u64 lblock, int *new, u64 *dblock, unsi
        return ret;
 }
 
+/*
+ * NOTE: Never call gfs2_block_zero_range with an open transaction because it
+ * uses iomap write to perform its actions, which begin their own transactions
+ * (iomap_begin, page_prepare, etc.)
+ */
 static int gfs2_block_zero_range(struct inode *inode, loff_t from,
                                 unsigned int length)
 {
+       BUG_ON(current->journal_info);
        return iomap_zero_range(inode, from, length, NULL, &gfs2_iomap_ops);
 }
 
@@ -1414,6 +1420,16 @@ static int trunc_start(struct inode *inode, u64 newsize)
        u64 oldsize = inode->i_size;
        int error;
 
+       if (!gfs2_is_stuffed(ip)) {
+               unsigned int blocksize = i_blocksize(inode);
+               unsigned int offs = newsize & (blocksize - 1);
+               if (offs) {
+                       error = gfs2_block_zero_range(inode, newsize,
+                                                     blocksize - offs);
+                       if (error)
+                               return error;
+               }
+       }
        if (journaled)
                error = gfs2_trans_begin(sdp, RES_DINODE + RES_JDATA, GFS2_JTRUNC_REVOKES);
        else
@@ -1427,19 +1443,10 @@ static int trunc_start(struct inode *inode, u64 newsize)
 
        gfs2_trans_add_meta(ip->i_gl, dibh);
 
-       if (gfs2_is_stuffed(ip)) {
+       if (gfs2_is_stuffed(ip))
                gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode) + newsize);
-       } else {
-               unsigned int blocksize = i_blocksize(inode);
-               unsigned int offs = newsize & (blocksize - 1);
-               if (offs) {
-                       error = gfs2_block_zero_range(inode, newsize,
-                                                     blocksize - offs);
-                       if (error)
-                               goto out;
-               }
+       else
                ip->i_diskflags |= GFS2_DIF_TRUNC_IN_PROG;
-       }
 
        i_size_write(inode, newsize);
        ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
@@ -2448,25 +2455,7 @@ int __gfs2_punch_hole(struct file *file, loff_t offset, loff_t length)
        loff_t start, end;
        int error;
 
-       start = round_down(offset, blocksize);
-       end = round_up(offset + length, blocksize) - 1;
-       error = filemap_write_and_wait_range(inode->i_mapping, start, end);
-       if (error)
-               return error;
-
-       if (gfs2_is_jdata(ip))
-               error = gfs2_trans_begin(sdp, RES_DINODE + 2 * RES_JDATA,
-                                        GFS2_JTRUNC_REVOKES);
-       else
-               error = gfs2_trans_begin(sdp, RES_DINODE, 0);
-       if (error)
-               return error;
-
-       if (gfs2_is_stuffed(ip)) {
-               error = stuffed_zero_range(inode, offset, length);
-               if (error)
-                       goto out;
-       } else {
+       if (!gfs2_is_stuffed(ip)) {
                unsigned int start_off, end_len;
 
                start_off = offset & (blocksize - 1);
@@ -2489,6 +2478,26 @@ int __gfs2_punch_hole(struct file *file, loff_t offset, loff_t length)
                }
        }
 
+       start = round_down(offset, blocksize);
+       end = round_up(offset + length, blocksize) - 1;
+       error = filemap_write_and_wait_range(inode->i_mapping, start, end);
+       if (error)
+               return error;
+
+       if (gfs2_is_jdata(ip))
+               error = gfs2_trans_begin(sdp, RES_DINODE + 2 * RES_JDATA,
+                                        GFS2_JTRUNC_REVOKES);
+       else
+               error = gfs2_trans_begin(sdp, RES_DINODE, 0);
+       if (error)
+               return error;
+
+       if (gfs2_is_stuffed(ip)) {
+               error = stuffed_zero_range(inode, offset, length);
+               if (error)
+                       goto out;
+       }
+
        if (gfs2_is_jdata(ip)) {
                BUG_ON(!current->journal_info);
                gfs2_journaled_truncate_range(inode, offset, length);
index b085a3b..b39b339 100644 (file)
@@ -781,39 +781,39 @@ static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
        return ret ? ret : ret1;
 }
 
-static ssize_t gfs2_file_direct_read(struct kiocb *iocb, struct iov_iter *to)
+static ssize_t gfs2_file_direct_read(struct kiocb *iocb, struct iov_iter *to,
+                                    struct gfs2_holder *gh)
 {
        struct file *file = iocb->ki_filp;
        struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
        size_t count = iov_iter_count(to);
-       struct gfs2_holder gh;
        ssize_t ret;
 
        if (!count)
                return 0; /* skip atime */
 
-       gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, &gh);
-       ret = gfs2_glock_nq(&gh);
+       gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, gh);
+       ret = gfs2_glock_nq(gh);
        if (ret)
                goto out_uninit;
 
        ret = iomap_dio_rw(iocb, to, &gfs2_iomap_ops, NULL,
                           is_sync_kiocb(iocb));
 
-       gfs2_glock_dq(&gh);
+       gfs2_glock_dq(gh);
 out_uninit:
-       gfs2_holder_uninit(&gh);
+       gfs2_holder_uninit(gh);
        return ret;
 }
 
-static ssize_t gfs2_file_direct_write(struct kiocb *iocb, struct iov_iter *from)
+static ssize_t gfs2_file_direct_write(struct kiocb *iocb, struct iov_iter *from,
+                                     struct gfs2_holder *gh)
 {
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        size_t len = iov_iter_count(from);
        loff_t offset = iocb->ki_pos;
-       struct gfs2_holder gh;
        ssize_t ret;
 
        /*
@@ -824,8 +824,8 @@ static ssize_t gfs2_file_direct_write(struct kiocb *iocb, struct iov_iter *from)
         * unfortunately, have the option of only flushing a range like the
         * VFS does.
         */
-       gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, &gh);
-       ret = gfs2_glock_nq(&gh);
+       gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, gh);
+       ret = gfs2_glock_nq(gh);
        if (ret)
                goto out_uninit;
 
@@ -838,9 +838,9 @@ static ssize_t gfs2_file_direct_write(struct kiocb *iocb, struct iov_iter *from)
        if (ret == -ENOTBLK)
                ret = 0;
 out:
-       gfs2_glock_dq(&gh);
+       gfs2_glock_dq(gh);
 out_uninit:
-       gfs2_holder_uninit(&gh);
+       gfs2_holder_uninit(gh);
        return ret;
 }
 
@@ -852,7 +852,7 @@ static ssize_t gfs2_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
        ssize_t ret;
 
        if (iocb->ki_flags & IOCB_DIRECT) {
-               ret = gfs2_file_direct_read(iocb, to);
+               ret = gfs2_file_direct_read(iocb, to, &gh);
                if (likely(ret != -ENOTBLK))
                        return ret;
                iocb->ki_flags &= ~IOCB_DIRECT;
@@ -901,13 +901,12 @@ static ssize_t gfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
        struct file *file = iocb->ki_filp;
        struct inode *inode = file_inode(file);
        struct gfs2_inode *ip = GFS2_I(inode);
+       struct gfs2_holder gh;
        ssize_t ret;
 
        gfs2_size_hint(file, iocb->ki_pos, iov_iter_count(from));
 
        if (iocb->ki_flags & IOCB_APPEND) {
-               struct gfs2_holder gh;
-
                ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
                if (ret)
                        return ret;
@@ -931,7 +930,7 @@ static ssize_t gfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
                struct address_space *mapping = file->f_mapping;
                ssize_t buffered, ret2;
 
-               ret = gfs2_file_direct_write(iocb, from);
+               ret = gfs2_file_direct_write(iocb, from, &gh);
                if (ret < 0 || !iov_iter_count(from))
                        goto out_unlock;
 
index 8545024..f13b136 100644 (file)
@@ -790,9 +790,11 @@ static void gfs2_glock_poke(struct gfs2_glock *gl)
        struct gfs2_holder gh;
        int error;
 
-       error = gfs2_glock_nq_init(gl, LM_ST_SHARED, flags, &gh);
+       gfs2_holder_init(gl, LM_ST_SHARED, flags, &gh);
+       error = gfs2_glock_nq(&gh);
        if (!error)
                gfs2_glock_dq(&gh);
+       gfs2_holder_uninit(&gh);
 }
 
 static bool gfs2_try_evict(struct gfs2_glock *gl)
@@ -2106,6 +2108,12 @@ static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
                *p++ = 'o';
        if (test_bit(GLF_BLOCKING, gflags))
                *p++ = 'b';
+       if (test_bit(GLF_INODE_CREATING, gflags))
+               *p++ = 'c';
+       if (test_bit(GLF_PENDING_DELETE, gflags))
+               *p++ = 'P';
+       if (test_bit(GLF_FREEING, gflags))
+               *p++ = 'x';
        *p = 0;
        return buf;
 }
index a76e55b..a58333e 100644 (file)
@@ -1092,7 +1092,7 @@ static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
  * or the total number of used blocks (pinned blocks plus AIL blocks)
  * is greater than thresh2.
  *
- * At mount time thresh1 is 1/3rd of journal size, thresh2 is 2/3rd of
+ * At mount time thresh1 is 2/5ths of journal size, thresh2 is 4/5ths of
  * journal size.
  *
  * Returns: errno
index 47d0ae1..9f4d9e7 100644 (file)
@@ -566,6 +566,7 @@ static void gfs2_dirty_inode(struct inode *inode, int flags)
                ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
                if (ret) {
                        fs_err(sdp, "dirty_inode: glock %d\n", ret);
+                       gfs2_dump_glock(NULL, ip->i_gl, true);
                        return;
                }
                need_unlock = 1;
index a3dfa3a..e1c7eb6 100644 (file)
 #include "util.h"
 #include "trace_gfs2.h"
 
+static void gfs2_print_trans(struct gfs2_sbd *sdp, const struct gfs2_trans *tr)
+{
+       fs_warn(sdp, "Transaction created at: %pSR\n", (void *)tr->tr_ip);
+       fs_warn(sdp, "blocks=%u revokes=%u reserved=%u touched=%u\n",
+               tr->tr_blocks, tr->tr_revokes, tr->tr_reserved,
+               test_bit(TR_TOUCHED, &tr->tr_flags));
+       fs_warn(sdp, "Buf %u/%u Databuf %u/%u Revoke %u/%u\n",
+               tr->tr_num_buf_new, tr->tr_num_buf_rm,
+               tr->tr_num_databuf_new, tr->tr_num_databuf_rm,
+               tr->tr_num_revoke, tr->tr_num_revoke_rm);
+}
+
 int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks,
                     unsigned int revokes)
 {
        struct gfs2_trans *tr;
        int error;
 
-       BUG_ON(current->journal_info);
+       if (current->journal_info) {
+               gfs2_print_trans(sdp, current->journal_info);
+               BUG();
+       }
        BUG_ON(blocks == 0 && revokes == 0);
 
        if (!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
@@ -72,18 +87,6 @@ fail:
        return error;
 }
 
-static void gfs2_print_trans(struct gfs2_sbd *sdp, const struct gfs2_trans *tr)
-{
-       fs_warn(sdp, "Transaction created at: %pSR\n", (void *)tr->tr_ip);
-       fs_warn(sdp, "blocks=%u revokes=%u reserved=%u touched=%u\n",
-               tr->tr_blocks, tr->tr_revokes, tr->tr_reserved,
-               test_bit(TR_TOUCHED, &tr->tr_flags));
-       fs_warn(sdp, "Buf %u/%u Databuf %u/%u Revoke %u/%u\n",
-               tr->tr_num_buf_new, tr->tr_num_buf_rm,
-               tr->tr_num_databuf_new, tr->tr_num_databuf_rm,
-               tr->tr_num_revoke, tr->tr_num_revoke_rm);
-}
-
 void gfs2_trans_end(struct gfs2_sbd *sdp)
 {
        struct gfs2_trans *tr = current->journal_info;
index 523954d..b5c1097 100644 (file)
@@ -1364,6 +1364,12 @@ hugetlbfs_fill_super(struct super_block *sb, struct fs_context *fc)
        sb->s_magic = HUGETLBFS_MAGIC;
        sb->s_op = &hugetlbfs_ops;
        sb->s_time_gran = 1;
+
+       /*
+        * Due to the special and limited functionality of hugetlbfs, it does
+        * not work well as a stacking filesystem.
+        */
+       sb->s_stack_depth = FILESYSTEM_MAX_STACK_DEPTH;
        sb->s_root = d_make_root(hugetlbfs_get_root(sb, ctx));
        if (!sb->s_root)
                goto out_free;
index 2a3af95..91e2cc8 100644 (file)
@@ -508,9 +508,9 @@ struct io_async_msghdr {
 
 struct io_async_rw {
        struct iovec                    fast_iov[UIO_FASTIOV];
-       struct iovec                    *iov;
-       ssize_t                         nr_segs;
-       ssize_t                         size;
+       const struct iovec              *free_iovec;
+       struct iov_iter                 iter;
+       size_t                          bytes_done;
        struct wait_page_queue          wpq;
 };
 
@@ -540,7 +540,6 @@ enum {
        REQ_F_ISREG_BIT,
        REQ_F_COMP_LOCKED_BIT,
        REQ_F_NEED_CLEANUP_BIT,
-       REQ_F_OVERFLOW_BIT,
        REQ_F_POLLED_BIT,
        REQ_F_BUFFER_SELECTED_BIT,
        REQ_F_NO_FILE_TABLE_BIT,
@@ -583,8 +582,6 @@ enum {
        REQ_F_COMP_LOCKED       = BIT(REQ_F_COMP_LOCKED_BIT),
        /* needs cleanup */
        REQ_F_NEED_CLEANUP      = BIT(REQ_F_NEED_CLEANUP_BIT),
-       /* in overflow list */
-       REQ_F_OVERFLOW          = BIT(REQ_F_OVERFLOW_BIT),
        /* already went through poll handler */
        REQ_F_POLLED            = BIT(REQ_F_POLLED_BIT),
        /* buffer already selected */
@@ -898,6 +895,7 @@ static void io_put_req(struct io_kiocb *req);
 static void io_double_put_req(struct io_kiocb *req);
 static void __io_double_put_req(struct io_kiocb *req);
 static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req);
+static void __io_queue_linked_timeout(struct io_kiocb *req);
 static void io_queue_linked_timeout(struct io_kiocb *req);
 static int __io_sqe_files_update(struct io_ring_ctx *ctx,
                                 struct io_uring_files_update *ip,
@@ -914,9 +912,9 @@ static void io_file_put_work(struct work_struct *work);
 static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
                               struct iovec **iovec, struct iov_iter *iter,
                               bool needs_lock);
-static int io_setup_async_rw(struct io_kiocb *req, ssize_t io_size,
-                            struct iovec *iovec, struct iovec *fast_iov,
-                            struct iov_iter *iter);
+static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
+                            const struct iovec *fast_iov,
+                            struct iov_iter *iter, bool force);
 
 static struct kmem_cache *req_cachep;
 
@@ -945,7 +943,8 @@ static void io_get_req_task(struct io_kiocb *req)
 
 static inline void io_clean_op(struct io_kiocb *req)
 {
-       if (req->flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED))
+       if (req->flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED |
+                         REQ_F_INFLIGHT))
                __io_clean_op(req);
 }
 
@@ -1107,10 +1106,16 @@ static void __io_commit_cqring(struct io_ring_ctx *ctx)
        }
 }
 
-static void io_req_clean_work(struct io_kiocb *req)
+/*
+ * Returns true if we need to defer file table putting. This can only happen
+ * from the error path with REQ_F_COMP_LOCKED set.
+ */
+static bool io_req_clean_work(struct io_kiocb *req)
 {
        if (!(req->flags & REQ_F_WORK_INITIALIZED))
-               return;
+               return false;
+
+       req->flags &= ~REQ_F_WORK_INITIALIZED;
 
        if (req->work.mm) {
                mmdrop(req->work.mm);
@@ -1123,6 +1128,9 @@ static void io_req_clean_work(struct io_kiocb *req)
        if (req->work.fs) {
                struct fs_struct *fs = req->work.fs;
 
+               if (req->flags & REQ_F_COMP_LOCKED)
+                       return true;
+
                spin_lock(&req->work.fs->lock);
                if (--fs->users)
                        fs = NULL;
@@ -1131,7 +1139,8 @@ static void io_req_clean_work(struct io_kiocb *req)
                        free_fs_struct(fs);
                req->work.fs = NULL;
        }
-       req->flags &= ~REQ_F_WORK_INITIALIZED;
+
+       return false;
 }
 
 static void io_prep_async_work(struct io_kiocb *req)
@@ -1179,7 +1188,7 @@ static void io_prep_async_link(struct io_kiocb *req)
                        io_prep_async_work(cur);
 }
 
-static void __io_queue_async_work(struct io_kiocb *req)
+static struct io_kiocb *__io_queue_async_work(struct io_kiocb *req)
 {
        struct io_ring_ctx *ctx = req->ctx;
        struct io_kiocb *link = io_prep_linked_timeout(req);
@@ -1187,16 +1196,19 @@ static void __io_queue_async_work(struct io_kiocb *req)
        trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req,
                                        &req->work, req->flags);
        io_wq_enqueue(ctx->io_wq, &req->work);
-
-       if (link)
-               io_queue_linked_timeout(link);
+       return link;
 }
 
 static void io_queue_async_work(struct io_kiocb *req)
 {
+       struct io_kiocb *link;
+
        /* init ->work of the whole link before punting */
        io_prep_async_link(req);
-       __io_queue_async_work(req);
+       link = __io_queue_async_work(req);
+
+       if (link)
+               io_queue_linked_timeout(link);
 }
 
 static void io_kill_timeout(struct io_kiocb *req)
@@ -1229,12 +1241,19 @@ static void __io_queue_deferred(struct io_ring_ctx *ctx)
        do {
                struct io_defer_entry *de = list_first_entry(&ctx->defer_list,
                                                struct io_defer_entry, list);
+               struct io_kiocb *link;
 
                if (req_need_defer(de->req, de->seq))
                        break;
                list_del_init(&de->list);
                /* punt-init is done before queueing for defer */
-               __io_queue_async_work(de->req);
+               link = __io_queue_async_work(de->req);
+               if (link) {
+                       __io_queue_linked_timeout(link);
+                       /* drop submission reference */
+                       link->flags |= REQ_F_COMP_LOCKED;
+                       io_put_req(link);
+               }
                kfree(de);
        } while (!list_empty(&ctx->defer_list));
 }
@@ -1345,7 +1364,6 @@ static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
                req = list_first_entry(&ctx->cq_overflow_list, struct io_kiocb,
                                                compl.list);
                list_move(&req->compl.list, &list);
-               req->flags &= ~REQ_F_OVERFLOW;
                if (cqe) {
                        WRITE_ONCE(cqe->user_data, req->user_data);
                        WRITE_ONCE(cqe->res, req->result);
@@ -1398,7 +1416,6 @@ static void __io_cqring_fill_event(struct io_kiocb *req, long res, long cflags)
                        ctx->rings->sq_flags |= IORING_SQ_CQ_OVERFLOW;
                }
                io_clean_op(req);
-               req->flags |= REQ_F_OVERFLOW;
                req->result = res;
                req->compl.cflags = cflags;
                refcount_inc(&req->refs);
@@ -1533,7 +1550,7 @@ static inline void io_put_file(struct io_kiocb *req, struct file *file,
                fput(file);
 }
 
-static void io_dismantle_req(struct io_kiocb *req)
+static bool io_dismantle_req(struct io_kiocb *req)
 {
        io_clean_op(req);
 
@@ -1541,27 +1558,15 @@ static void io_dismantle_req(struct io_kiocb *req)
                kfree(req->io);
        if (req->file)
                io_put_file(req, req->file, (req->flags & REQ_F_FIXED_FILE));
-       io_req_clean_work(req);
-
-       if (req->flags & REQ_F_INFLIGHT) {
-               struct io_ring_ctx *ctx = req->ctx;
-               unsigned long flags;
 
-               spin_lock_irqsave(&ctx->inflight_lock, flags);
-               list_del(&req->inflight_entry);
-               if (waitqueue_active(&ctx->inflight_wait))
-                       wake_up(&ctx->inflight_wait);
-               spin_unlock_irqrestore(&ctx->inflight_lock, flags);
-       }
+       return io_req_clean_work(req);
 }
 
-static void __io_free_req(struct io_kiocb *req)
+static void __io_free_req_finish(struct io_kiocb *req)
 {
-       struct io_ring_ctx *ctx;
+       struct io_ring_ctx *ctx = req->ctx;
 
-       io_dismantle_req(req);
        __io_put_req_task(req);
-       ctx = req->ctx;
        if (likely(!io_is_fallback_req(req)))
                kmem_cache_free(req_cachep, req);
        else
@@ -1569,6 +1574,39 @@ static void __io_free_req(struct io_kiocb *req)
        percpu_ref_put(&ctx->refs);
 }
 
+static void io_req_task_file_table_put(struct callback_head *cb)
+{
+       struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
+       struct fs_struct *fs = req->work.fs;
+
+       spin_lock(&req->work.fs->lock);
+       if (--fs->users)
+               fs = NULL;
+       spin_unlock(&req->work.fs->lock);
+       if (fs)
+               free_fs_struct(fs);
+       req->work.fs = NULL;
+       __io_free_req_finish(req);
+}
+
+static void __io_free_req(struct io_kiocb *req)
+{
+       if (!io_dismantle_req(req)) {
+               __io_free_req_finish(req);
+       } else {
+               int ret;
+
+               init_task_work(&req->task_work, io_req_task_file_table_put);
+               ret = task_work_add(req->task, &req->task_work, TWA_RESUME);
+               if (unlikely(ret)) {
+                       struct task_struct *tsk;
+
+                       tsk = io_wq_get_task(req->ctx->io_wq);
+                       task_work_add(tsk, &req->task_work, 0);
+               }
+       }
+}
+
 static bool io_link_cancel_timeout(struct io_kiocb *req)
 {
        struct io_ring_ctx *ctx = req->ctx;
@@ -1598,6 +1636,7 @@ static bool __io_kill_linked_timeout(struct io_kiocb *req)
                return false;
 
        list_del_init(&link->link_list);
+       link->flags |= REQ_F_COMP_LOCKED;
        wake_ev = io_link_cancel_timeout(link);
        req->flags &= ~REQ_F_LINK_TIMEOUT;
        return wake_ev;
@@ -1656,6 +1695,7 @@ static void __io_fail_links(struct io_kiocb *req)
                trace_io_uring_fail_link(req, link);
 
                io_cqring_fill_event(link, -ECANCELED);
+               link->flags |= REQ_F_COMP_LOCKED;
                __io_double_put_req(link);
                req->flags &= ~REQ_F_LINK_TIMEOUT;
        }
@@ -1710,22 +1750,22 @@ static int io_req_task_work_add(struct io_kiocb *req, struct callback_head *cb)
 {
        struct task_struct *tsk = req->task;
        struct io_ring_ctx *ctx = req->ctx;
-       int ret, notify = TWA_RESUME;
+       int ret, notify;
 
        /*
-        * SQPOLL kernel thread doesn't need notification, just a wakeup.
-        * If we're not using an eventfd, then TWA_RESUME is always fine,
-        * as we won't have dependencies between request completions for
-        * other kernel wait conditions.
+        * SQPOLL kernel thread doesn't need notification, just a wakeup. For
+        * all other cases, use TWA_SIGNAL unconditionally to ensure we're
+        * processing task_work. There's no reliable way to tell if TWA_RESUME
+        * will do the job.
         */
-       if (ctx->flags & IORING_SETUP_SQPOLL)
-               notify = 0;
-       else if (ctx->cq_ev_fd)
+       notify = 0;
+       if (!(ctx->flags & IORING_SETUP_SQPOLL))
                notify = TWA_SIGNAL;
 
        ret = task_work_add(tsk, cb, notify);
        if (!ret)
                wake_up_process(tsk);
+
        return ret;
 }
 
@@ -1766,8 +1806,10 @@ static void __io_req_task_submit(struct io_kiocb *req)
 static void io_req_task_submit(struct callback_head *cb)
 {
        struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
+       struct io_ring_ctx *ctx = req->ctx;
 
        __io_req_task_submit(req);
+       percpu_ref_put(&ctx->refs);
 }
 
 static void io_req_task_queue(struct io_kiocb *req)
@@ -1775,6 +1817,7 @@ static void io_req_task_queue(struct io_kiocb *req)
        int ret;
 
        init_task_work(&req->task_work, io_req_task_submit);
+       percpu_ref_get(&req->ctx->refs);
 
        ret = io_req_task_work_add(req, &req->task_work);
        if (unlikely(ret)) {
@@ -1855,7 +1898,7 @@ static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req)
                req->flags &= ~REQ_F_TASK_PINNED;
        }
 
-       io_dismantle_req(req);
+       WARN_ON_ONCE(io_dismantle_req(req));
        rb->reqs[rb->to_free++] = req;
        if (unlikely(rb->to_free == ARRAY_SIZE(rb->reqs)))
                __io_req_free_batch_flush(req->ctx, rb);
@@ -2241,7 +2284,7 @@ static bool io_resubmit_prep(struct io_kiocb *req, int error)
        ret = io_import_iovec(rw, req, &iovec, &iter, false);
        if (ret < 0)
                goto end_req;
-       ret = io_setup_async_rw(req, ret, iovec, inline_vecs, &iter);
+       ret = io_setup_async_rw(req, iovec, inline_vecs, &iter, false);
        if (!ret)
                return true;
        kfree(iovec);
@@ -2263,6 +2306,8 @@ static void io_rw_resubmit(struct callback_head *cb)
                refcount_inc(&req->refs);
                io_queue_async_work(req);
        }
+
+       percpu_ref_put(&ctx->refs);
 }
 #endif
 
@@ -2275,6 +2320,8 @@ static bool io_rw_reissue(struct io_kiocb *req, long res)
                return false;
 
        init_task_work(&req->task_work, io_rw_resubmit);
+       percpu_ref_get(&req->ctx->refs);
+
        ret = io_req_task_work_add(req, &req->task_work);
        if (!ret)
                return true;
@@ -2527,6 +2574,14 @@ static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
 {
        struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
 
+       /* add previously done IO, if any */
+       if (req->io && req->io->rw.bytes_done > 0) {
+               if (ret < 0)
+                       ret = req->io->rw.bytes_done;
+               else
+                       ret += req->io->rw.bytes_done;
+       }
+
        if (req->flags & REQ_F_CUR_POS)
                req->file->f_pos = kiocb->ki_pos;
        if (ret >= 0 && kiocb->ki_complete == io_complete_rw)
@@ -2749,9 +2804,9 @@ static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
        return __io_iov_buffer_select(req, iov, needs_lock);
 }
 
-static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
-                              struct iovec **iovec, struct iov_iter *iter,
-                              bool needs_lock)
+static ssize_t __io_import_iovec(int rw, struct io_kiocb *req,
+                                struct iovec **iovec, struct iov_iter *iter,
+                                bool needs_lock)
 {
        void __user *buf = u64_to_user_ptr(req->rw.addr);
        size_t sqe_len = req->rw.len;
@@ -2771,10 +2826,8 @@ static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
        if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) {
                if (req->flags & REQ_F_BUFFER_SELECT) {
                        buf = io_rw_buffer_select(req, &sqe_len, needs_lock);
-                       if (IS_ERR(buf)) {
-                               *iovec = NULL;
+                       if (IS_ERR(buf))
                                return PTR_ERR(buf);
-                       }
                        req->rw.len = sqe_len;
                }
 
@@ -2783,14 +2836,6 @@ static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
                return ret < 0 ? ret : sqe_len;
        }
 
-       if (req->io) {
-               struct io_async_rw *iorw = &req->io->rw;
-
-               iov_iter_init(iter, rw, iorw->iov, iorw->nr_segs, iorw->size);
-               *iovec = NULL;
-               return iorw->size;
-       }
-
        if (req->flags & REQ_F_BUFFER_SELECT) {
                ret = io_iov_buffer_select(req, *iovec, needs_lock);
                if (!ret) {
@@ -2810,6 +2855,16 @@ static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
        return import_iovec(rw, buf, sqe_len, UIO_FASTIOV, iovec, iter);
 }
 
+static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
+                              struct iovec **iovec, struct iov_iter *iter,
+                              bool needs_lock)
+{
+       if (!req->io)
+               return __io_import_iovec(rw, req, iovec, iter, needs_lock);
+       *iovec = NULL;
+       return iov_iter_count(&req->io->rw.iter);
+}
+
 /*
  * For files that don't have ->read_iter() and ->write_iter(), handle them
  * by looping over ->read() or ->write() manually.
@@ -2868,21 +2923,30 @@ static ssize_t loop_rw_iter(int rw, struct file *file, struct kiocb *kiocb,
        return ret;
 }
 
-static void io_req_map_rw(struct io_kiocb *req, ssize_t io_size,
-                         struct iovec *iovec, struct iovec *fast_iov,
-                         struct iov_iter *iter)
+static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
+                         const struct iovec *fast_iov, struct iov_iter *iter)
 {
        struct io_async_rw *rw = &req->io->rw;
 
-       rw->nr_segs = iter->nr_segs;
-       rw->size = io_size;
+       memcpy(&rw->iter, iter, sizeof(*iter));
+       rw->free_iovec = NULL;
+       rw->bytes_done = 0;
+       /* can only be fixed buffers, no need to do anything */
+       if (iter->type == ITER_BVEC)
+               return;
        if (!iovec) {
-               rw->iov = rw->fast_iov;
-               if (rw->iov != fast_iov)
-                       memcpy(rw->iov, fast_iov,
+               unsigned iov_off = 0;
+
+               rw->iter.iov = rw->fast_iov;
+               if (iter->iov != fast_iov) {
+                       iov_off = iter->iov - fast_iov;
+                       rw->iter.iov += iov_off;
+               }
+               if (rw->fast_iov != fast_iov)
+                       memcpy(rw->fast_iov + iov_off, fast_iov + iov_off,
                               sizeof(struct iovec) * iter->nr_segs);
        } else {
-               rw->iov = iovec;
+               rw->free_iovec = iovec;
                req->flags |= REQ_F_NEED_CLEANUP;
        }
 }
@@ -2901,17 +2965,17 @@ static int io_alloc_async_ctx(struct io_kiocb *req)
        return  __io_alloc_async_ctx(req);
 }
 
-static int io_setup_async_rw(struct io_kiocb *req, ssize_t io_size,
-                            struct iovec *iovec, struct iovec *fast_iov,
-                            struct iov_iter *iter)
+static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
+                            const struct iovec *fast_iov,
+                            struct iov_iter *iter, bool force)
 {
-       if (!io_op_defs[req->opcode].async_ctx)
+       if (!force && !io_op_defs[req->opcode].async_ctx)
                return 0;
        if (!req->io) {
                if (__io_alloc_async_ctx(req))
                        return -ENOMEM;
 
-               io_req_map_rw(req, io_size, iovec, fast_iov, iter);
+               io_req_map_rw(req, iovec, fast_iov, iter);
        }
        return 0;
 }
@@ -2919,18 +2983,16 @@ static int io_setup_async_rw(struct io_kiocb *req, ssize_t io_size,
 static inline int io_rw_prep_async(struct io_kiocb *req, int rw,
                                   bool force_nonblock)
 {
-       struct io_async_ctx *io = req->io;
-       struct iov_iter iter;
+       struct io_async_rw *iorw = &req->io->rw;
        ssize_t ret;
 
-       io->rw.iov = io->rw.fast_iov;
-       req->io = NULL;
-       ret = io_import_iovec(rw, req, &io->rw.iov, &iter, !force_nonblock);
-       req->io = io;
+       iorw->iter.iov = iorw->fast_iov;
+       ret = __io_import_iovec(rw, req, (struct iovec **) &iorw->iter.iov,
+                               &iorw->iter, !force_nonblock);
        if (unlikely(ret < 0))
                return ret;
 
-       io_req_map_rw(req, ret, io->rw.iov, io->rw.fast_iov, &iter);
+       io_req_map_rw(req, iorw->iter.iov, iorw->fast_iov, &iorw->iter);
        return 0;
 }
 
@@ -2952,6 +3014,16 @@ static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
        return io_rw_prep_async(req, READ, force_nonblock);
 }
 
+/*
+ * This is our waitqueue callback handler, registered through lock_page_async()
+ * when we initially tried to do the IO with the iocb armed our waitqueue.
+ * This gets called when the page is unlocked, and we generally expect that to
+ * happen when the page IO is completed and the page is now uptodate. This will
+ * queue a task_work based retry of the operation, attempting to copy the data
+ * again. If the latter fails because the page was NOT uptodate, then we will
+ * do a thread based blocking retry of the operation. That's the unexpected
+ * slow path.
+ */
 static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
                             int sync, void *arg)
 {
@@ -2965,13 +3037,11 @@ static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
        if (!wake_page_match(wpq, key))
                return 0;
 
-       /* Stop waking things up if the page is locked again */
-       if (test_bit(key->bit_nr, &key->page->flags))
-               return -1;
-
        list_del_init(&wait->entry);
 
        init_task_work(&req->task_work, io_req_task_submit);
+       percpu_ref_get(&req->ctx->refs);
+
        /* submit ref gets dropped, acquire a new one */
        refcount_inc(&req->refs);
        ret = io_req_task_work_add(req, &req->task_work);
@@ -2987,40 +3057,31 @@ static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
        return 1;
 }
 
-static inline int kiocb_wait_page_queue_init(struct kiocb *kiocb,
-                                            struct wait_page_queue *wait,
-                                            wait_queue_func_t func,
-                                            void *data)
-{
-       /* Can't support async wakeup with polled IO */
-       if (kiocb->ki_flags & IOCB_HIPRI)
-               return -EINVAL;
-       if (kiocb->ki_filp->f_mode & FMODE_BUF_RASYNC) {
-               wait->wait.func = func;
-               wait->wait.private = data;
-               wait->wait.flags = 0;
-               INIT_LIST_HEAD(&wait->wait.entry);
-               kiocb->ki_flags |= IOCB_WAITQ;
-               kiocb->ki_waitq = wait;
-               return 0;
-       }
-
-       return -EOPNOTSUPP;
-}
-
-
+/*
+ * This controls whether a given IO request should be armed for async page
+ * based retry. If we return false here, the request is handed to the async
+ * worker threads for retry. If we're doing buffered reads on a regular file,
+ * we prepare a private wait_page_queue entry and retry the operation. This
+ * will either succeed because the page is now uptodate and unlocked, or it
+ * will register a callback when the page is unlocked at IO completion. Through
+ * that callback, io_uring uses task_work to set up a retry of the operation.
+ * That retry will attempt the buffered read again. The retry will generally
+ * succeed, or in rare cases where it fails, we then fall back to using the
+ * async worker threads for a blocking retry.
+ */
 static bool io_rw_should_retry(struct io_kiocb *req)
 {
+       struct wait_page_queue *wait = &req->io->rw.wpq;
        struct kiocb *kiocb = &req->rw.kiocb;
-       int ret;
 
        /* never retry for NOWAIT, we just complete with -EAGAIN */
        if (req->flags & REQ_F_NOWAIT)
                return false;
 
-       /* already tried, or we're doing O_DIRECT */
-       if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_WAITQ))
+       /* Only for buffered IO */
+       if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
                return false;
+
        /*
         * just use poll if we can, and don't attempt if the fs doesn't
         * support callback based unlocks
@@ -3028,28 +3089,25 @@ static bool io_rw_should_retry(struct io_kiocb *req)
        if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC))
                return false;
 
-       /*
-        * If request type doesn't require req->io to defer in general,
-        * we need to allocate it here
-        */
-       if (!req->io && __io_alloc_async_ctx(req))
-               return false;
+       wait->wait.func = io_async_buf_func;
+       wait->wait.private = req;
+       wait->wait.flags = 0;
+       INIT_LIST_HEAD(&wait->wait.entry);
+       kiocb->ki_flags |= IOCB_WAITQ;
+       kiocb->ki_waitq = wait;
 
-       ret = kiocb_wait_page_queue_init(kiocb, &req->io->rw.wpq,
-                                               io_async_buf_func, req);
-       if (!ret) {
-               io_get_req_task(req);
-               return true;
-       }
-
-       return false;
+       io_get_req_task(req);
+       return true;
 }
 
 static int io_iter_do_read(struct io_kiocb *req, struct iov_iter *iter)
 {
        if (req->file->f_op->read_iter)
                return call_read_iter(req->file, &req->rw.kiocb, iter);
-       return loop_rw_iter(READ, req->file, &req->rw.kiocb, iter);
+       else if (req->file->f_op->read)
+               return loop_rw_iter(READ, req->file, &req->rw.kiocb, iter);
+       else
+               return -EINVAL;
 }
 
 static int io_read(struct io_kiocb *req, bool force_nonblock,
@@ -3057,16 +3115,19 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
 {
        struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
        struct kiocb *kiocb = &req->rw.kiocb;
-       struct iov_iter iter;
-       size_t iov_count;
+       struct iov_iter __iter, *iter = &__iter;
        ssize_t io_size, ret, ret2;
-       unsigned long nr_segs;
+       size_t iov_count;
 
-       ret = io_import_iovec(READ, req, &iovec, &iter, !force_nonblock);
+       if (req->io)
+               iter = &req->io->rw.iter;
+
+       ret = io_import_iovec(READ, req, &iovec, iter, !force_nonblock);
        if (ret < 0)
                return ret;
        io_size = ret;
        req->result = io_size;
+       ret = 0;
 
        /* Ensure we clear previously set non-block flag */
        if (!force_nonblock)
@@ -3076,41 +3137,72 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
        if (force_nonblock && !io_file_supports_async(req->file, READ))
                goto copy_iov;
 
-       iov_count = iov_iter_count(&iter);
-       nr_segs = iter.nr_segs;
+       iov_count = iov_iter_count(iter);
        ret = rw_verify_area(READ, req->file, &kiocb->ki_pos, iov_count);
        if (unlikely(ret))
                goto out_free;
 
-       ret2 = io_iter_do_read(req, &iter);
+       ret = io_iter_do_read(req, iter);
 
-       /* Catch -EAGAIN return for forced non-blocking submission */
-       if (!force_nonblock || (ret2 != -EAGAIN && ret2 != -EIO)) {
-               kiocb_done(kiocb, ret2, cs);
-       } else {
-               iter.count = iov_count;
-               iter.nr_segs = nr_segs;
-copy_iov:
-               ret = io_setup_async_rw(req, io_size, iovec, inline_vecs,
-                                       &iter);
+       if (!ret) {
+               goto done;
+       } else if (ret == -EIOCBQUEUED) {
+               ret = 0;
+               goto out_free;
+       } else if (ret == -EAGAIN) {
+               if (!force_nonblock)
+                       goto done;
+               ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false);
                if (ret)
                        goto out_free;
-               /* it's copied and will be cleaned with ->io */
-               iovec = NULL;
-               /* if we can retry, do so with the callbacks armed */
-               if (io_rw_should_retry(req)) {
-                       ret2 = io_iter_do_read(req, &iter);
-                       if (ret2 == -EIOCBQUEUED) {
-                               goto out_free;
-                       } else if (ret2 != -EAGAIN) {
-                               kiocb_done(kiocb, ret2, cs);
-                               goto out_free;
-                       }
-               }
+               return -EAGAIN;
+       } else if (ret < 0) {
+               goto out_free;
+       }
+
+       /* read it all, or we did a blocking attempt. No retry. */
+       if (!iov_iter_count(iter) || !force_nonblock ||
+           (req->file->f_flags & O_NONBLOCK))
+               goto done;
+
+       io_size -= ret;
+copy_iov:
+       ret2 = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
+       if (ret2) {
+               ret = ret2;
+               goto out_free;
+       }
+       /* it's copied and will be cleaned with ->io */
+       iovec = NULL;
+       /* now use our persistent iterator, if we aren't already */
+       iter = &req->io->rw.iter;
+retry:
+       req->io->rw.bytes_done += ret;
+       /* if we can retry, do so with the callbacks armed */
+       if (!io_rw_should_retry(req)) {
                kiocb->ki_flags &= ~IOCB_WAITQ;
                return -EAGAIN;
        }
+
+       /*
+        * Now retry the read with the IOCB_WAITQ parts set in the iocb. If we
+        * get -EIOCBQUEUED, then we'll get a notification when the desired
+        * page gets unlocked. We can also get a partial read here, and if we
+        * do, then just retry at the new offset.
+        */
+       ret = io_iter_do_read(req, iter);
+       if (ret == -EIOCBQUEUED) {
+               ret = 0;
+               goto out_free;
+       } else if (ret > 0 && ret < io_size) {
+               /* we got some bytes, but not all. retry. */
+               goto retry;
+       }
+done:
+       kiocb_done(kiocb, ret, cs);
+       ret = 0;
 out_free:
+       /* it's reportedly faster than delegating the null check to kfree() */
        if (iovec)
                kfree(iovec);
        return ret;
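
For context: the reworked io_read() above keeps buffered-read retries entirely inside the kernel, so nothing changes on the submission side for applications. A minimal liburing sketch of such a read (illustrative only, not part of this patch; error handling trimmed to the essentials):

    #include <fcntl.h>
    #include <stdio.h>
    #include <liburing.h>

    int main(void)
    {
            struct io_uring ring;
            struct io_uring_sqe *sqe;
            struct io_uring_cqe *cqe;
            char buf[4096];
            int fd;

            if (io_uring_queue_init(8, &ring, 0) < 0)
                    return 1;
            fd = open("/etc/hostname", O_RDONLY);
            if (fd < 0)
                    return 1;

            /* plain buffered read: no O_DIRECT, no IOSQE_ASYNC */
            sqe = io_uring_get_sqe(&ring);
            io_uring_prep_read(sqe, fd, buf, sizeof(buf), 0);
            io_uring_submit(&ring);

            /* if the page wasn't uptodate, the kernel retries via the
             * page-unlock callback (on FMODE_BUF_RASYNC filesystems) instead
             * of punting the whole request to an io-wq worker thread */
            io_uring_wait_cqe(&ring, &cqe);
            printf("read returned %d\n", cqe->res);
            io_uring_cqe_seen(&ring, cqe);
            io_uring_queue_exit(&ring);
            return 0;
    }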
@@ -3139,12 +3231,14 @@ static int io_write(struct io_kiocb *req, bool force_nonblock,
 {
        struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
        struct kiocb *kiocb = &req->rw.kiocb;
-       struct iov_iter iter;
+       struct iov_iter __iter, *iter = &__iter;
        size_t iov_count;
        ssize_t ret, ret2, io_size;
-       unsigned long nr_segs;
 
-       ret = io_import_iovec(WRITE, req, &iovec, &iter, !force_nonblock);
+       if (req->io)
+               iter = &req->io->rw.iter;
+
+       ret = io_import_iovec(WRITE, req, &iovec, iter, !force_nonblock);
        if (ret < 0)
                return ret;
        io_size = ret;
@@ -3163,8 +3257,7 @@ static int io_write(struct io_kiocb *req, bool force_nonblock,
            (req->flags & REQ_F_ISREG))
                goto copy_iov;
 
-       iov_count = iov_iter_count(&iter);
-       nr_segs = iter.nr_segs;
+       iov_count = iov_iter_count(iter);
        ret = rw_verify_area(WRITE, req->file, &kiocb->ki_pos, iov_count);
        if (unlikely(ret))
                goto out_free;
@@ -3185,9 +3278,11 @@ static int io_write(struct io_kiocb *req, bool force_nonblock,
        kiocb->ki_flags |= IOCB_WRITE;
 
        if (req->file->f_op->write_iter)
-               ret2 = call_write_iter(req->file, kiocb, &iter);
+               ret2 = call_write_iter(req->file, kiocb, iter);
+       else if (req->file->f_op->write)
+               ret2 = loop_rw_iter(WRITE, req->file, kiocb, iter);
        else
-               ret2 = loop_rw_iter(WRITE, req->file, kiocb, &iter);
+               ret2 = -EINVAL;
 
        /*
         * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
@@ -3198,18 +3293,13 @@ static int io_write(struct io_kiocb *req, bool force_nonblock,
        if (!force_nonblock || ret2 != -EAGAIN) {
                kiocb_done(kiocb, ret2, cs);
        } else {
-               iter.count = iov_count;
-               iter.nr_segs = nr_segs;
 copy_iov:
-               ret = io_setup_async_rw(req, io_size, iovec, inline_vecs,
-                                       &iter);
-               if (ret)
-                       goto out_free;
-               /* it's copied and will be cleaned with ->io */
-               iovec = NULL;
-               return -EAGAIN;
+               ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false);
+               if (!ret)
+                       return -EAGAIN;
        }
 out_free:
+       /* it's reportedly faster than delegating the null check to kfree() */
        if (iovec)
                kfree(iovec);
        return ret;
@@ -4488,6 +4578,8 @@ static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
 
        req->result = mask;
        init_task_work(&req->task_work, func);
+       percpu_ref_get(&req->ctx->refs);
+
        /*
         * If this fails, then the task is exiting. When a task exits, the
         * work gets canceled, so just cancel this request as well instead
@@ -4526,9 +4618,24 @@ static bool io_poll_rewait(struct io_kiocb *req, struct io_poll_iocb *poll)
        return false;
 }
 
-static void io_poll_remove_double(struct io_kiocb *req, void *data)
+static struct io_poll_iocb *io_poll_get_double(struct io_kiocb *req)
 {
-       struct io_poll_iocb *poll = data;
+       /* pure poll stashes this in ->io, poll driven retry elsewhere */
+       if (req->opcode == IORING_OP_POLL_ADD)
+               return (struct io_poll_iocb *) req->io;
+       return req->apoll->double_poll;
+}
+
+static struct io_poll_iocb *io_poll_get_single(struct io_kiocb *req)
+{
+       if (req->opcode == IORING_OP_POLL_ADD)
+               return &req->poll;
+       return &req->apoll->poll;
+}
+
+static void io_poll_remove_double(struct io_kiocb *req)
+{
+       struct io_poll_iocb *poll = io_poll_get_double(req);
 
        lockdep_assert_held(&req->ctx->completion_lock);
 
@@ -4548,7 +4655,7 @@ static void io_poll_complete(struct io_kiocb *req, __poll_t mask, int error)
 {
        struct io_ring_ctx *ctx = req->ctx;
 
-       io_poll_remove_double(req, req->io);
+       io_poll_remove_double(req);
        req->poll.done = true;
        io_cqring_fill_event(req, error ? error : mangle_poll(mask));
        io_commit_cqring(ctx);
@@ -4575,18 +4682,20 @@ static void io_poll_task_handler(struct io_kiocb *req, struct io_kiocb **nxt)
 static void io_poll_task_func(struct callback_head *cb)
 {
        struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
+       struct io_ring_ctx *ctx = req->ctx;
        struct io_kiocb *nxt = NULL;
 
        io_poll_task_handler(req, &nxt);
        if (nxt)
                __io_req_task_submit(nxt);
+       percpu_ref_put(&ctx->refs);
 }
 
 static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode,
                               int sync, void *key)
 {
        struct io_kiocb *req = wait->private;
-       struct io_poll_iocb *poll = req->apoll->double_poll;
+       struct io_poll_iocb *poll = io_poll_get_single(req);
        __poll_t mask = key_to_poll(key);
 
        /* for instances that support it check for an event match first: */
@@ -4600,6 +4709,8 @@ static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode,
                done = list_empty(&poll->wait.entry);
                if (!done)
                        list_del_init(&poll->wait.entry);
+               /* make sure double remove sees this as being gone */
+               wait->private = NULL;
                spin_unlock(&poll->head->lock);
                if (!done)
                        __io_async_wake(req, poll, mask, io_poll_task_func);
@@ -4675,6 +4786,7 @@ static void io_async_task_func(struct callback_head *cb)
 
        if (io_poll_rewait(req, &apoll->poll)) {
                spin_unlock_irq(&ctx->completion_lock);
+               percpu_ref_put(&ctx->refs);
                return;
        }
 
@@ -4682,7 +4794,7 @@ static void io_async_task_func(struct callback_head *cb)
        if (hash_hashed(&req->hash_node))
                hash_del(&req->hash_node);
 
-       io_poll_remove_double(req, apoll->double_poll);
+       io_poll_remove_double(req);
        spin_unlock_irq(&ctx->completion_lock);
 
        if (!READ_ONCE(apoll->poll.canceled))
@@ -4690,6 +4802,7 @@ static void io_async_task_func(struct callback_head *cb)
        else
                __io_req_task_cancel(req, -ECANCELED);
 
+       percpu_ref_put(&ctx->refs);
        kfree(apoll->double_poll);
        kfree(apoll);
 }
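
The percpu_ref_get()/percpu_ref_put() calls added in the poll and async-buffered hunks above pin the ring context for as long as a task_work handler is queued, so the ctx cannot be torn down underneath a pending handler. A minimal sketch of that pairing, assuming hypothetical my_ctx/my_req types and the bool-notify task_work_add() signature of this kernel era (illustrative only, not part of the patch):

    #include <linux/kernel.h>
    #include <linux/percpu-refcount.h>
    #include <linux/sched.h>
    #include <linux/task_work.h>

    struct my_ctx {
            struct percpu_ref       refs;
    };

    struct my_req {
            struct my_ctx           *ctx;
            struct callback_head    task_work;
    };

    static void my_task_work_fn(struct callback_head *cb)
    {
            struct my_req *req = container_of(cb, struct my_req, task_work);

            /* ... run the deferred completion ... */
            percpu_ref_put(&req->ctx->refs);        /* pairs with the get below */
    }

    static int queue_my_task_work(struct my_req *req, struct task_struct *tsk)
    {
            init_task_work(&req->task_work, my_task_work_fn);
            percpu_ref_get(&req->ctx->refs);        /* keep ctx alive until the handler runs */
            /* on failure the caller must drop the ref it just took */
            return task_work_add(tsk, &req->task_work, true);
    }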
@@ -4791,8 +4904,8 @@ static bool io_arm_poll_handler(struct io_kiocb *req)
 
        ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask,
                                        io_async_wake);
-       if (ret) {
-               io_poll_remove_double(req, apoll->double_poll);
+       if (ret || ipt.error) {
+               io_poll_remove_double(req);
                spin_unlock_irq(&ctx->completion_lock);
                kfree(apoll->double_poll);
                kfree(apoll);
@@ -4824,14 +4937,13 @@ static bool io_poll_remove_one(struct io_kiocb *req)
 {
        bool do_complete;
 
+       io_poll_remove_double(req);
+
        if (req->opcode == IORING_OP_POLL_ADD) {
-               io_poll_remove_double(req, req->io);
                do_complete = __io_poll_remove_one(req, &req->poll);
        } else {
                struct async_poll *apoll = req->apoll;
 
-               io_poll_remove_double(req, apoll->double_poll);
-
                /* non-poll requests have submit ref still */
                do_complete = __io_poll_remove_one(req, &apoll->poll);
                if (do_complete) {
@@ -4845,6 +4957,7 @@ static bool io_poll_remove_one(struct io_kiocb *req)
                io_cqring_fill_event(req, -ECANCELED);
                io_commit_cqring(req->ctx);
                req->flags |= REQ_F_COMP_LOCKED;
+               req_set_fail_links(req);
                io_put_req(req);
        }
 
@@ -5017,6 +5130,23 @@ static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
        return HRTIMER_NORESTART;
 }
 
+static int __io_timeout_cancel(struct io_kiocb *req)
+{
+       int ret;
+
+       list_del_init(&req->timeout.list);
+
+       ret = hrtimer_try_to_cancel(&req->io->timeout.timer);
+       if (ret == -1)
+               return -EALREADY;
+
+       req_set_fail_links(req);
+       req->flags |= REQ_F_COMP_LOCKED;
+       io_cqring_fill_event(req, -ECANCELED);
+       io_put_req(req);
+       return 0;
+}
+
 static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
 {
        struct io_kiocb *req;
@@ -5024,7 +5154,6 @@ static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
 
        list_for_each_entry(req, &ctx->timeout_list, timeout.list) {
                if (user_data == req->user_data) {
-                       list_del_init(&req->timeout.list);
                        ret = 0;
                        break;
                }
@@ -5033,14 +5162,7 @@ static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
        if (ret == -ENOENT)
                return ret;
 
-       ret = hrtimer_try_to_cancel(&req->io->timeout.timer);
-       if (ret == -1)
-               return -EALREADY;
-
-       req_set_fail_links(req);
-       io_cqring_fill_event(req, -ECANCELED);
-       io_put_req(req);
-       return 0;
+       return __io_timeout_cancel(req);
 }
 
 static int io_timeout_remove_prep(struct io_kiocb *req,
@@ -5481,8 +5603,8 @@ static void __io_clean_op(struct io_kiocb *req)
                case IORING_OP_WRITEV:
                case IORING_OP_WRITE_FIXED:
                case IORING_OP_WRITE:
-                       if (io->rw.iov != io->rw.fast_iov)
-                               kfree(io->rw.iov);
+                       if (io->rw.free_iovec)
+                               kfree(io->rw.free_iovec);
                        break;
                case IORING_OP_RECVMSG:
                case IORING_OP_SENDMSG:
@@ -5497,6 +5619,18 @@ static void __io_clean_op(struct io_kiocb *req)
                }
                req->flags &= ~REQ_F_NEED_CLEANUP;
        }
+
+       if (req->flags & REQ_F_INFLIGHT) {
+               struct io_ring_ctx *ctx = req->ctx;
+               unsigned long flags;
+
+               spin_lock_irqsave(&ctx->inflight_lock, flags);
+               list_del(&req->inflight_entry);
+               if (waitqueue_active(&ctx->inflight_wait))
+                       wake_up(&ctx->inflight_wait);
+               spin_unlock_irqrestore(&ctx->inflight_lock, flags);
+               req->flags &= ~REQ_F_INFLIGHT;
+       }
 }
 
 static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
@@ -5917,15 +6051,12 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
        return HRTIMER_NORESTART;
 }
 
-static void io_queue_linked_timeout(struct io_kiocb *req)
+static void __io_queue_linked_timeout(struct io_kiocb *req)
 {
-       struct io_ring_ctx *ctx = req->ctx;
-
        /*
         * If the list is now empty, then our linked request finished before
         * we got a chance to setup the timer
         */
-       spin_lock_irq(&ctx->completion_lock);
        if (!list_empty(&req->link_list)) {
                struct io_timeout_data *data = &req->io->timeout;
 
@@ -5933,6 +6064,14 @@ static void io_queue_linked_timeout(struct io_kiocb *req)
                hrtimer_start(&data->timer, timespec64_to_ktime(data->ts),
                                data->mode);
        }
+}
+
+static void io_queue_linked_timeout(struct io_kiocb *req)
+{
+       struct io_ring_ctx *ctx = req->ctx;
+
+       spin_lock_irq(&ctx->completion_lock);
+       __io_queue_linked_timeout(req);
        spin_unlock_irq(&ctx->completion_lock);
 
        /* drop submission reference */
@@ -7818,7 +7957,13 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
                         ACCT_LOCKED);
 
        INIT_WORK(&ctx->exit_work, io_ring_exit_work);
-       queue_work(system_wq, &ctx->exit_work);
+       /*
+        * Use system_unbound_wq to avoid spawning tons of event kworkers
+        * if we're exiting a ton of rings at the same time. It just adds
+        * noise and overhead; there's no discernible change in runtime
+        * over using system_wq.
+        */
+       queue_work(system_unbound_wq, &ctx->exit_work);
 }
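
Queueing the exit work on system_unbound_wq is the stock pattern for long, sleepable teardown that should not be pinned to the submitting CPU; a small self-contained sketch with hypothetical names (illustrative only, not part of the patch):

    #include <linux/workqueue.h>

    static void my_exit_work_fn(struct work_struct *work)
    {
            /* long-running, sleepable teardown goes here */
    }

    static DECLARE_WORK(my_exit_work, my_exit_work_fn);

    static void my_schedule_exit(void)
    {
            /*
             * system_unbound_wq items are not bound to the submitting CPU, so
             * a burst of teardowns does not spawn a pile of per-CPU kworkers.
             */
            queue_work(system_unbound_wq, &my_exit_work);
    }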
 
 static int io_uring_release(struct inode *inode, struct file *file)
@@ -7837,6 +7982,98 @@ static bool io_wq_files_match(struct io_wq_work *work, void *data)
        return work->files == files;
 }
 
+/*
+ * Returns true if 'preq' is the link parent of 'req'
+ */
+static bool io_match_link(struct io_kiocb *preq, struct io_kiocb *req)
+{
+       struct io_kiocb *link;
+
+       if (!(preq->flags & REQ_F_LINK_HEAD))
+               return false;
+
+       list_for_each_entry(link, &preq->link_list, link_list) {
+               if (link == req)
+                       return true;
+       }
+
+       return false;
+}
+
+/*
+ * We're looking to cancel 'req' because it's holding on to our files, but
+ * 'req' could be a link to another request. See if it is, and cancel that
+ * parent request if so.
+ */
+static bool io_poll_remove_link(struct io_ring_ctx *ctx, struct io_kiocb *req)
+{
+       struct hlist_node *tmp;
+       struct io_kiocb *preq;
+       bool found = false;
+       int i;
+
+       spin_lock_irq(&ctx->completion_lock);
+       for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
+               struct hlist_head *list;
+
+               list = &ctx->cancel_hash[i];
+               hlist_for_each_entry_safe(preq, tmp, list, hash_node) {
+                       found = io_match_link(preq, req);
+                       if (found) {
+                               io_poll_remove_one(preq);
+                               break;
+                       }
+               }
+       }
+       spin_unlock_irq(&ctx->completion_lock);
+       return found;
+}
+
+static bool io_timeout_remove_link(struct io_ring_ctx *ctx,
+                                  struct io_kiocb *req)
+{
+       struct io_kiocb *preq;
+       bool found = false;
+
+       spin_lock_irq(&ctx->completion_lock);
+       list_for_each_entry(preq, &ctx->timeout_list, timeout.list) {
+               found = io_match_link(preq, req);
+               if (found) {
+                       __io_timeout_cancel(preq);
+                       break;
+               }
+       }
+       spin_unlock_irq(&ctx->completion_lock);
+       return found;
+}
+
+static bool io_cancel_link_cb(struct io_wq_work *work, void *data)
+{
+       return io_match_link(container_of(work, struct io_kiocb, work), data);
+}
+
+static void io_attempt_cancel(struct io_ring_ctx *ctx, struct io_kiocb *req)
+{
+       enum io_wq_cancel cret;
+
+       /* cancel this particular work, if it's running */
+       cret = io_wq_cancel_work(ctx->io_wq, &req->work);
+       if (cret != IO_WQ_CANCEL_NOTFOUND)
+               return;
+
+       /* find links that hold this pending, cancel those */
+       cret = io_wq_cancel_cb(ctx->io_wq, io_cancel_link_cb, req, true);
+       if (cret != IO_WQ_CANCEL_NOTFOUND)
+               return;
+
+       /* if we have a poll link holding this pending, cancel that */
+       if (io_poll_remove_link(ctx, req))
+               return;
+
+       /* final option, timeout link is holding this req pending */
+       io_timeout_remove_link(ctx, req);
+}
+
 static void io_uring_cancel_files(struct io_ring_ctx *ctx,
                                  struct files_struct *files)
 {
@@ -7868,32 +8105,9 @@ static void io_uring_cancel_files(struct io_ring_ctx *ctx,
                /* We need to keep going until we don't find a matching req */
                if (!cancel_req)
                        break;
-
-               if (cancel_req->flags & REQ_F_OVERFLOW) {
-                       spin_lock_irq(&ctx->completion_lock);
-                       list_del(&cancel_req->compl.list);
-                       cancel_req->flags &= ~REQ_F_OVERFLOW;
-
-                       io_cqring_mark_overflow(ctx);
-                       WRITE_ONCE(ctx->rings->cq_overflow,
-                               atomic_inc_return(&ctx->cached_cq_overflow));
-                       io_commit_cqring(ctx);
-                       spin_unlock_irq(&ctx->completion_lock);
-
-                       /*
-                        * Put inflight ref and overflow ref. If that's
-                        * all we had, then we're done with this request.
-                        */
-                       if (refcount_sub_and_test(2, &cancel_req->refs)) {
-                               io_free_req(cancel_req);
-                               finish_wait(&ctx->inflight_wait, &wait);
-                               continue;
-                       }
-               } else {
-                       io_wq_cancel_work(ctx->io_wq, &cancel_req->work);
-                       io_put_req(cancel_req);
-               }
-
+               /* cancel this request, or head link requests */
+               io_attempt_cancel(ctx, cancel_req);
+               io_put_req(cancel_req);
                schedule();
                finish_wait(&ctx->inflight_wait, &wait);
        }
@@ -8171,6 +8385,10 @@ static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
        struct io_rings *rings;
        size_t size, sq_array_offset;
 
+       /* make sure these are sane, as we already accounted them */
+       ctx->sq_entries = p->sq_entries;
+       ctx->cq_entries = p->cq_entries;
+
        size = rings_size(p->sq_entries, p->cq_entries, &sq_array_offset);
        if (size == SIZE_MAX)
                return -EOVERFLOW;
@@ -8187,8 +8405,6 @@ static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
        rings->cq_ring_entries = p->cq_entries;
        ctx->sq_mask = rings->sq_ring_mask;
        ctx->cq_mask = rings->cq_ring_mask;
-       ctx->sq_entries = rings->sq_ring_entries;
-       ctx->cq_entries = rings->cq_ring_entries;
 
        size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
        if (size == SIZE_MAX) {
@@ -8317,6 +8533,16 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p,
        ctx->user = user;
        ctx->creds = get_current_cred();
 
+       /*
+        * Account memory _before_ installing the file descriptor. Once
+        * the descriptor is installed, it can get closed at any time. Also
+        * do this before hitting the general error path, as ring freeing
+        * will un-account as well.
+        */
+       io_account_mem(ctx, ring_pages(p->sq_entries, p->cq_entries),
+                      ACCT_LOCKED);
+       ctx->limit_mem = limit_mem;
+
        ret = io_allocate_scq_urings(ctx, p);
        if (ret)
                goto err;
@@ -8354,14 +8580,6 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p,
        }
 
        /*
-        * Account memory _before_ installing the file descriptor. Once
-        * the descriptor is installed, it can get closed at any time.
-        */
-       io_account_mem(ctx, ring_pages(p->sq_entries, p->cq_entries),
-                      ACCT_LOCKED);
-       ctx->limit_mem = limit_mem;
-
-       /*
         * Install ring fd as the very last thing, so we don't risk someone
         * having closed it before we finish setup
         */
index e494443..17fdc48 100644 (file)
@@ -1285,7 +1285,7 @@ journal_t *jbd2_journal_init_inode(struct inode *inode)
  * superblock as being NULL to prevent the journal destroy from writing
  * back a bogus superblock.
  */
-static void journal_fail_superblock (journal_t *journal)
+static void journal_fail_superblock(journal_t *journal)
 {
        struct buffer_head *bh = journal->j_sb_buffer;
        brelse(bh);
@@ -1367,8 +1367,10 @@ static int jbd2_write_superblock(journal_t *journal, int write_flags)
        int ret;
 
        /* Buffer got discarded which means block device got invalidated */
-       if (!buffer_mapped(bh))
+       if (!buffer_mapped(bh)) {
+               unlock_buffer(bh);
                return -EIO;
+       }
 
        trace_jbd2_write_superblock(journal, write_flags);
        if (!(journal->j_flags & JBD2_BARRIER))
@@ -1815,7 +1817,7 @@ int jbd2_journal_destroy(journal_t *journal)
 
 
 /**
- *int jbd2_journal_check_used_features () - Check if features specified are used.
+ *int jbd2_journal_check_used_features() - Check if features specified are used.
  * @journal: Journal to check.
  * @compat: bitmask of compatible features
  * @ro: bitmask of features that force read-only mount
@@ -1825,7 +1827,7 @@ int jbd2_journal_destroy(journal_t *journal)
  * features.  Return true (non-zero) if it does.
  **/
 
-int jbd2_journal_check_used_features (journal_t *journal, unsigned long compat,
+int jbd2_journal_check_used_features(journal_t *journal, unsigned long compat,
                                 unsigned long ro, unsigned long incompat)
 {
        journal_superblock_t *sb;
@@ -1860,7 +1862,7 @@ int jbd2_journal_check_used_features (journal_t *journal, unsigned long compat,
  * all of a given set of features on this journal.  Return true
  * (non-zero) if it can. */
 
-int jbd2_journal_check_available_features (journal_t *journal, unsigned long compat,
+int jbd2_journal_check_available_features(journal_t *journal, unsigned long compat,
                                      unsigned long ro, unsigned long incompat)
 {
        if (!compat && !ro && !incompat)
@@ -1882,7 +1884,7 @@ int jbd2_journal_check_available_features (journal_t *journal, unsigned long com
 }
 
 /**
- * int jbd2_journal_set_features () - Mark a given journal feature in the superblock
+ * int jbd2_journal_set_features() - Mark a given journal feature in the superblock
  * @journal: Journal to act on.
  * @compat: bitmask of compatible features
  * @ro: bitmask of features that force read-only mount
@@ -1893,7 +1895,7 @@ int jbd2_journal_check_available_features (journal_t *journal, unsigned long com
  *
  */
 
-int jbd2_journal_set_features (journal_t *journal, unsigned long compat,
+int jbd2_journal_set_features(journal_t *journal, unsigned long compat,
                          unsigned long ro, unsigned long incompat)
 {
 #define INCOMPAT_FEATURE_ON(f) \
index 2ed278f..faa97d7 100644 (file)
@@ -690,14 +690,11 @@ static int do_one_pass(journal_t *journal,
                         * number. */
                        if (pass == PASS_SCAN &&
                            jbd2_has_feature_checksum(journal)) {
-                               int chksum_err, chksum_seen;
                                struct commit_header *cbh =
                                        (struct commit_header *)bh->b_data;
                                unsigned found_chksum =
                                        be32_to_cpu(cbh->h_chksum[0]);
 
-                               chksum_err = chksum_seen = 0;
-
                                if (info->end_transaction) {
                                        journal->j_failed_commit =
                                                info->end_transaction;
@@ -705,42 +702,23 @@ static int do_one_pass(journal_t *journal,
                                        break;
                                }
 
-                               if (crc32_sum == found_chksum &&
-                                   cbh->h_chksum_type == JBD2_CRC32_CHKSUM &&
-                                   cbh->h_chksum_size ==
-                                               JBD2_CRC32_CHKSUM_SIZE)
-                                      chksum_seen = 1;
-                               else if (!(cbh->h_chksum_type == 0 &&
-                                            cbh->h_chksum_size == 0 &&
-                                            found_chksum == 0 &&
-                                            !chksum_seen))
-                               /*
-                                * If fs is mounted using an old kernel and then
-                                * kernel with journal_chksum is used then we
-                                * get a situation where the journal flag has
-                                * checksum flag set but checksums are not
-                                * present i.e chksum = 0, in the individual
-                                * commit blocks.
-                                * Hence to avoid checksum failures, in this
-                                * situation, this extra check is added.
-                                */
-                                               chksum_err = 1;
-
-                               if (chksum_err) {
-                                       info->end_transaction = next_commit_ID;
-
-                                       if (!jbd2_has_feature_async_commit(journal)) {
-                                               journal->j_failed_commit =
-                                                       next_commit_ID;
-                                               brelse(bh);
-                                               break;
-                                       }
-                               }
+                               /* Neither checksum match nor unused? */
+                               if (!((crc32_sum == found_chksum &&
+                                      cbh->h_chksum_type ==
+                                               JBD2_CRC32_CHKSUM &&
+                                      cbh->h_chksum_size ==
+                                               JBD2_CRC32_CHKSUM_SIZE) ||
+                                     (cbh->h_chksum_type == 0 &&
+                                      cbh->h_chksum_size == 0 &&
+                                      found_chksum == 0)))
+                                       goto chksum_error;
+
                                crc32_sum = ~0;
                        }
                        if (pass == PASS_SCAN &&
                            !jbd2_commit_block_csum_verify(journal,
                                                           bh->b_data)) {
+                       chksum_error:
                                info->end_transaction = next_commit_ID;
 
                                if (!jbd2_has_feature_async_commit(journal)) {
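
The folded PASS_SCAN condition above accepts a commit block either when its checksum matches or when the block carries no checksum at all (a journal written by a kernel without commit checksums); an equivalent helper spelling the predicate out (illustrative only, not part of the patch):

    #include <linux/jbd2.h>
    #include <linux/types.h>

    /* true if the commit block passes the consolidated checksum test */
    static bool commit_chksum_ok(u32 crc32_sum, const struct commit_header *cbh)
    {
            u32 found = be32_to_cpu(cbh->h_chksum[0]);

            if (crc32_sum == found &&
                cbh->h_chksum_type == JBD2_CRC32_CHKSUM &&
                cbh->h_chksum_size == JBD2_CRC32_CHKSUM_SIZE)
                    return true;    /* checksum present and matching */

            /* no checksum recorded at all */
            return cbh->h_chksum_type == 0 && cbh->h_chksum_size == 0 && !found;
    }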
index e91aad3..4398573 100644 (file)
@@ -2026,6 +2026,9 @@ static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh)
  */
 static void __jbd2_journal_unfile_buffer(struct journal_head *jh)
 {
+       J_ASSERT_JH(jh, jh->b_transaction != NULL);
+       J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
+
        __jbd2_journal_temp_unlink_buffer(jh);
        jh->b_transaction = NULL;
 }
@@ -2078,10 +2081,6 @@ out:
  * int jbd2_journal_try_to_free_buffers() - try to free page buffers.
  * @journal: journal for operation
  * @page: to try and free
- * @gfp_mask: we use the mask to detect how hard should we try to release
- * buffers. If __GFP_DIRECT_RECLAIM and __GFP_FS is set, we wait for commit
- * code to release the buffers.
- *
  *
  * For all the buffers on this page,
  * if they are fully written out ordered data, move them onto BUF_CLEAN
@@ -2112,11 +2111,11 @@ out:
  *
  * Return 0 on failure, 1 on success
  */
-int jbd2_journal_try_to_free_buffers(journal_t *journal,
-                               struct page *page, gfp_t gfp_mask)
+int jbd2_journal_try_to_free_buffers(journal_t *journal, struct page *page)
 {
        struct buffer_head *head;
        struct buffer_head *bh;
+       bool has_write_io_error = false;
        int ret = 0;
 
        J_ASSERT(PageLocked(page));
@@ -2141,11 +2140,26 @@ int jbd2_journal_try_to_free_buffers(journal_t *journal,
                jbd2_journal_put_journal_head(jh);
                if (buffer_jbd(bh))
                        goto busy;
+
+               /*
+                * If we free a metadata buffer which has failed to be
+                * written out, the jbd2 checkpoint procedure will not detect
+                * this failure, which may lead to filesystem inconsistency
+                * after the journal tail is cleaned up.
+                */
+               if (buffer_write_io_error(bh)) {
+                       pr_err("JBD2: Error while async write back metadata bh %llu.",
+                              (unsigned long long)bh->b_blocknr);
+                       has_write_io_error = true;
+               }
        } while ((bh = bh->b_this_page) != head);
 
        ret = try_to_free_buffers(page);
 
 busy:
+       if (has_write_io_error)
+               jbd2_journal_abort(journal, -EIO);
+
        return ret;
 }
 
@@ -2572,6 +2586,13 @@ bool __jbd2_journal_refile_buffer(struct journal_head *jh)
 
        was_dirty = test_clear_buffer_jbddirty(bh);
        __jbd2_journal_temp_unlink_buffer(jh);
+
+       /*
+        * b_transaction must be set, otherwise the new b_transaction won't
+        * be holding the jh reference
+        */
+       J_ASSERT_JH(jh, jh->b_transaction != NULL);
+
        /*
         * We set b_transaction here because b_next_transaction will inherit
         * our jh reference and thus __jbd2_journal_file_buffer() must not
index f20cff1..7764937 100644 (file)
@@ -590,10 +590,14 @@ static int jffs2_rmdir (struct inode *dir_i, struct dentry *dentry)
        int ret;
        uint32_t now = JFFS2_NOW();
 
+       mutex_lock(&f->sem);
        for (fd = f->dents ; fd; fd = fd->next) {
-               if (fd->ino)
+               if (fd->ino) {
+                       mutex_unlock(&f->sem);
                        return -ENOTEMPTY;
+               }
        }
+       mutex_unlock(&f->sem);
 
        ret = jffs2_do_unlink(c, dir_f, dentry->d_name.name,
                              dentry->d_name.len, f, now);
index 5f7e284..db72a9d 100644 (file)
@@ -261,7 +261,8 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
        }
 #endif
        if (c->nr_erasing_blocks) {
-               if ( !c->used_size && ((c->nr_free_blocks+empty_blocks+bad_blocks)!= c->nr_blocks || bad_blocks == c->nr_blocks) ) {
+               if (!c->used_size && !c->unchecked_size &&
+                       ((c->nr_free_blocks+empty_blocks+bad_blocks) != c->nr_blocks || bad_blocks == c->nr_blocks)) {
                        pr_notice("Cowardly refusing to erase blocks on filesystem with no valid JFFS2 nodes\n");
                        pr_notice("empty_blocks %d, bad_blocks %d, c->nr_blocks %d\n",
                                  empty_blocks, bad_blocks, c->nr_blocks);
index 938fe32..8fc0542 100644 (file)
@@ -1808,6 +1808,9 @@ check_conflicting_open(struct file *filp, const long arg, int flags)
 
        if (flags & FL_LAYOUT)
                return 0;
+       if (flags & FL_DELEG)
+               /* We leave these checks to the caller. */
+               return 0;
 
        if (arg == F_RDLCK)
                return inode_is_open_for_write(inode) ? -EAGAIN : 0;
index 7cb5fd3..7b09a91 100644 (file)
@@ -150,6 +150,25 @@ static int minix_remount (struct super_block * sb, int * flags, char * data)
        return 0;
 }
 
+static bool minix_check_superblock(struct super_block *sb)
+{
+       struct minix_sb_info *sbi = minix_sb(sb);
+
+       if (sbi->s_imap_blocks == 0 || sbi->s_zmap_blocks == 0)
+               return false;
+
+       /*
+        * s_max_size must not exceed the block mapping limitation.  This check
+        * is only needed for V1 filesystems, since V2/V3 support an extra level
+        * of indirect blocks which places the limit well above U32_MAX.
+        */
+       if (sbi->s_version == MINIX_V1 &&
+           sb->s_maxbytes > (7 + 512 + 512*512) * BLOCK_SIZE)
+               return false;
+
+       return true;
+}
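
For reference, the V1 bound above works out to 7 + 512 + 512*512 = 262,663 mappable blocks; at BLOCK_SIZE = 1024 that is 268,966,912 bytes, roughly 256 MiB. V2/V3 add a further level of indirection, so their limit sits far above U32_MAX and needs no such check.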
+
 static int minix_fill_super(struct super_block *s, void *data, int silent)
 {
        struct buffer_head *bh;
@@ -185,7 +204,7 @@ static int minix_fill_super(struct super_block *s, void *data, int silent)
        sbi->s_zmap_blocks = ms->s_zmap_blocks;
        sbi->s_firstdatazone = ms->s_firstdatazone;
        sbi->s_log_zone_size = ms->s_log_zone_size;
-       sbi->s_max_size = ms->s_max_size;
+       s->s_maxbytes = ms->s_max_size;
        s->s_magic = ms->s_magic;
        if (s->s_magic == MINIX_SUPER_MAGIC) {
                sbi->s_version = MINIX_V1;
@@ -216,7 +235,7 @@ static int minix_fill_super(struct super_block *s, void *data, int silent)
                sbi->s_zmap_blocks = m3s->s_zmap_blocks;
                sbi->s_firstdatazone = m3s->s_firstdatazone;
                sbi->s_log_zone_size = m3s->s_log_zone_size;
-               sbi->s_max_size = m3s->s_max_size;
+               s->s_maxbytes = m3s->s_max_size;
                sbi->s_ninodes = m3s->s_ninodes;
                sbi->s_nzones = m3s->s_zones;
                sbi->s_dirsize = 64;
@@ -228,11 +247,12 @@ static int minix_fill_super(struct super_block *s, void *data, int silent)
        } else
                goto out_no_fs;
 
+       if (!minix_check_superblock(s))
+               goto out_illegal_sb;
+
        /*
         * Allocate the buffer map to keep the superblock small.
         */
-       if (sbi->s_imap_blocks == 0 || sbi->s_zmap_blocks == 0)
-               goto out_illegal_sb;
        i = (sbi->s_imap_blocks + sbi->s_zmap_blocks) * sizeof(bh);
        map = kzalloc(i, GFP_KERNEL);
        if (!map)
@@ -468,6 +488,13 @@ static struct inode *V1_minix_iget(struct inode *inode)
                iget_failed(inode);
                return ERR_PTR(-EIO);
        }
+       if (raw_inode->i_nlinks == 0) {
+               printk("MINIX-fs: deleted inode referenced: %lu\n",
+                      inode->i_ino);
+               brelse(bh);
+               iget_failed(inode);
+               return ERR_PTR(-ESTALE);
+       }
        inode->i_mode = raw_inode->i_mode;
        i_uid_write(inode, raw_inode->i_uid);
        i_gid_write(inode, raw_inode->i_gid);
@@ -501,6 +528,13 @@ static struct inode *V2_minix_iget(struct inode *inode)
                iget_failed(inode);
                return ERR_PTR(-EIO);
        }
+       if (raw_inode->i_nlinks == 0) {
+               printk("MINIX-fs: deleted inode referenced: %lu\n",
+                      inode->i_ino);
+               brelse(bh);
+               iget_failed(inode);
+               return ERR_PTR(-ESTALE);
+       }
        inode->i_mode = raw_inode->i_mode;
        i_uid_write(inode, raw_inode->i_uid);
        i_gid_write(inode, raw_inode->i_gid);
index 043c3fd..4461487 100644 (file)
@@ -75,6 +75,7 @@ static int alloc_branch(struct inode *inode,
        int n = 0;
        int i;
        int parent = minix_new_block(inode);
+       int err = -ENOSPC;
 
        branch[0].key = cpu_to_block(parent);
        if (parent) for (n = 1; n < num; n++) {
@@ -85,6 +86,11 @@ static int alloc_branch(struct inode *inode,
                        break;
                branch[n].key = cpu_to_block(nr);
                bh = sb_getblk(inode->i_sb, parent);
+               if (!bh) {
+                       minix_free_block(inode, nr);
+                       err = -ENOMEM;
+                       break;
+               }
                lock_buffer(bh);
                memset(bh->b_data, 0, bh->b_size);
                branch[n].bh = bh;
@@ -103,7 +109,7 @@ static int alloc_branch(struct inode *inode,
                bforget(branch[i].bh);
        for (i = 0; i < n; i++)
                minix_free_block(inode, block_to_cpu(branch[i].key));
-       return -ENOSPC;
+       return err;
 }
 
 static inline int splice_branch(struct inode *inode,
index 046cc96..1fed906 100644 (file)
@@ -29,12 +29,12 @@ static int block_to_path(struct inode * inode, long block, int offsets[DEPTH])
        if (block < 0) {
                printk("MINIX-fs: block_to_path: block %ld < 0 on dev %pg\n",
                        block, inode->i_sb->s_bdev);
-       } else if (block >= (minix_sb(inode->i_sb)->s_max_size/BLOCK_SIZE)) {
-               if (printk_ratelimit())
-                       printk("MINIX-fs: block_to_path: "
-                              "block %ld too big on dev %pg\n",
-                               block, inode->i_sb->s_bdev);
-       } else if (block < 7) {
+               return 0;
+       }
+       if ((u64)block * BLOCK_SIZE >= inode->i_sb->s_maxbytes)
+               return 0;
+
+       if (block < 7) {
                offsets[n++] = block;
        } else if ((block -= 7) < 512) {
                offsets[n++] = 7;
index f7fc7ec..9d00f31 100644 (file)
@@ -32,13 +32,12 @@ static int block_to_path(struct inode * inode, long block, int offsets[DEPTH])
        if (block < 0) {
                printk("MINIX-fs: block_to_path: block %ld < 0 on dev %pg\n",
                        block, sb->s_bdev);
-       } else if ((u64)block * (u64)sb->s_blocksize >=
-                       minix_sb(sb)->s_max_size) {
-               if (printk_ratelimit())
-                       printk("MINIX-fs: block_to_path: "
-                              "block %ld too big on dev %pg\n",
-                               block, sb->s_bdev);
-       } else if (block < DIRCOUNT) {
+               return 0;
+       }
+       if ((u64)block * (u64)sb->s_blocksize >= sb->s_maxbytes)
+               return 0;
+
+       if (block < DIRCOUNT) {
                offsets[n++] = block;
        } else if ((block -= DIRCOUNT) < INDIRCOUNT(sb)) {
                offsets[n++] = DIRCOUNT;
index df081e8..168d45d 100644 (file)
@@ -32,7 +32,6 @@ struct minix_sb_info {
        unsigned long s_zmap_blocks;
        unsigned long s_firstdatazone;
        unsigned long s_log_zone_size;
-       unsigned long s_max_size;
        int s_dirsize;
        int s_namelen;
        struct buffer_head ** s_imap;
index fde8fe0..e99e2a9 100644 (file)
@@ -2851,16 +2851,24 @@ static int may_open(const struct path *path, int acc_mode, int flag)
        case S_IFDIR:
                if (acc_mode & MAY_WRITE)
                        return -EISDIR;
+               if (acc_mode & MAY_EXEC)
+                       return -EACCES;
                break;
        case S_IFBLK:
        case S_IFCHR:
                if (!may_open_dev(path))
                        return -EACCES;
-               /*FALLTHRU*/
+               fallthrough;
        case S_IFIFO:
        case S_IFSOCK:
+               if (acc_mode & MAY_EXEC)
+                       return -EACCES;
                flag &= ~O_TRUNC;
                break;
+       case S_IFREG:
+               if ((acc_mode & MAY_EXEC) && path_noexec(path))
+                       return -EACCES;
+               break;
        }
 
        error = inode_permission(inode, MAY_OPEN | acc_mode);
@@ -3770,11 +3778,11 @@ exit2:
        mnt_drop_write(path.mnt);
 exit1:
        path_put(&path);
-       putname(name);
        if (retry_estale(error, lookup_flags)) {
                lookup_flags |= LOOKUP_REVAL;
                goto retry;
        }
+       putname(name);
        return error;
 }
 
index 2433c3e..22d11fd 100644 (file)
@@ -30,7 +30,7 @@ nfsv4-y := nfs4proc.o nfs4xdr.o nfs4state.o nfs4renewd.o nfs4super.o nfs4file.o
 nfsv4-$(CONFIG_NFS_USE_LEGACY_DNS) += cache_lib.o
 nfsv4-$(CONFIG_SYSCTL) += nfs4sysctl.o
 nfsv4-$(CONFIG_NFS_V4_1)       += pnfs.o pnfs_dev.o pnfs_nfs.o
-nfsv4-$(CONFIG_NFS_V4_2)       += nfs42proc.o
+nfsv4-$(CONFIG_NFS_V4_2)       += nfs42proc.o nfs42xattr.o
 
 obj-$(CONFIG_PNFS_FILE_LAYOUT) += filelayout/
 obj-$(CONFIG_PNFS_BLOCK) += blocklayout/
index 9fb067a..ef9db13 100644 (file)
@@ -79,7 +79,7 @@ bl_resolve_deviceid(struct nfs_server *server, struct pnfs_block_volume *b,
                goto out_free_data;
 
        bl_msg = msg->data;
-       bl_msg->type = BL_DEVICE_MOUNT,
+       bl_msg->type = BL_DEVICE_MOUNT;
        bl_msg->totallen = b->simple.len;
        nfs4_encode_simple(msg->data + sizeof(*bl_msg), b);
 
index f1ff307..4b8cc93 100644 (file)
@@ -50,6 +50,7 @@
 #include "nfs.h"
 #include "netns.h"
 #include "sysfs.h"
+#include "nfs42.h"
 
 #define NFSDBG_FACILITY                NFSDBG_CLIENT
 
@@ -749,7 +750,7 @@ error:
 static void nfs_server_set_fsinfo(struct nfs_server *server,
                                  struct nfs_fsinfo *fsinfo)
 {
-       unsigned long max_rpc_payload;
+       unsigned long max_rpc_payload, raw_max_rpc_payload;
 
        /* Work out a lot of parameters */
        if (server->rsize == 0)
@@ -762,7 +763,9 @@ static void nfs_server_set_fsinfo(struct nfs_server *server,
        if (fsinfo->wtmax >= 512 && server->wsize > fsinfo->wtmax)
                server->wsize = nfs_block_size(fsinfo->wtmax, NULL);
 
-       max_rpc_payload = nfs_block_size(rpc_max_payload(server->client), NULL);
+       raw_max_rpc_payload = rpc_max_payload(server->client);
+       max_rpc_payload = nfs_block_size(raw_max_rpc_payload, NULL);
+
        if (server->rsize > max_rpc_payload)
                server->rsize = max_rpc_payload;
        if (server->rsize > NFS_MAX_FILE_IO_SIZE)
@@ -795,6 +798,21 @@ static void nfs_server_set_fsinfo(struct nfs_server *server,
        server->clone_blksize = fsinfo->clone_blksize;
        /* We're airborne Set socket buffersize */
        rpc_setbufsize(server->client, server->wsize + 100, server->rsize + 100);
+
+#ifdef CONFIG_NFS_V4_2
+       /*
+        * Defaults until limited by the session parameters.
+        */
+       server->gxasize = min_t(unsigned int, raw_max_rpc_payload,
+                               XATTR_SIZE_MAX);
+       server->sxasize = min_t(unsigned int, raw_max_rpc_payload,
+                               XATTR_SIZE_MAX);
+       server->lxasize = min_t(unsigned int, raw_max_rpc_payload,
+                               nfs42_listxattr_xdrsize(XATTR_LIST_MAX));
+
+       if (fsinfo->xattr_support)
+               server->caps |= NFS_CAP_XATTR;
+#endif
 }
 
 /*
index 5a331da..a12f42e 100644 (file)
@@ -2460,7 +2460,7 @@ static struct nfs_access_entry *nfs_access_search_rbtree(struct inode *inode, co
        return NULL;
 }
 
-static int nfs_access_get_cached(struct inode *inode, const struct cred *cred, struct nfs_access_entry *res, bool may_block)
+static int nfs_access_get_cached_locked(struct inode *inode, const struct cred *cred, struct nfs_access_entry *res, bool may_block)
 {
        struct nfs_inode *nfsi = NFS_I(inode);
        struct nfs_access_entry *cache;
@@ -2533,6 +2533,20 @@ out:
        return err;
 }
 
+int nfs_access_get_cached(struct inode *inode, const struct cred *cred, struct
+nfs_access_entry *res, bool may_block)
+{
+       int status;
+
+       status = nfs_access_get_cached_rcu(inode, cred, res);
+       if (status != 0)
+               status = nfs_access_get_cached_locked(inode, cred, res,
+                   may_block);
+
+       return status;
+}
+EXPORT_SYMBOL_GPL(nfs_access_get_cached);
+
 static void nfs_access_add_rbtree(struct inode *inode, struct nfs_access_entry *set)
 {
        struct nfs_inode *nfsi = NFS_I(inode);
@@ -2647,9 +2661,7 @@ static int nfs_do_access(struct inode *inode, const struct cred *cred, int mask)
 
        trace_nfs_access_enter(inode);
 
-       status = nfs_access_get_cached_rcu(inode, cred, &cache);
-       if (status != 0)
-               status = nfs_access_get_cached(inode, cred, &cache, may_block);
+       status = nfs_access_get_cached(inode, cred, &cache, may_block);
        if (status == 0)
                goto out_cached;
 
@@ -2661,6 +2673,10 @@ static int nfs_do_access(struct inode *inode, const struct cred *cred, int mask)
         * Determine which access bits we want to ask for...
         */
        cache.mask = NFS_ACCESS_READ | NFS_ACCESS_MODIFY | NFS_ACCESS_EXTEND;
+       if (nfs_server_capable(inode, NFS_CAP_XATTR)) {
+               cache.mask |= NFS_ACCESS_XAREAD | NFS_ACCESS_XAWRITE |
+                   NFS_ACCESS_XALIST;
+       }
        if (S_ISDIR(inode->i_mode))
                cache.mask |= NFS_ACCESS_DELETE | NFS_ACCESS_LOOKUP;
        else
index 1b79dd5..2d30a4d 100644 (file)
@@ -896,7 +896,7 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
  */
 ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter)
 {
-       ssize_t result = -EINVAL, requested;
+       ssize_t result, requested;
        size_t count;
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
index f96367a..63940a7 100644 (file)
@@ -140,6 +140,7 @@ static int
 nfs_file_flush(struct file *file, fl_owner_t id)
 {
        struct inode    *inode = file_inode(file);
+       errseq_t since;
 
        dprintk("NFS: flush(%pD2)\n", file);
 
@@ -148,7 +149,9 @@ nfs_file_flush(struct file *file, fl_owner_t id)
                return 0;
 
        /* Flush writes to the server and return any errors */
-       return nfs_wb_all(inode);
+       since = filemap_sample_wb_err(file->f_mapping);
+       nfs_wb_all(inode);
+       return filemap_check_wb_err(file->f_mapping, since);
 }
 
 ssize_t
@@ -587,12 +590,14 @@ static const struct vm_operations_struct nfs_file_vm_ops = {
        .page_mkwrite = nfs_vm_page_mkwrite,
 };
 
-static int nfs_need_check_write(struct file *filp, struct inode *inode)
+static int nfs_need_check_write(struct file *filp, struct inode *inode,
+                               int error)
 {
        struct nfs_open_context *ctx;
 
        ctx = nfs_file_open_context(filp);
-       if (nfs_ctx_key_to_expire(ctx, inode))
+       if (nfs_error_is_fatal_on_server(error) ||
+           nfs_ctx_key_to_expire(ctx, inode))
                return 1;
        return 0;
 }
@@ -603,6 +608,8 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from)
        struct inode *inode = file_inode(file);
        unsigned long written = 0;
        ssize_t result;
+       errseq_t since;
+       int error;
 
        result = nfs_key_timeout_notify(file, inode);
        if (result)
@@ -627,6 +634,7 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from)
        if (iocb->ki_pos > i_size_read(inode))
                nfs_revalidate_mapping(inode, file->f_mapping);
 
+       since = filemap_sample_wb_err(file->f_mapping);
        nfs_start_io_write(inode);
        result = generic_write_checks(iocb, from);
        if (result > 0) {
@@ -645,7 +653,8 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from)
                goto out;
 
        /* Return error values */
-       if (nfs_need_check_write(file, inode)) {
+       error = filemap_check_wb_err(file->f_mapping, since);
+       if (nfs_need_check_write(file, inode, error)) {
                int err = nfs_wb_all(inode);
                if (err < 0)
                        result = err;
index de03e44..9651455 100644 (file)
@@ -790,6 +790,19 @@ ff_layout_choose_best_ds_for_read(struct pnfs_layout_segment *lseg,
        return ff_layout_choose_any_ds_for_read(lseg, start_idx, best_idx);
 }
 
+static struct nfs4_pnfs_ds *
+ff_layout_get_ds_for_read(struct nfs_pageio_descriptor *pgio, int *best_idx)
+{
+       struct pnfs_layout_segment *lseg = pgio->pg_lseg;
+       struct nfs4_pnfs_ds *ds;
+
+       ds = ff_layout_choose_best_ds_for_read(lseg, pgio->pg_mirror_idx,
+                                              best_idx);
+       if (ds || !pgio->pg_mirror_idx)
+               return ds;
+       return ff_layout_choose_best_ds_for_read(lseg, 0, best_idx);
+}
+
 static void
 ff_layout_pg_get_read(struct nfs_pageio_descriptor *pgio,
                      struct nfs_page *req,
@@ -840,12 +853,11 @@ retry:
                        goto out_nolseg;
        }
 
-       ds = ff_layout_choose_best_ds_for_read(pgio->pg_lseg, 0, &ds_idx);
+       ds = ff_layout_get_ds_for_read(pgio, &ds_idx);
        if (!ds) {
                if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
                        goto out_mds;
-               pnfs_put_lseg(pgio->pg_lseg);
-               pgio->pg_lseg = NULL;
+               pnfs_generic_pg_cleanup(pgio);
                /* Sleep for 1 second before retrying */
                ssleep(1);
                goto retry;
@@ -871,8 +883,6 @@ out_mds:
                        0, NFS4_MAX_UINT64, IOMODE_READ,
                        NFS_I(pgio->pg_inode)->layout,
                        pgio->pg_lseg);
-       pnfs_put_lseg(pgio->pg_lseg);
-       pgio->pg_lseg = NULL;
        pgio->pg_maxretrans = 0;
        nfs_pageio_reset_read_mds(pgio);
 }
@@ -916,8 +926,7 @@ retry:
                if (!ds) {
                        if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
                                goto out_mds;
-                       pnfs_put_lseg(pgio->pg_lseg);
-                       pgio->pg_lseg = NULL;
+                       pnfs_generic_pg_cleanup(pgio);
                        /* Sleep for 1 second before retrying */
                        ssleep(1);
                        goto retry;
@@ -939,8 +948,6 @@ out_mds:
                        0, NFS4_MAX_UINT64, IOMODE_RW,
                        NFS_I(pgio->pg_inode)->layout,
                        pgio->pg_lseg);
-       pnfs_put_lseg(pgio->pg_lseg);
-       pgio->pg_lseg = NULL;
        pgio->pg_maxretrans = 0;
        nfs_pageio_reset_write_mds(pgio);
        pgio->pg_error = -EAGAIN;
@@ -953,8 +960,8 @@ ff_layout_pg_get_mirror_count_write(struct nfs_pageio_descriptor *pgio,
        if (!pgio->pg_lseg) {
                pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
                                                   nfs_req_openctx(req),
-                                                  0,
-                                                  NFS4_MAX_UINT64,
+                                                  req_offset(req),
+                                                  req->wb_bytes,
                                                   IOMODE_RW,
                                                   false,
                                                   GFP_NOFS);
@@ -1028,11 +1035,24 @@ static void ff_layout_reset_write(struct nfs_pgio_header *hdr, bool retry_pnfs)
        }
 }
 
+static void ff_layout_resend_pnfs_read(struct nfs_pgio_header *hdr)
+{
+       u32 idx = hdr->pgio_mirror_idx + 1;
+       int new_idx = 0;
+
+       if (ff_layout_choose_any_ds_for_read(hdr->lseg, idx + 1, &new_idx))
+               ff_layout_send_layouterror(hdr->lseg);
+       else
+               pnfs_error_mark_layout_for_return(hdr->inode, hdr->lseg);
+       pnfs_read_resend_pnfs(hdr, new_idx);
+}
+
 static void ff_layout_reset_read(struct nfs_pgio_header *hdr)
 {
        struct rpc_task *task = &hdr->task;
 
        pnfs_layoutcommit_inode(hdr->inode, false);
+       pnfs_error_mark_layout_for_return(hdr->inode, hdr->lseg);
 
        if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
                dprintk("%s Reset task %5u for i/o through MDS "
@@ -1234,6 +1254,12 @@ static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
                break;
        case NFS4ERR_NXIO:
                ff_layout_mark_ds_unreachable(lseg, idx);
+               /*
+                * Don't return the layout if this is a read and we still
+                * have layouts to try
+                */
+               if (opnum == OP_READ)
+                       break;
                /* Fallthrough */
        default:
                pnfs_error_mark_layout_for_return(lseg->pls_layout->plh_inode,
@@ -1247,7 +1273,6 @@ static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
 static int ff_layout_read_done_cb(struct rpc_task *task,
                                struct nfs_pgio_header *hdr)
 {
-       int new_idx = hdr->pgio_mirror_idx;
        int err;
 
        if (task->tk_status < 0) {
@@ -1267,10 +1292,6 @@ static int ff_layout_read_done_cb(struct rpc_task *task,
        clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
        switch (err) {
        case -NFS4ERR_RESET_TO_PNFS:
-               if (ff_layout_choose_best_ds_for_read(hdr->lseg,
-                                       hdr->pgio_mirror_idx + 1,
-                                       &new_idx))
-                       goto out_layouterror;
                set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
                return task->tk_status;
        case -NFS4ERR_RESET_TO_MDS:
@@ -1281,10 +1302,6 @@ static int ff_layout_read_done_cb(struct rpc_task *task,
        }
 
        return 0;
-out_layouterror:
-       ff_layout_read_record_layoutstats_done(task, hdr);
-       ff_layout_send_layouterror(hdr->lseg);
-       hdr->pgio_mirror_idx = new_idx;
 out_eagain:
        rpc_restart_call_prepare(task);
        return -EAGAIN;
@@ -1411,10 +1428,9 @@ static void ff_layout_read_release(void *data)
        struct nfs_pgio_header *hdr = data;
 
        ff_layout_read_record_layoutstats_done(&hdr->task, hdr);
-       if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags)) {
-               ff_layout_send_layouterror(hdr->lseg);
-               pnfs_read_resend_pnfs(hdr);
-       } else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags))
+       if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags))
+               ff_layout_resend_pnfs_read(hdr);
+       else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags))
                ff_layout_reset_read(hdr);
        pnfs_generic_rw_release(data);
 }
index ccc88be..66949da 100644 (file)
@@ -982,7 +982,7 @@ static int nfs23_parse_monolithic(struct fs_context *fc,
                /*
                 * The legacy version 6 binary mount data from userspace has a
                 * field used only to transport selinux information into the
-                * the kernel.  To continue to support that functionality we
+                * kernel.  To continue to support that functionality we
                 * have a touch of selinux knowledge here in the NFS code. The
                 * userspace code converted context=blah to just blah so we are
                 * converting back to the full string selinux understands.
index 0bf1f83..aa64939 100644 (file)
@@ -193,6 +193,7 @@ bool nfs_check_cache_invalid(struct inode *inode, unsigned long flags)
 
        return nfs_check_cache_invalid_not_delegated(inode, flags);
 }
+EXPORT_SYMBOL_GPL(nfs_check_cache_invalid);
 
 static void nfs_set_cache_invalid(struct inode *inode, unsigned long flags)
 {
@@ -204,7 +205,8 @@ static void nfs_set_cache_invalid(struct inode *inode, unsigned long flags)
                        flags &= ~NFS_INO_INVALID_OTHER;
                flags &= ~(NFS_INO_INVALID_CHANGE
                                | NFS_INO_INVALID_SIZE
-                               | NFS_INO_REVAL_PAGECACHE);
+                               | NFS_INO_REVAL_PAGECACHE
+                               | NFS_INO_INVALID_XATTR);
        }
 
        if (inode->i_mapping->nrpages == 0)
@@ -233,11 +235,13 @@ static void nfs_zap_caches_locked(struct inode *inode)
                                        | NFS_INO_INVALID_DATA
                                        | NFS_INO_INVALID_ACCESS
                                        | NFS_INO_INVALID_ACL
+                                       | NFS_INO_INVALID_XATTR
                                        | NFS_INO_REVAL_PAGECACHE);
        } else
                nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR
                                        | NFS_INO_INVALID_ACCESS
                                        | NFS_INO_INVALID_ACL
+                                       | NFS_INO_INVALID_XATTR
                                        | NFS_INO_REVAL_PAGECACHE);
        nfs_zap_label_cache_locked(nfsi);
 }
@@ -542,6 +546,8 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr, st
                        inode->i_gid = fattr->gid;
                else if (nfs_server_capable(inode, NFS_CAP_OWNER_GROUP))
                        nfs_set_cache_invalid(inode, NFS_INO_INVALID_OTHER);
+               if (nfs_server_capable(inode, NFS_CAP_XATTR))
+                       nfs_set_cache_invalid(inode, NFS_INO_INVALID_XATTR);
                if (fattr->valid & NFS_ATTR_FATTR_BLOCKS_USED)
                        inode->i_blocks = fattr->du.nfs2.blocks;
                if (fattr->valid & NFS_ATTR_FATTR_SPACE_USED) {
@@ -794,8 +800,10 @@ int nfs_getattr(const struct path *path, struct kstat *stat,
 
        trace_nfs_getattr_enter(inode);
 
-       if ((query_flags & AT_STATX_DONT_SYNC) && !force_sync)
+       if ((query_flags & AT_STATX_DONT_SYNC) && !force_sync) {
+               nfs_readdirplus_parent_cache_hit(path->dentry);
                goto out_no_update;
+       }
 
        /* Flush out writes to the server in order to update c/mtime.  */
        if ((request_mask & (STATX_CTIME|STATX_MTIME)) &&
@@ -1375,6 +1383,8 @@ static void nfs_wcc_update_inode(struct inode *inode, struct nfs_fattr *fattr)
                inode_set_iversion_raw(inode, fattr->change_attr);
                if (S_ISDIR(inode->i_mode))
                        nfs_set_cache_invalid(inode, NFS_INO_INVALID_DATA);
+               else if (nfs_server_capable(inode, NFS_CAP_XATTR))
+                       nfs_set_cache_invalid(inode, NFS_INO_INVALID_XATTR);
        }
        /* If we have atomic WCC data, we may update some attributes */
        ts = inode->i_ctime;
@@ -1892,7 +1902,8 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
                        if (!(have_writers || have_delegation)) {
                                invalid |= NFS_INO_INVALID_DATA
                                        | NFS_INO_INVALID_ACCESS
-                                       | NFS_INO_INVALID_ACL;
+                                       | NFS_INO_INVALID_ACL
+                                       | NFS_INO_INVALID_XATTR;
                                /* Force revalidate of all attributes */
                                save_cache_validity |= NFS_INO_INVALID_CTIME
                                        | NFS_INO_INVALID_MTIME
@@ -2095,6 +2106,9 @@ struct inode *nfs_alloc_inode(struct super_block *sb)
 #if IS_ENABLED(CONFIG_NFS_V4)
        nfsi->nfs4_acl = NULL;
 #endif /* CONFIG_NFS_V4 */
+#ifdef CONFIG_NFS_V4_2
+       nfsi->xattr_cache = NULL;
+#endif
        return &nfsi->vfs_inode;
 }
 EXPORT_SYMBOL_GPL(nfs_alloc_inode);
index c891af9..0fe5aac 100644 (file)
@@ -6,6 +6,8 @@
 #ifndef __LINUX_FS_NFS_NFS4_2_H
 #define __LINUX_FS_NFS_NFS4_2_H
 
+#include <linux/xattr.h>
+
 /*
  * FIXME:  four LAYOUTSTATS calls per compound at most! Do we need to support
  * more? Need to consider not to pre-alloc too much for a compound.
@@ -36,5 +38,27 @@ static inline bool nfs42_files_from_same_server(struct file *in,
        return nfs4_check_serverowner_major_id(c_in->cl_serverowner,
                                               c_out->cl_serverowner);
 }
+
+ssize_t nfs42_proc_getxattr(struct inode *inode, const char *name,
+                           void *buf, size_t buflen);
+int nfs42_proc_setxattr(struct inode *inode, const char *name,
+                       const void *buf, size_t buflen, int flags);
+ssize_t nfs42_proc_listxattrs(struct inode *inode, void *buf,
+                              size_t buflen, u64 *cookiep, bool *eofp);
+int nfs42_proc_removexattr(struct inode *inode, const char *name);
+
+/*
+ * Maximum XDR buffer size needed for a listxattr buffer of buflen size.
+ *
+ * The upper boundary is a buffer with all 1-byte sized attribute names.
+ * They would be 7 bytes long in the eventual buffer ("user.x\0"), and
+ * 8 bytes long XDR-encoded.
+ *
+ * Include the trailing eof word as well.
+ */
+static inline u32 nfs42_listxattr_xdrsize(u32 buflen)
+{
+       return ((buflen / (XATTR_USER_PREFIX_LEN + 2)) * 8) + 4;
+}
 #endif /* CONFIG_NFS_V4_2 */
 #endif /* __LINUX_FS_NFS_NFS4_2_H */
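
A quick worked example of the sizing above (illustrative only; plain userspace C rather than kernel code, with XATTR_USER_PREFIX_LEN hard-coded as 5, i.e. "user."). Each worst-case name occupies 7 bytes in the caller's buffer ("user.x\0") but only 8 bytes XDR-encoded (a 4-byte length word plus a 1-byte name padded to 4; the "user." prefix presumably never goes on the wire), and the reply carries one extra 4-byte eof word:

        #include <stdio.h>

        /* Mirrors nfs42_listxattr_xdrsize(): worst case is all 1-byte names. */
        static unsigned int listxattr_xdrsize(unsigned int buflen)
        {
                return ((buflen / (5 + 2)) * 8) + 4;    /* 5 == strlen("user.") */
        }

        int main(void)
        {
                printf("%u\n", listxattr_xdrsize(4096));        /* prints 4684 */
                return 0;
        }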
index e2ae54b..142225f 100644 (file)
@@ -17,6 +17,7 @@
 #include "nfs4session.h"
 #include "internal.h"
 #include "delegation.h"
+#include "nfs4trace.h"
 
 #define NFSDBG_FACILITY NFSDBG_PROC
 static int nfs42_do_offload_cancel_async(struct file *dst, nfs4_stateid *std);
@@ -714,7 +715,7 @@ nfs42_layoutstat_done(struct rpc_task *task, void *calldata)
 
        switch (task->tk_status) {
        case 0:
-               break;
+               return;
        case -NFS4ERR_BADHANDLE:
        case -ESTALE:
                pnfs_destroy_layout(NFS_I(inode));
@@ -760,6 +761,8 @@ nfs42_layoutstat_done(struct rpc_task *task, void *calldata)
        case -EOPNOTSUPP:
                NFS_SERVER(inode)->caps &= ~NFS_CAP_LAYOUTSTATS;
        }
+
+       trace_nfs4_layoutstats(inode, &data->args.stateid, task->tk_status);
 }
 
 static void
@@ -882,7 +885,7 @@ nfs42_layouterror_done(struct rpc_task *task, void *calldata)
 
        switch (task->tk_status) {
        case 0:
-               break;
+               return;
        case -NFS4ERR_BADHANDLE:
        case -ESTALE:
                pnfs_destroy_layout(NFS_I(inode));
@@ -926,6 +929,9 @@ nfs42_layouterror_done(struct rpc_task *task, void *calldata)
        case -EOPNOTSUPP:
                NFS_SERVER(inode)->caps &= ~NFS_CAP_LAYOUTERROR;
        }
+
+       trace_nfs4_layouterror(inode, &data->args.errors[0].stateid,
+                              task->tk_status);
 }
 
 static void
@@ -1088,3 +1094,251 @@ out_put_src_lock:
        nfs_put_lock_context(src_lock);
        return err;
 }
+
+#define NFS4XATTR_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE)
+
+static int _nfs42_proc_removexattr(struct inode *inode, const char *name)
+{
+       struct nfs_server *server = NFS_SERVER(inode);
+       struct nfs42_removexattrargs args = {
+               .fh = NFS_FH(inode),
+               .xattr_name = name,
+       };
+       struct nfs42_removexattrres res;
+       struct rpc_message msg = {
+               .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVEXATTR],
+               .rpc_argp = &args,
+               .rpc_resp = &res,
+       };
+       int ret;
+       unsigned long timestamp = jiffies;
+
+       ret = nfs4_call_sync(server->client, server, &msg, &args.seq_args,
+           &res.seq_res, 1);
+       if (!ret)
+               nfs4_update_changeattr(inode, &res.cinfo, timestamp, 0);
+
+       return ret;
+}
+
+static int _nfs42_proc_setxattr(struct inode *inode, const char *name,
+                               const void *buf, size_t buflen, int flags)
+{
+       struct nfs_server *server = NFS_SERVER(inode);
+       struct page *pages[NFS4XATTR_MAXPAGES];
+       struct nfs42_setxattrargs arg = {
+               .fh             = NFS_FH(inode),
+               .xattr_pages    = pages,
+               .xattr_len      = buflen,
+               .xattr_name     = name,
+               .xattr_flags    = flags,
+       };
+       struct nfs42_setxattrres res;
+       struct rpc_message msg = {
+               .rpc_proc       = &nfs4_procedures[NFSPROC4_CLNT_SETXATTR],
+               .rpc_argp       = &arg,
+               .rpc_resp       = &res,
+       };
+       int ret, np;
+       unsigned long timestamp = jiffies;
+
+       if (buflen > server->sxasize)
+               return -ERANGE;
+
+       if (buflen > 0) {
+               np = nfs4_buf_to_pages_noslab(buf, buflen, arg.xattr_pages);
+               if (np < 0)
+                       return np;
+       } else
+               np = 0;
+
+       ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args,
+           &res.seq_res, 1);
+
+       for (; np > 0; np--)
+               put_page(pages[np - 1]);
+
+       if (!ret)
+               nfs4_update_changeattr(inode, &res.cinfo, timestamp, 0);
+
+       return ret;
+}
+
+static ssize_t _nfs42_proc_getxattr(struct inode *inode, const char *name,
+                               void *buf, size_t buflen)
+{
+       struct nfs_server *server = NFS_SERVER(inode);
+       struct page *pages[NFS4XATTR_MAXPAGES] = {};
+       struct nfs42_getxattrargs arg = {
+               .fh             = NFS_FH(inode),
+               .xattr_pages    = pages,
+               .xattr_len      = buflen,
+               .xattr_name     = name,
+       };
+       struct nfs42_getxattrres res;
+       struct rpc_message msg = {
+               .rpc_proc       = &nfs4_procedures[NFSPROC4_CLNT_GETXATTR],
+               .rpc_argp       = &arg,
+               .rpc_resp       = &res,
+       };
+       int ret, np;
+
+       ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args,
+           &res.seq_res, 0);
+       if (ret < 0)
+               return ret;
+
+       /*
+        * Normally, the caching is done one layer up, but for successful
+        * RPCs, always cache the result here, even if the caller was
+        * just querying the length, or if the reply was too big for
+        * the caller. This avoids a second RPC in the case of the
+        * common query-alloc-retrieve cycle for xattrs.
+        *
+        * Note that xattr_len is always capped to XATTR_SIZE_MAX.
+        */
+
+       nfs4_xattr_cache_add(inode, name, NULL, pages, res.xattr_len);
+
+       if (buflen) {
+               if (res.xattr_len > buflen)
+                       return -ERANGE;
+               _copy_from_pages(buf, pages, 0, res.xattr_len);
+       }
+
+       np = DIV_ROUND_UP(res.xattr_len, PAGE_SIZE);
+       while (--np >= 0)
+               __free_page(pages[np]);
+
+       return res.xattr_len;
+}
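
For context, the query-alloc-retrieve cycle that the comment above refers to looks like this from userspace (a hedged illustration only; "user.comment" is just an example attribute name, and whether the second call is served from the client-side cache depends on the caching code added elsewhere in this series):

        #include <stdio.h>
        #include <stdlib.h>
        #include <sys/xattr.h>

        int main(int argc, char **argv)
        {
                const char *path = argc > 1 ? argv[1] : ".";
                char *val;
                ssize_t len;

                len = getxattr(path, "user.comment", NULL, 0);  /* length probe */
                if (len < 0)
                        return 1;
                val = malloc(len + 1);
                if (!val)
                        return 1;
                len = getxattr(path, "user.comment", val, len); /* retrieve */
                if (len >= 0)
                        printf("user.comment = %.*s\n", (int)len, val);
                free(val);
                return 0;
        }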
+
+static ssize_t _nfs42_proc_listxattrs(struct inode *inode, void *buf,
+                                size_t buflen, u64 *cookiep, bool *eofp)
+{
+       struct nfs_server *server = NFS_SERVER(inode);
+       struct page **pages;
+       struct nfs42_listxattrsargs arg = {
+               .fh             = NFS_FH(inode),
+               .cookie         = *cookiep,
+       };
+       struct nfs42_listxattrsres res = {
+               .eof = false,
+               .xattr_buf = buf,
+               .xattr_len = buflen,
+       };
+       struct rpc_message msg = {
+               .rpc_proc       = &nfs4_procedures[NFSPROC4_CLNT_LISTXATTRS],
+               .rpc_argp       = &arg,
+               .rpc_resp       = &res,
+       };
+       u32 xdrlen;
+       int ret, np;
+
+
+       res.scratch = alloc_page(GFP_KERNEL);
+       if (!res.scratch)
+               return -ENOMEM;
+
+       xdrlen = nfs42_listxattr_xdrsize(buflen);
+       if (xdrlen > server->lxasize)
+               xdrlen = server->lxasize;
+       np = xdrlen / PAGE_SIZE + 1;
+
+       pages = kcalloc(np, sizeof(struct page *), GFP_KERNEL);
+       if (pages == NULL) {
+               __free_page(res.scratch);
+               return -ENOMEM;
+       }
+
+       arg.xattr_pages = pages;
+       arg.count = xdrlen;
+
+       ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args,
+           &res.seq_res, 0);
+
+       if (ret >= 0) {
+               ret = res.copied;
+               *cookiep = res.cookie;
+               *eofp = res.eof;
+       }
+
+       while (--np >= 0) {
+               if (pages[np])
+                       __free_page(pages[np]);
+       }
+
+       __free_page(res.scratch);
+       kfree(pages);
+
+       return ret;
+
+}
+
+ssize_t nfs42_proc_getxattr(struct inode *inode, const char *name,
+                             void *buf, size_t buflen)
+{
+       struct nfs4_exception exception = { };
+       ssize_t err;
+
+       do {
+               err = _nfs42_proc_getxattr(inode, name, buf, buflen);
+               if (err >= 0)
+                       break;
+               err = nfs4_handle_exception(NFS_SERVER(inode), err,
+                               &exception);
+       } while (exception.retry);
+
+       return err;
+}
+
+int nfs42_proc_setxattr(struct inode *inode, const char *name,
+                             const void *buf, size_t buflen, int flags)
+{
+       struct nfs4_exception exception = { };
+       int err;
+
+       do {
+               err = _nfs42_proc_setxattr(inode, name, buf, buflen, flags);
+               if (!err)
+                       break;
+               err = nfs4_handle_exception(NFS_SERVER(inode), err,
+                               &exception);
+       } while (exception.retry);
+
+       return err;
+}
+
+ssize_t nfs42_proc_listxattrs(struct inode *inode, void *buf,
+                             size_t buflen, u64 *cookiep, bool *eofp)
+{
+       struct nfs4_exception exception = { };
+       ssize_t err;
+
+       do {
+               err = _nfs42_proc_listxattrs(inode, buf, buflen,
+                   cookiep, eofp);
+               if (err >= 0)
+                       break;
+               err = nfs4_handle_exception(NFS_SERVER(inode), err,
+                               &exception);
+       } while (exception.retry);
+
+       return err;
+}
+
+int nfs42_proc_removexattr(struct inode *inode, const char *name)
+{
+       struct nfs4_exception exception = { };
+       int err;
+
+       do {
+               err = _nfs42_proc_removexattr(inode, name);
+               if (!err)
+                       break;
+               err = nfs4_handle_exception(NFS_SERVER(inode), err,
+                               &exception);
+       } while (exception.retry);
+
+       return err;
+}
diff --git a/fs/nfs/nfs42xattr.c b/fs/nfs/nfs42xattr.c
new file mode 100644 (file)
index 0000000..8677799
--- /dev/null
@@ -0,0 +1,1056 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2019, 2020 Amazon.com, Inc. or its affiliates. All rights reserved.
+ *
+ * User extended attribute client side cache functions.
+ *
+ * Author: Frank van der Linden <fllinden@amazon.com>
+ */
+#include <linux/errno.h>
+#include <linux/nfs_fs.h>
+#include <linux/hashtable.h>
+#include <linux/refcount.h>
+#include <uapi/linux/xattr.h>
+
+#include "nfs4_fs.h"
+#include "internal.h"
+
+/*
+ * User extended attributes client side caching is implemented by having
+ * a cache structure attached to NFS inodes. This structure is allocated
+ * when needed, and freed when the cache is zapped.
+ *
+ * The cache structure contains a hash table of entries, and a pointer
+ * to a special-cased entry for the listxattr cache.
+ *
+ * Accessing and allocating / freeing the caches is done via reference
+ * counting. The cache entries use a similar refcounting scheme.
+ *
+ * This makes freeing a cache, both from the shrinker and from the
+ * zap cache path, easy. It also means that, in current use cases,
+ * the large majority of inodes will not waste any memory, as they
+ * will never have any user extended attributes assigned to them.
+ *
+ * Attribute entries are hashed into a simple hash table. They are
+ * also part of an LRU.
+ *
+ * There are three shrinkers.
+ *
+ * Two shrinkers deal with the cache entries themselves: one for
+ * large entries (> PAGE_SIZE), and one for smaller entries. The
+ * shrinker for the larger entries works more aggressively than
+ * the one for the smaller entries.
+ *
+ * The other shrinker frees the cache structures themselves.
+ */
+
+/*
+ * 64 buckets is a good default. There is likely no reasonable
+ * workload that uses more than even 64 user extended attributes.
+ * You can certainly add a lot more - but you get what you ask for
+ * in those circumstances.
+ */
+#define NFS4_XATTR_HASH_SIZE   64
+
+#define NFSDBG_FACILITY        NFSDBG_XATTRCACHE
+
+struct nfs4_xattr_cache;
+struct nfs4_xattr_entry;
+
+struct nfs4_xattr_bucket {
+       spinlock_t lock;
+       struct hlist_head hlist;
+       struct nfs4_xattr_cache *cache;
+       bool draining;
+};
+
+struct nfs4_xattr_cache {
+       struct kref ref;
+       spinlock_t hash_lock;   /* protects hashtable and lru */
+       struct nfs4_xattr_bucket buckets[NFS4_XATTR_HASH_SIZE];
+       struct list_head lru;
+       struct list_head dispose;
+       atomic_long_t nent;
+       spinlock_t listxattr_lock;
+       struct inode *inode;
+       struct nfs4_xattr_entry *listxattr;
+};
+
+struct nfs4_xattr_entry {
+       struct kref ref;
+       struct hlist_node hnode;
+       struct list_head lru;
+       struct list_head dispose;
+       char *xattr_name;
+       void *xattr_value;
+       size_t xattr_size;
+       struct nfs4_xattr_bucket *bucket;
+       uint32_t flags;
+};
+
+#define        NFS4_XATTR_ENTRY_EXTVAL 0x0001
+
+/*
+ * LRU list of NFS inodes that have xattr caches.
+ */
+static struct list_lru nfs4_xattr_cache_lru;
+static struct list_lru nfs4_xattr_entry_lru;
+static struct list_lru nfs4_xattr_large_entry_lru;
+
+static struct kmem_cache *nfs4_xattr_cache_cachep;
+
+/*
+ * Hashing helper functions.
+ */
+static void
+nfs4_xattr_hash_init(struct nfs4_xattr_cache *cache)
+{
+       unsigned int i;
+
+       for (i = 0; i < NFS4_XATTR_HASH_SIZE; i++) {
+               INIT_HLIST_HEAD(&cache->buckets[i].hlist);
+               spin_lock_init(&cache->buckets[i].lock);
+               cache->buckets[i].cache = cache;
+               cache->buckets[i].draining = false;
+       }
+}
+
+/*
+ * Locking order:
+ * 1. inode i_lock or bucket lock
+ * 2. list_lru lock (taken by list_lru_* functions)
+ */
+
+/*
+ * Wrapper functions to add a cache entry to the right LRU.
+ */
+static bool
+nfs4_xattr_entry_lru_add(struct nfs4_xattr_entry *entry)
+{
+       struct list_lru *lru;
+
+       lru = (entry->flags & NFS4_XATTR_ENTRY_EXTVAL) ?
+           &nfs4_xattr_large_entry_lru : &nfs4_xattr_entry_lru;
+
+       return list_lru_add(lru, &entry->lru);
+}
+
+static bool
+nfs4_xattr_entry_lru_del(struct nfs4_xattr_entry *entry)
+{
+       struct list_lru *lru;
+
+       lru = (entry->flags & NFS4_XATTR_ENTRY_EXTVAL) ?
+           &nfs4_xattr_large_entry_lru : &nfs4_xattr_entry_lru;
+
+       return list_lru_del(lru, &entry->lru);
+}
+
+/*
+ * This function allocates cache entries. They are the normal
+ * extended attribute name/value pairs, but may also be a listxattr
+ * cache. Those allocations use the same entry so that they can be
+ * treated as one by the memory shrinker.
+ *
+ * xattr cache entries are allocated together with names. If the
+ * value fits into one page with the entry structure and the name,
+ * it will also be part of the same allocation (kmalloc). This is
+ * expected to be the vast majority of cases. Larger allocations
+ * have a value pointer that is allocated separately by kvmalloc.
+ *
+ * Parameters:
+ *
+ * @name:  Name of the extended attribute. NULL for listxattr cache
+ *         entry.
+ * @value: Value of attribute, or listxattr cache. NULL if the
+ *         value is to be copied from pages instead.
+ * @pages: Pages to copy the value from, if not NULL. Passed in to
+ *        make it easier to copy the value after an RPC, even if
+ *        the value will not be passed up to the application (e.g.
+ *        for a 'query' getxattr with NULL buffer).
+ * @len:   Length of the value. Can be 0 for zero-length attributes.
+ *         @value and @pages will be NULL if @len is 0.
+ */
+static struct nfs4_xattr_entry *
+nfs4_xattr_alloc_entry(const char *name, const void *value,
+                      struct page **pages, size_t len)
+{
+       struct nfs4_xattr_entry *entry;
+       void *valp;
+       char *namep;
+       size_t alloclen, slen;
+       char *buf;
+       uint32_t flags;
+
+       BUILD_BUG_ON(sizeof(struct nfs4_xattr_entry) +
+           XATTR_NAME_MAX + 1 > PAGE_SIZE);
+
+       alloclen = sizeof(struct nfs4_xattr_entry);
+       if (name != NULL) {
+               slen = strlen(name) + 1;
+               alloclen += slen;
+       } else
+               slen = 0;
+
+       if (alloclen + len <= PAGE_SIZE) {
+               alloclen += len;
+               flags = 0;
+       } else {
+               flags = NFS4_XATTR_ENTRY_EXTVAL;
+       }
+
+       buf = kmalloc(alloclen, GFP_KERNEL_ACCOUNT | GFP_NOFS);
+       if (buf == NULL)
+               return NULL;
+       entry = (struct nfs4_xattr_entry *)buf;
+
+       if (name != NULL) {
+               namep = buf + sizeof(struct nfs4_xattr_entry);
+               memcpy(namep, name, slen);
+       } else {
+               namep = NULL;
+       }
+
+
+       if (flags & NFS4_XATTR_ENTRY_EXTVAL) {
+               valp = kvmalloc(len, GFP_KERNEL_ACCOUNT | GFP_NOFS);
+               if (valp == NULL) {
+                       kfree(buf);
+                       return NULL;
+               }
+       } else if (len != 0) {
+               valp = buf + sizeof(struct nfs4_xattr_entry) + slen;
+       } else
+               valp = NULL;
+
+       if (valp != NULL) {
+               if (value != NULL)
+                       memcpy(valp, value, len);
+               else
+                       _copy_from_pages(valp, pages, 0, len);
+       }
+
+       entry->flags = flags;
+       entry->xattr_value = valp;
+       kref_init(&entry->ref);
+       entry->xattr_name = namep;
+       entry->xattr_size = len;
+       entry->bucket = NULL;
+       INIT_LIST_HEAD(&entry->lru);
+       INIT_LIST_HEAD(&entry->dispose);
+       INIT_HLIST_NODE(&entry->hnode);
+
+       return entry;
+}
+
+static void
+nfs4_xattr_free_entry(struct nfs4_xattr_entry *entry)
+{
+       if (entry->flags & NFS4_XATTR_ENTRY_EXTVAL)
+               kvfree(entry->xattr_value);
+       kfree(entry);
+}
+
+static void
+nfs4_xattr_free_entry_cb(struct kref *kref)
+{
+       struct nfs4_xattr_entry *entry;
+
+       entry = container_of(kref, struct nfs4_xattr_entry, ref);
+
+       if (WARN_ON(!list_empty(&entry->lru)))
+               return;
+
+       nfs4_xattr_free_entry(entry);
+}
+
+static void
+nfs4_xattr_free_cache_cb(struct kref *kref)
+{
+       struct nfs4_xattr_cache *cache;
+       int i;
+
+       cache = container_of(kref, struct nfs4_xattr_cache, ref);
+
+       for (i = 0; i < NFS4_XATTR_HASH_SIZE; i++) {
+               if (WARN_ON(!hlist_empty(&cache->buckets[i].hlist)))
+                       return;
+               cache->buckets[i].draining = false;
+       }
+
+       cache->listxattr = NULL;
+
+       kmem_cache_free(nfs4_xattr_cache_cachep, cache);
+
+}
+
+static struct nfs4_xattr_cache *
+nfs4_xattr_alloc_cache(void)
+{
+       struct nfs4_xattr_cache *cache;
+
+       cache = kmem_cache_alloc(nfs4_xattr_cache_cachep,
+           GFP_KERNEL_ACCOUNT | GFP_NOFS);
+       if (cache == NULL)
+               return NULL;
+
+       kref_init(&cache->ref);
+       atomic_long_set(&cache->nent, 0);
+
+       return cache;
+}
+
+/*
+ * Set the listxattr cache, which is a special-cased cache entry.
+ * The special value ERR_PTR(-ESTALE) is used to indicate that
+ * the cache is being drained - this prevents a new listxattr
+ * cache from being added to what is now a stale cache.
+ */
+static int
+nfs4_xattr_set_listcache(struct nfs4_xattr_cache *cache,
+                        struct nfs4_xattr_entry *new)
+{
+       struct nfs4_xattr_entry *old;
+       int ret = 1;
+
+       spin_lock(&cache->listxattr_lock);
+
+       old = cache->listxattr;
+
+       if (old == ERR_PTR(-ESTALE)) {
+               ret = 0;
+               goto out;
+       }
+
+       cache->listxattr = new;
+       if (new != NULL && new != ERR_PTR(-ESTALE))
+               nfs4_xattr_entry_lru_add(new);
+
+       if (old != NULL) {
+               nfs4_xattr_entry_lru_del(old);
+               kref_put(&old->ref, nfs4_xattr_free_entry_cb);
+       }
+out:
+       spin_unlock(&cache->listxattr_lock);
+
+       return ret;
+}
+
+/*
+ * Unlink a cache from its parent inode, clearing out an invalid
+ * cache. Must be called with i_lock held.
+ */
+static struct nfs4_xattr_cache *
+nfs4_xattr_cache_unlink(struct inode *inode)
+{
+       struct nfs_inode *nfsi;
+       struct nfs4_xattr_cache *oldcache;
+
+       nfsi = NFS_I(inode);
+
+       oldcache = nfsi->xattr_cache;
+       if (oldcache != NULL) {
+               list_lru_del(&nfs4_xattr_cache_lru, &oldcache->lru);
+               oldcache->inode = NULL;
+       }
+       nfsi->xattr_cache = NULL;
+       nfsi->cache_validity &= ~NFS_INO_INVALID_XATTR;
+
+       return oldcache;
+
+}
+
+/*
+ * Discard a cache. Called by get_cache() if there was an old,
+ * invalid cache. Can also be called from a shrinker callback.
+ *
+ * The cache is dead, it has already been unlinked from its inode,
+ * and no longer appears on the cache LRU list.
+ *
+ * Mark all buckets as draining, so that no new entries are added. This
+ * could still happen in the unlikely, but possible case that another
+ * thread had grabbed a reference before it was unlinked from the inode,
+ * and is still holding it for an add operation.
+ *
+ * Remove all entries from the LRU lists, so that there is no longer
+ * any way to 'find' this cache. Then, remove the entries from the hash
+ * table.
+ *
+ * At that point, the cache will remain empty and can be freed when the final
+ * reference drops, which is very likely the kref_put at the end of
+ * this function, or the one called immediately afterwards in the
+ * shrinker callback.
+ */
+static void
+nfs4_xattr_discard_cache(struct nfs4_xattr_cache *cache)
+{
+       unsigned int i;
+       struct nfs4_xattr_entry *entry;
+       struct nfs4_xattr_bucket *bucket;
+       struct hlist_node *n;
+
+       nfs4_xattr_set_listcache(cache, ERR_PTR(-ESTALE));
+
+       for (i = 0; i < NFS4_XATTR_HASH_SIZE; i++) {
+               bucket = &cache->buckets[i];
+
+               spin_lock(&bucket->lock);
+               bucket->draining = true;
+               hlist_for_each_entry_safe(entry, n, &bucket->hlist, hnode) {
+                       nfs4_xattr_entry_lru_del(entry);
+                       hlist_del_init(&entry->hnode);
+                       kref_put(&entry->ref, nfs4_xattr_free_entry_cb);
+               }
+               spin_unlock(&bucket->lock);
+       }
+
+       atomic_long_set(&cache->nent, 0);
+
+       kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
+}
+
+/*
+ * Get a referenced copy of the cache structure. Avoid doing allocs
+ * while holding i_lock. This means we do some optimistic allocation
+ * and might have to free the result in rare cases.
+ *
+ * This function only checks the NFS_INO_INVALID_XATTR cache validity bit
+ * and acts accordingly, replacing the cache when needed. For the read case
+ * (!add), this means that the caller must make sure that the cache
+ * is valid before calling this function. getxattr and listxattr call
+ * revalidate_inode to do this. The attribute cache timeout (for the
+ * non-delegated case) is expected to be dealt with in the revalidate
+ * call.
+ */
+
+static struct nfs4_xattr_cache *
+nfs4_xattr_get_cache(struct inode *inode, int add)
+{
+       struct nfs_inode *nfsi;
+       struct nfs4_xattr_cache *cache, *oldcache, *newcache;
+
+       nfsi = NFS_I(inode);
+
+       cache = oldcache = NULL;
+
+       spin_lock(&inode->i_lock);
+
+       if (nfsi->cache_validity & NFS_INO_INVALID_XATTR)
+               oldcache = nfs4_xattr_cache_unlink(inode);
+       else
+               cache = nfsi->xattr_cache;
+
+       if (cache != NULL)
+               kref_get(&cache->ref);
+
+       spin_unlock(&inode->i_lock);
+
+       if (add && cache == NULL) {
+               newcache = NULL;
+
+               cache = nfs4_xattr_alloc_cache();
+               if (cache == NULL)
+                       goto out;
+
+               spin_lock(&inode->i_lock);
+               if (nfsi->cache_validity & NFS_INO_INVALID_XATTR) {
+                       /*
+                        * The cache was invalidated again. Give up,
+                        * since what we want to enter is now likely
+                        * outdated anyway.
+                        */
+                       spin_unlock(&inode->i_lock);
+                       kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
+                       cache = NULL;
+                       goto out;
+               }
+
+               /*
+                * Check if someone beat us to it.
+                */
+               if (nfsi->xattr_cache != NULL) {
+                       newcache = nfsi->xattr_cache;
+                       kref_get(&newcache->ref);
+               } else {
+                       kref_get(&cache->ref);
+                       nfsi->xattr_cache = cache;
+                       cache->inode = inode;
+                       list_lru_add(&nfs4_xattr_cache_lru, &cache->lru);
+               }
+
+               spin_unlock(&inode->i_lock);
+
+               /*
+                * If there was a race, throw away the cache we just
+                * allocated, and use the new one allocated by someone
+                * else.
+                */
+               if (newcache != NULL) {
+                       kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
+                       cache = newcache;
+               }
+       }
+
+out:
+       /*
+        * Discard the now orphaned old cache.
+        */
+       if (oldcache != NULL)
+               nfs4_xattr_discard_cache(oldcache);
+
+       return cache;
+}
+
+static inline struct nfs4_xattr_bucket *
+nfs4_xattr_hash_bucket(struct nfs4_xattr_cache *cache, const char *name)
+{
+       return &cache->buckets[jhash(name, strlen(name), 0) &
+           (ARRAY_SIZE(cache->buckets) - 1)];
+}
+
+static struct nfs4_xattr_entry *
+nfs4_xattr_get_entry(struct nfs4_xattr_bucket *bucket, const char *name)
+{
+       struct nfs4_xattr_entry *entry;
+
+       entry = NULL;
+
+       hlist_for_each_entry(entry, &bucket->hlist, hnode) {
+               if (!strcmp(entry->xattr_name, name))
+                       break;
+       }
+
+       return entry;
+}
+
+static int
+nfs4_xattr_hash_add(struct nfs4_xattr_cache *cache,
+                   struct nfs4_xattr_entry *entry)
+{
+       struct nfs4_xattr_bucket *bucket;
+       struct nfs4_xattr_entry *oldentry = NULL;
+       int ret = 1;
+
+       bucket = nfs4_xattr_hash_bucket(cache, entry->xattr_name);
+       entry->bucket = bucket;
+
+       spin_lock(&bucket->lock);
+
+       if (bucket->draining) {
+               ret = 0;
+               goto out;
+       }
+
+       oldentry = nfs4_xattr_get_entry(bucket, entry->xattr_name);
+       if (oldentry != NULL) {
+               hlist_del_init(&oldentry->hnode);
+               nfs4_xattr_entry_lru_del(oldentry);
+       } else {
+               atomic_long_inc(&cache->nent);
+       }
+
+       hlist_add_head(&entry->hnode, &bucket->hlist);
+       nfs4_xattr_entry_lru_add(entry);
+
+out:
+       spin_unlock(&bucket->lock);
+
+       if (oldentry != NULL)
+               kref_put(&oldentry->ref, nfs4_xattr_free_entry_cb);
+
+       return ret;
+}
+
+static void
+nfs4_xattr_hash_remove(struct nfs4_xattr_cache *cache, const char *name)
+{
+       struct nfs4_xattr_bucket *bucket;
+       struct nfs4_xattr_entry *entry;
+
+       bucket = nfs4_xattr_hash_bucket(cache, name);
+
+       spin_lock(&bucket->lock);
+
+       entry = nfs4_xattr_get_entry(bucket, name);
+       if (entry != NULL) {
+               hlist_del_init(&entry->hnode);
+               nfs4_xattr_entry_lru_del(entry);
+               atomic_long_dec(&cache->nent);
+       }
+
+       spin_unlock(&bucket->lock);
+
+       if (entry != NULL)
+               kref_put(&entry->ref, nfs4_xattr_free_entry_cb);
+}
+
+static struct nfs4_xattr_entry *
+nfs4_xattr_hash_find(struct nfs4_xattr_cache *cache, const char *name)
+{
+       struct nfs4_xattr_bucket *bucket;
+       struct nfs4_xattr_entry *entry;
+
+       bucket = nfs4_xattr_hash_bucket(cache, name);
+
+       spin_lock(&bucket->lock);
+
+       entry = nfs4_xattr_get_entry(bucket, name);
+       if (entry != NULL)
+               kref_get(&entry->ref);
+
+       spin_unlock(&bucket->lock);
+
+       return entry;
+}
+
+/*
+ * Entry point to retrieve an entry from the cache.
+ */
+ssize_t nfs4_xattr_cache_get(struct inode *inode, const char *name, char *buf,
+                        ssize_t buflen)
+{
+       struct nfs4_xattr_cache *cache;
+       struct nfs4_xattr_entry *entry;
+       ssize_t ret;
+
+       cache = nfs4_xattr_get_cache(inode, 0);
+       if (cache == NULL)
+               return -ENOENT;
+
+       ret = 0;
+       entry = nfs4_xattr_hash_find(cache, name);
+
+       if (entry != NULL) {
+               dprintk("%s: cache hit '%s', len %lu\n", __func__,
+                   entry->xattr_name, (unsigned long)entry->xattr_size);
+               if (buflen == 0) {
+                       /* Length probe only */
+                       ret = entry->xattr_size;
+               } else if (buflen < entry->xattr_size)
+                       ret = -ERANGE;
+               else {
+                       memcpy(buf, entry->xattr_value, entry->xattr_size);
+                       ret = entry->xattr_size;
+               }
+               kref_put(&entry->ref, nfs4_xattr_free_entry_cb);
+       } else {
+               dprintk("%s: cache miss '%s'\n", __func__, name);
+               ret = -ENOENT;
+       }
+
+       kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
+
+       return ret;
+}
+
+/*
+ * Retrieve a cached list of xattrs from the cache.
+ */
+ssize_t nfs4_xattr_cache_list(struct inode *inode, char *buf, ssize_t buflen)
+{
+       struct nfs4_xattr_cache *cache;
+       struct nfs4_xattr_entry *entry;
+       ssize_t ret;
+
+       cache = nfs4_xattr_get_cache(inode, 0);
+       if (cache == NULL)
+               return -ENOENT;
+
+       spin_lock(&cache->listxattr_lock);
+
+       entry = cache->listxattr;
+
+       if (entry != NULL && entry != ERR_PTR(-ESTALE)) {
+               if (buflen == 0) {
+                       /* Length probe only */
+                       ret = entry->xattr_size;
+               } else if (entry->xattr_size > buflen)
+                       ret = -ERANGE;
+               else {
+                       memcpy(buf, entry->xattr_value, entry->xattr_size);
+                       ret = entry->xattr_size;
+               }
+       } else {
+               ret = -ENOENT;
+       }
+
+       spin_unlock(&cache->listxattr_lock);
+
+       kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
+
+       return ret;
+}
+
+/*
+ * Add an xattr to the cache.
+ *
+ * This also invalidates the xattr list cache.
+ */
+void nfs4_xattr_cache_add(struct inode *inode, const char *name,
+                         const char *buf, struct page **pages, ssize_t buflen)
+{
+       struct nfs4_xattr_cache *cache;
+       struct nfs4_xattr_entry *entry;
+
+       dprintk("%s: add '%s' len %lu\n", __func__,
+           name, (unsigned long)buflen);
+
+       cache = nfs4_xattr_get_cache(inode, 1);
+       if (cache == NULL)
+               return;
+
+       entry = nfs4_xattr_alloc_entry(name, buf, pages, buflen);
+       if (entry == NULL)
+               goto out;
+
+       (void)nfs4_xattr_set_listcache(cache, NULL);
+
+       if (!nfs4_xattr_hash_add(cache, entry))
+               kref_put(&entry->ref, nfs4_xattr_free_entry_cb);
+
+out:
+       kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
+}
+
+
+/*
+ * Remove an xattr from the cache.
+ *
+ * This also invalidates the xattr list cache.
+ */
+void nfs4_xattr_cache_remove(struct inode *inode, const char *name)
+{
+       struct nfs4_xattr_cache *cache;
+
+       dprintk("%s: remove '%s'\n", __func__, name);
+
+       cache = nfs4_xattr_get_cache(inode, 0);
+       if (cache == NULL)
+               return;
+
+       (void)nfs4_xattr_set_listcache(cache, NULL);
+       nfs4_xattr_hash_remove(cache, name);
+
+       kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
+}
+
+/*
+ * Cache listxattr output, replacing any possible old one.
+ */
+void nfs4_xattr_cache_set_list(struct inode *inode, const char *buf,
+                              ssize_t buflen)
+{
+       struct nfs4_xattr_cache *cache;
+       struct nfs4_xattr_entry *entry;
+
+       cache = nfs4_xattr_get_cache(inode, 1);
+       if (cache == NULL)
+               return;
+
+       entry = nfs4_xattr_alloc_entry(NULL, buf, NULL, buflen);
+       if (entry == NULL)
+               goto out;
+
+       /*
+        * This is just there to be able to get to bucket->cache,
+        * which is obviously the same for all buckets, so just
+        * use bucket 0.
+        */
+       entry->bucket = &cache->buckets[0];
+
+       if (!nfs4_xattr_set_listcache(cache, entry))
+               kref_put(&entry->ref, nfs4_xattr_free_entry_cb);
+
+out:
+       kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
+}
+
+/*
+ * Zap the entire cache. Called when an inode is evicted.
+ */
+void nfs4_xattr_cache_zap(struct inode *inode)
+{
+       struct nfs4_xattr_cache *oldcache;
+
+       spin_lock(&inode->i_lock);
+       oldcache = nfs4_xattr_cache_unlink(inode);
+       spin_unlock(&inode->i_lock);
+
+       if (oldcache)
+               nfs4_xattr_discard_cache(oldcache);
+}
+
+/*
+ * The large-entry LRU is shrunk more aggressively than the others,
+ * by setting its shrinker's @seeks to 1.
+ *
+ * Cache structures are freed only when they've become empty, after
+ * pruning all but one entry.
+ */
+
+static unsigned long nfs4_xattr_cache_count(struct shrinker *shrink,
+                                           struct shrink_control *sc);
+static unsigned long nfs4_xattr_entry_count(struct shrinker *shrink,
+                                           struct shrink_control *sc);
+static unsigned long nfs4_xattr_cache_scan(struct shrinker *shrink,
+                                          struct shrink_control *sc);
+static unsigned long nfs4_xattr_entry_scan(struct shrinker *shrink,
+                                          struct shrink_control *sc);
+
+static struct shrinker nfs4_xattr_cache_shrinker = {
+       .count_objects  = nfs4_xattr_cache_count,
+       .scan_objects   = nfs4_xattr_cache_scan,
+       .seeks          = DEFAULT_SEEKS,
+       .flags          = SHRINKER_MEMCG_AWARE,
+};
+
+static struct shrinker nfs4_xattr_entry_shrinker = {
+       .count_objects  = nfs4_xattr_entry_count,
+       .scan_objects   = nfs4_xattr_entry_scan,
+       .seeks          = DEFAULT_SEEKS,
+       .batch          = 512,
+       .flags          = SHRINKER_MEMCG_AWARE,
+};
+
+static struct shrinker nfs4_xattr_large_entry_shrinker = {
+       .count_objects  = nfs4_xattr_entry_count,
+       .scan_objects   = nfs4_xattr_entry_scan,
+       .seeks          = 1,
+       .batch          = 512,
+       .flags          = SHRINKER_MEMCG_AWARE,
+};
+
+static enum lru_status
+cache_lru_isolate(struct list_head *item,
+       struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
+{
+       struct list_head *dispose = arg;
+       struct inode *inode;
+       struct nfs4_xattr_cache *cache = container_of(item,
+           struct nfs4_xattr_cache, lru);
+
+       if (atomic_long_read(&cache->nent) > 1)
+               return LRU_SKIP;
+
+       /*
+        * If a cache structure is on the LRU list, we know that
+        * its inode is valid. Try to lock it to break the link.
+        * Since we're inverting the lock order here, only try.
+        */
+       inode = cache->inode;
+
+       if (!spin_trylock(&inode->i_lock))
+               return LRU_SKIP;
+
+       kref_get(&cache->ref);
+
+       cache->inode = NULL;
+       NFS_I(inode)->xattr_cache = NULL;
+       NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_XATTR;
+       list_lru_isolate(lru, &cache->lru);
+
+       spin_unlock(&inode->i_lock);
+
+       list_add_tail(&cache->dispose, dispose);
+       return LRU_REMOVED;
+}
+
+static unsigned long
+nfs4_xattr_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
+{
+       LIST_HEAD(dispose);
+       unsigned long freed;
+       struct nfs4_xattr_cache *cache;
+
+       freed = list_lru_shrink_walk(&nfs4_xattr_cache_lru, sc,
+           cache_lru_isolate, &dispose);
+       while (!list_empty(&dispose)) {
+               cache = list_first_entry(&dispose, struct nfs4_xattr_cache,
+                   dispose);
+               list_del_init(&cache->dispose);
+               nfs4_xattr_discard_cache(cache);
+               kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
+       }
+
+       return freed;
+}
+
+
+static unsigned long
+nfs4_xattr_cache_count(struct shrinker *shrink, struct shrink_control *sc)
+{
+       unsigned long count;
+
+       count = list_lru_count(&nfs4_xattr_cache_lru);
+       return vfs_pressure_ratio(count);
+}
+
+static enum lru_status
+entry_lru_isolate(struct list_head *item,
+       struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
+{
+       struct list_head *dispose = arg;
+       struct nfs4_xattr_bucket *bucket;
+       struct nfs4_xattr_cache *cache;
+       struct nfs4_xattr_entry *entry = container_of(item,
+           struct nfs4_xattr_entry, lru);
+
+       bucket = entry->bucket;
+       cache = bucket->cache;
+
+       /*
+        * Unhook the entry from its parent (either a cache bucket
+        * or a cache structure if it's a listxattr buf), so that
+        * it's no longer found. Then add it to the isolate list,
+        * to be freed later.
+        *
+        * In both cases, we're inverting the lock order, so use
+        * trylock and skip the entry if we can't get the lock.
+        */
+       if (entry->xattr_name != NULL) {
+               /* Regular cache entry */
+               if (!spin_trylock(&bucket->lock))
+                       return LRU_SKIP;
+
+               kref_get(&entry->ref);
+
+               hlist_del_init(&entry->hnode);
+               atomic_long_dec(&cache->nent);
+               list_lru_isolate(lru, &entry->lru);
+
+               spin_unlock(&bucket->lock);
+       } else {
+               /* Listxattr cache entry */
+               if (!spin_trylock(&cache->listxattr_lock))
+                       return LRU_SKIP;
+
+               kref_get(&entry->ref);
+
+               cache->listxattr = NULL;
+               list_lru_isolate(lru, &entry->lru);
+
+               spin_unlock(&cache->listxattr_lock);
+       }
+
+       list_add_tail(&entry->dispose, dispose);
+       return LRU_REMOVED;
+}
+
+static unsigned long
+nfs4_xattr_entry_scan(struct shrinker *shrink, struct shrink_control *sc)
+{
+       LIST_HEAD(dispose);
+       unsigned long freed;
+       struct nfs4_xattr_entry *entry;
+       struct list_lru *lru;
+
+       lru = (shrink == &nfs4_xattr_large_entry_shrinker) ?
+           &nfs4_xattr_large_entry_lru : &nfs4_xattr_entry_lru;
+
+       freed = list_lru_shrink_walk(lru, sc, entry_lru_isolate, &dispose);
+
+       while (!list_empty(&dispose)) {
+               entry = list_first_entry(&dispose, struct nfs4_xattr_entry,
+                   dispose);
+               list_del_init(&entry->dispose);
+
+               /*
+                * Drop two references: the one that we just grabbed
+                * in entry_lru_isolate, and the one that was set
+                * when the entry was first allocated.
+                */
+               kref_put(&entry->ref, nfs4_xattr_free_entry_cb);
+               kref_put(&entry->ref, nfs4_xattr_free_entry_cb);
+       }
+
+       return freed;
+}
+
+static unsigned long
+nfs4_xattr_entry_count(struct shrinker *shrink, struct shrink_control *sc)
+{
+       unsigned long count;
+       struct list_lru *lru;
+
+       lru = (shrink == &nfs4_xattr_large_entry_shrinker) ?
+           &nfs4_xattr_large_entry_lru : &nfs4_xattr_entry_lru;
+
+       count = list_lru_count(lru);
+       return vfs_pressure_ratio(count);
+}
+
+
+static void nfs4_xattr_cache_init_once(void *p)
+{
+       struct nfs4_xattr_cache *cache = (struct nfs4_xattr_cache *)p;
+
+       spin_lock_init(&cache->listxattr_lock);
+       atomic_long_set(&cache->nent, 0);
+       nfs4_xattr_hash_init(cache);
+       cache->listxattr = NULL;
+       INIT_LIST_HEAD(&cache->lru);
+       INIT_LIST_HEAD(&cache->dispose);
+}
+
+int __init nfs4_xattr_cache_init(void)
+{
+       int ret = 0;
+
+       nfs4_xattr_cache_cachep = kmem_cache_create("nfs4_xattr_cache_cache",
+           sizeof(struct nfs4_xattr_cache), 0,
+           (SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_ACCOUNT),
+           nfs4_xattr_cache_init_once);
+       if (nfs4_xattr_cache_cachep == NULL)
+               return -ENOMEM;
+
+       ret = list_lru_init_memcg(&nfs4_xattr_large_entry_lru,
+           &nfs4_xattr_large_entry_shrinker);
+       if (ret)
+               goto out4;
+
+       ret = list_lru_init_memcg(&nfs4_xattr_entry_lru,
+           &nfs4_xattr_entry_shrinker);
+       if (ret)
+               goto out3;
+
+       ret = list_lru_init_memcg(&nfs4_xattr_cache_lru,
+           &nfs4_xattr_cache_shrinker);
+       if (ret)
+               goto out2;
+
+       ret = register_shrinker(&nfs4_xattr_cache_shrinker);
+       if (ret)
+               goto out1;
+
+       ret = register_shrinker(&nfs4_xattr_entry_shrinker);
+       if (ret)
+               goto out;
+
+       ret = register_shrinker(&nfs4_xattr_large_entry_shrinker);
+       if (!ret)
+               return 0;
+
+       unregister_shrinker(&nfs4_xattr_entry_shrinker);
+out:
+       unregister_shrinker(&nfs4_xattr_cache_shrinker);
+out1:
+       list_lru_destroy(&nfs4_xattr_cache_lru);
+out2:
+       list_lru_destroy(&nfs4_xattr_entry_lru);
+out3:
+       list_lru_destroy(&nfs4_xattr_large_entry_lru);
+out4:
+       kmem_cache_destroy(nfs4_xattr_cache_cachep);
+
+       return ret;
+}
+
+void nfs4_xattr_cache_exit(void)
+{
+       unregister_shrinker(&nfs4_xattr_entry_shrinker);
+       unregister_shrinker(&nfs4_xattr_cache_shrinker);
+       list_lru_destroy(&nfs4_xattr_entry_lru);
+       list_lru_destroy(&nfs4_xattr_cache_lru);
+       kmem_cache_destroy(nfs4_xattr_cache_cachep);
+}
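
To tie the new file together: a minimal sketch of how a caller is assumed to combine this cache with the RPC helpers declared in nfs4_2.h. The real xattr handlers live in nfs4proc.c, which is not part of this hunk, and example_getxattr is a hypothetical name used only for illustration:

        /* Hypothetical caller: consult the cache first, fall back to the RPC. */
        static ssize_t example_getxattr(struct inode *inode, const char *name,
                                        void *buf, size_t buflen)
        {
                ssize_t ret;

                ret = nfs4_xattr_cache_get(inode, name, buf, buflen);
                if (ret >= 0 || ret != -ENOENT)
                        return ret;     /* cache hit, or a hard error such as -ERANGE */

                /* Cache miss: go to the server; the RPC path caches the reply. */
                return nfs42_proc_getxattr(inode, name, buf, buflen);
        }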
index c03f324..cc50085 100644 (file)
                                         decode_clone_maxsz + \
                                         decode_getattr_maxsz)
 
+/* Not limited by NFS itself, limited by the generic xattr code */
+#define nfs4_xattr_name_maxsz   XDR_QUADLEN(XATTR_NAME_MAX)
+
+#define encode_getxattr_maxsz   (op_encode_hdr_maxsz + 1 + \
+                                nfs4_xattr_name_maxsz)
+#define decode_getxattr_maxsz   (op_decode_hdr_maxsz + 1 + 1)
+#define encode_setxattr_maxsz   (op_encode_hdr_maxsz + \
+                                1 + nfs4_xattr_name_maxsz + 1)
+#define decode_setxattr_maxsz   (op_decode_hdr_maxsz + decode_change_info_maxsz)
+#define encode_listxattrs_maxsz  (op_encode_hdr_maxsz + 2 + 1)
+#define decode_listxattrs_maxsz  (op_decode_hdr_maxsz + 2 + 1 + 1)
+#define encode_removexattr_maxsz (op_encode_hdr_maxsz + 1 + \
+                                 nfs4_xattr_name_maxsz)
+#define decode_removexattr_maxsz (op_decode_hdr_maxsz + \
+                                 decode_change_info_maxsz)
+
+#define NFS4_enc_getxattr_sz   (compound_encode_hdr_maxsz + \
+                               encode_sequence_maxsz + \
+                               encode_putfh_maxsz + \
+                               encode_getxattr_maxsz)
+#define NFS4_dec_getxattr_sz   (compound_decode_hdr_maxsz + \
+                               decode_sequence_maxsz + \
+                               decode_putfh_maxsz + \
+                               decode_getxattr_maxsz)
+#define NFS4_enc_setxattr_sz   (compound_encode_hdr_maxsz + \
+                               encode_sequence_maxsz + \
+                               encode_putfh_maxsz + \
+                               encode_setxattr_maxsz)
+#define NFS4_dec_setxattr_sz   (compound_decode_hdr_maxsz + \
+                               decode_sequence_maxsz + \
+                               decode_putfh_maxsz + \
+                               decode_setxattr_maxsz)
+#define NFS4_enc_listxattrs_sz (compound_encode_hdr_maxsz + \
+                               encode_sequence_maxsz + \
+                               encode_putfh_maxsz + \
+                               encode_listxattrs_maxsz)
+#define NFS4_dec_listxattrs_sz (compound_decode_hdr_maxsz + \
+                               decode_sequence_maxsz + \
+                               decode_putfh_maxsz + \
+                               decode_listxattrs_maxsz)
+#define NFS4_enc_removexattr_sz        (compound_encode_hdr_maxsz + \
+                               encode_sequence_maxsz + \
+                               encode_putfh_maxsz + \
+                               encode_removexattr_maxsz)
+#define NFS4_dec_removexattr_sz        (compound_decode_hdr_maxsz + \
+                               decode_sequence_maxsz + \
+                               decode_putfh_maxsz + \
+                               decode_removexattr_maxsz)
+
+/*
+ * These values specify the maximum amount of data that is not
+ * associated with the extended attribute name or extended
+ * attribute list in the SETXATTR, GETXATTR and LISTXATTRS
+ * operations, respectively.
+ */
+const u32 nfs42_maxsetxattr_overhead = ((RPC_MAX_HEADER_WITH_AUTH +
+                                       compound_encode_hdr_maxsz +
+                                       encode_sequence_maxsz +
+                                       encode_putfh_maxsz + 1 +
+                                       nfs4_xattr_name_maxsz)
+                                       * XDR_UNIT);
+
+const u32 nfs42_maxgetxattr_overhead = ((RPC_MAX_HEADER_WITH_AUTH +
+                                       compound_decode_hdr_maxsz +
+                                       decode_sequence_maxsz +
+                                       decode_putfh_maxsz + 1) * XDR_UNIT);
+
+const u32 nfs42_maxlistxattrs_overhead = ((RPC_MAX_HEADER_WITH_AUTH +
+                                       compound_decode_hdr_maxsz +
+                                       decode_sequence_maxsz +
+                                       decode_putfh_maxsz + 3) * XDR_UNIT);
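
These overheads are presumably subtracted from the session's negotiated maximum request and response sizes to produce the per-server limits checked earlier (server->sxasize in _nfs42_proc_setxattr, server->lxasize in _nfs42_proc_listxattrs). A sketch of that assumed derivation; the function name and the ca_* parameters are placeholders, not part of this patch:

        static void example_limit_xasize(struct nfs_server *server,
                                         u32 ca_maxrequestsize,
                                         u32 ca_maxresponsesize)
        {
                /* Room left for the attribute value / name list themselves. */
                server->sxasize = ca_maxrequestsize - nfs42_maxsetxattr_overhead;
                server->lxasize = ca_maxresponsesize - nfs42_maxlistxattrs_overhead;
        }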
+
 static void encode_fallocate(struct xdr_stream *xdr,
                             const struct nfs42_falloc_args *args)
 {
@@ -333,6 +405,210 @@ static void encode_layouterror(struct xdr_stream *xdr,
        encode_device_error(xdr, &args->errors[0]);
 }
 
+static void encode_setxattr(struct xdr_stream *xdr,
+                           const struct nfs42_setxattrargs *arg,
+                           struct compound_hdr *hdr)
+{
+       __be32 *p;
+
+       BUILD_BUG_ON(XATTR_CREATE != SETXATTR4_CREATE);
+       BUILD_BUG_ON(XATTR_REPLACE != SETXATTR4_REPLACE);
+
+       encode_op_hdr(xdr, OP_SETXATTR, decode_setxattr_maxsz, hdr);
+       p = reserve_space(xdr, 4);
+       *p = cpu_to_be32(arg->xattr_flags);
+       encode_string(xdr, strlen(arg->xattr_name), arg->xattr_name);
+       p = reserve_space(xdr, 4);
+       *p = cpu_to_be32(arg->xattr_len);
+       if (arg->xattr_len)
+               xdr_write_pages(xdr, arg->xattr_pages, 0, arg->xattr_len);
+}
+
+static int decode_setxattr(struct xdr_stream *xdr,
+                          struct nfs4_change_info *cinfo)
+{
+       int status;
+
+       status = decode_op_hdr(xdr, OP_SETXATTR);
+       if (status)
+               goto out;
+       status = decode_change_info(xdr, cinfo);
+out:
+       return status;
+}
+
+
+static void encode_getxattr(struct xdr_stream *xdr, const char *name,
+                           struct compound_hdr *hdr)
+{
+       encode_op_hdr(xdr, OP_GETXATTR, decode_getxattr_maxsz, hdr);
+       encode_string(xdr, strlen(name), name);
+}
+
+static int decode_getxattr(struct xdr_stream *xdr,
+                          struct nfs42_getxattrres *res,
+                          struct rpc_rqst *req)
+{
+       int status;
+       __be32 *p;
+       u32 len, rdlen;
+
+       status = decode_op_hdr(xdr, OP_GETXATTR);
+       if (status)
+               return status;
+
+       p = xdr_inline_decode(xdr, 4);
+       if (unlikely(!p))
+               return -EIO;
+
+       len = be32_to_cpup(p);
+       if (len > req->rq_rcv_buf.page_len)
+               return -ERANGE;
+
+       res->xattr_len = len;
+
+       if (len > 0) {
+               rdlen = xdr_read_pages(xdr, len);
+               if (rdlen < len)
+                       return -EIO;
+       }
+
+       return 0;
+}
+
+static void encode_removexattr(struct xdr_stream *xdr, const char *name,
+                              struct compound_hdr *hdr)
+{
+       encode_op_hdr(xdr, OP_REMOVEXATTR, decode_removexattr_maxsz, hdr);
+       encode_string(xdr, strlen(name), name);
+}
+
+
+static int decode_removexattr(struct xdr_stream *xdr,
+                          struct nfs4_change_info *cinfo)
+{
+       int status;
+
+       status = decode_op_hdr(xdr, OP_REMOVEXATTR);
+       if (status)
+               goto out;
+
+       status = decode_change_info(xdr, cinfo);
+out:
+       return status;
+}
+
+static void encode_listxattrs(struct xdr_stream *xdr,
+                            const struct nfs42_listxattrsargs *arg,
+                            struct compound_hdr *hdr)
+{
+       __be32 *p;
+
+       encode_op_hdr(xdr, OP_LISTXATTRS, decode_listxattrs_maxsz + 1, hdr);
+
+       p = reserve_space(xdr, 12);
+       if (unlikely(!p))
+               return;
+
+       p = xdr_encode_hyper(p, arg->cookie);
+       /*
+        * RFC 8276 says to specify the full max length of the LISTXATTRS
+        * XDR reply. Count is set to the XDR length of the names array
+        * plus the EOF marker. So, add the cookie and the names count.
+        */
+       *p = cpu_to_be32(arg->count + 8 + 4);
+}
+
+static int decode_listxattrs(struct xdr_stream *xdr,
+                           struct nfs42_listxattrsres *res)
+{
+       int status;
+       __be32 *p;
+       u32 count, len, ulen;
+       size_t left, copied;
+       char *buf;
+
+       status = decode_op_hdr(xdr, OP_LISTXATTRS);
+       if (status) {
+               /*
+                * Special case: for LISTXATTRS, NFS4ERR_TOOSMALL
+                * should be translated to ERANGE.
+                */
+               if (status == -ETOOSMALL)
+                       status = -ERANGE;
+               goto out;
+       }
+
+       p = xdr_inline_decode(xdr, 8);
+       if (unlikely(!p))
+               return -EIO;
+
+       xdr_decode_hyper(p, &res->cookie);
+
+       p = xdr_inline_decode(xdr, 4);
+       if (unlikely(!p))
+               return -EIO;
+
+       left = res->xattr_len;
+       buf = res->xattr_buf;
+
+       count = be32_to_cpup(p);
+       copied = 0;
+
+       /*
+        * We have asked for enough room to encode the maximum number
+        * of possible attribute names, so everything should fit.
+        *
+        * But, don't rely on that assumption. Just decode entries
+        * until they don't fit anymore, just in case the server did
+        * something odd.
+        */
+       while (count--) {
+               p = xdr_inline_decode(xdr, 4);
+               if (unlikely(!p))
+                       return -EIO;
+
+               len = be32_to_cpup(p);
+               if (len > (XATTR_NAME_MAX - XATTR_USER_PREFIX_LEN)) {
+                       status = -ERANGE;
+                       goto out;
+               }
+
+               p = xdr_inline_decode(xdr, len);
+               if (unlikely(!p))
+                       return -EIO;
+
+               ulen = len + XATTR_USER_PREFIX_LEN + 1;
+               if (buf) {
+                       if (ulen > left) {
+                               status = -ERANGE;
+                               goto out;
+                       }
+
+                       memcpy(buf, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN);
+                       memcpy(buf + XATTR_USER_PREFIX_LEN, p, len);
+
+                       buf[ulen - 1] = 0;
+                       buf += ulen;
+                       left -= ulen;
+               }
+               copied += ulen;
+       }
+
+       p = xdr_inline_decode(xdr, 4);
+       if (unlikely(!p))
+               return -EIO;
+
+       res->eof = be32_to_cpup(p);
+       res->copied = copied;
+
+out:
+       if (status == -ERANGE && res->xattr_len == XATTR_LIST_MAX)
+               status = -E2BIG;
+
+       return status;
+}
+
 /*
  * Encode ALLOCATE request
  */
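
The LISTXATTRS encoder above budgets the reply as the caller's name budget plus 12 bytes (an 8-byte cookie and a 4-byte name count), and the decoder flattens each returned name into the flat, NUL-separated buffer layout that listxattr(2) callers expect: the "user." prefix, the raw name, then a terminating NUL. Below is a minimal user-space sketch of that packing step only; pack_user_names() and its constants are hypothetical illustration, not kernel code.

/*
 * Illustration only: pack xattr names into the listxattr(2)-style
 * buffer, mirroring the copy loop in decode_listxattrs() above.
 */
#include <stddef.h>
#include <string.h>
#include <sys/types.h>

#define USER_PREFIX     "user."
#define USER_PREFIX_LEN (sizeof(USER_PREFIX) - 1)

/* Returns bytes written, or -1 if the buffer is too small (the ERANGE case). */
static ssize_t pack_user_names(const char *const *names, size_t n,
                               char *buf, size_t buflen)
{
        size_t copied = 0;

        for (size_t i = 0; i < n; i++) {
                size_t len = strlen(names[i]);
                size_t ulen = USER_PREFIX_LEN + len + 1; /* prefix + name + NUL */

                if (buf) {
                        if (copied + ulen > buflen)
                                return -1;
                        memcpy(buf + copied, USER_PREFIX, USER_PREFIX_LEN);
                        memcpy(buf + copied + USER_PREFIX_LEN, names[i], len);
                        buf[copied + ulen - 1] = '\0';
                }
                copied += ulen;
        }
        return (ssize_t)copied;
}
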
@@ -988,4 +1264,166 @@ out:
        return status;
 }
 
+#ifdef CONFIG_NFS_V4_2
+static void nfs4_xdr_enc_setxattr(struct rpc_rqst *req, struct xdr_stream *xdr,
+                                 const void *data)
+{
+       const struct nfs42_setxattrargs *args = data;
+       struct compound_hdr hdr = {
+               .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+       };
+
+       encode_compound_hdr(xdr, req, &hdr);
+       encode_sequence(xdr, &args->seq_args, &hdr);
+       encode_putfh(xdr, args->fh, &hdr);
+       encode_setxattr(xdr, args, &hdr);
+       encode_nops(&hdr);
+}
+
+static int nfs4_xdr_dec_setxattr(struct rpc_rqst *req, struct xdr_stream *xdr,
+                                void *data)
+{
+       struct nfs42_setxattrres *res = data;
+       struct compound_hdr hdr;
+       int status;
+
+       status = decode_compound_hdr(xdr, &hdr);
+       if (status)
+               goto out;
+       status = decode_sequence(xdr, &res->seq_res, req);
+       if (status)
+               goto out;
+       status = decode_putfh(xdr);
+       if (status)
+               goto out;
+
+       status = decode_setxattr(xdr, &res->cinfo);
+out:
+       return status;
+}
+
+static void nfs4_xdr_enc_getxattr(struct rpc_rqst *req, struct xdr_stream *xdr,
+                                 const void *data)
+{
+       const struct nfs42_getxattrargs *args = data;
+       struct compound_hdr hdr = {
+               .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+       };
+       size_t plen;
+
+       encode_compound_hdr(xdr, req, &hdr);
+       encode_sequence(xdr, &args->seq_args, &hdr);
+       encode_putfh(xdr, args->fh, &hdr);
+       encode_getxattr(xdr, args->xattr_name, &hdr);
+
+       plen = args->xattr_len ? args->xattr_len : XATTR_SIZE_MAX;
+
+       rpc_prepare_reply_pages(req, args->xattr_pages, 0, plen,
+           hdr.replen);
+       req->rq_rcv_buf.flags |= XDRBUF_SPARSE_PAGES;
+
+       encode_nops(&hdr);
+}
+
+static int nfs4_xdr_dec_getxattr(struct rpc_rqst *rqstp,
+                                struct xdr_stream *xdr, void *data)
+{
+       struct nfs42_getxattrres *res = data;
+       struct compound_hdr hdr;
+       int status;
+
+       status = decode_compound_hdr(xdr, &hdr);
+       if (status)
+               goto out;
+       status = decode_sequence(xdr, &res->seq_res, rqstp);
+       if (status)
+               goto out;
+       status = decode_putfh(xdr);
+       if (status)
+               goto out;
+       status = decode_getxattr(xdr, res, rqstp);
+out:
+       return status;
+}
+
+static void nfs4_xdr_enc_listxattrs(struct rpc_rqst *req,
+                                   struct xdr_stream *xdr, const void *data)
+{
+       const struct nfs42_listxattrsargs *args = data;
+       struct compound_hdr hdr = {
+               .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+       };
+
+       encode_compound_hdr(xdr, req, &hdr);
+       encode_sequence(xdr, &args->seq_args, &hdr);
+       encode_putfh(xdr, args->fh, &hdr);
+       encode_listxattrs(xdr, args, &hdr);
+
+       rpc_prepare_reply_pages(req, args->xattr_pages, 0, args->count,
+           hdr.replen);
+       req->rq_rcv_buf.flags |= XDRBUF_SPARSE_PAGES;
+
+       encode_nops(&hdr);
+}
+
+static int nfs4_xdr_dec_listxattrs(struct rpc_rqst *rqstp,
+                                  struct xdr_stream *xdr, void *data)
+{
+       struct nfs42_listxattrsres *res = data;
+       struct compound_hdr hdr;
+       int status;
+
+       xdr_set_scratch_buffer(xdr, page_address(res->scratch), PAGE_SIZE);
+
+       status = decode_compound_hdr(xdr, &hdr);
+       if (status)
+               goto out;
+       status = decode_sequence(xdr, &res->seq_res, rqstp);
+       if (status)
+               goto out;
+       status = decode_putfh(xdr);
+       if (status)
+               goto out;
+       status = decode_listxattrs(xdr, res);
+out:
+       return status;
+}
+
+static void nfs4_xdr_enc_removexattr(struct rpc_rqst *req,
+                                    struct xdr_stream *xdr, const void *data)
+{
+       const struct nfs42_removexattrargs *args = data;
+       struct compound_hdr hdr = {
+               .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+       };
+
+       encode_compound_hdr(xdr, req, &hdr);
+       encode_sequence(xdr, &args->seq_args, &hdr);
+       encode_putfh(xdr, args->fh, &hdr);
+       encode_removexattr(xdr, args->xattr_name, &hdr);
+       encode_nops(&hdr);
+}
+
+static int nfs4_xdr_dec_removexattr(struct rpc_rqst *req,
+                                   struct xdr_stream *xdr, void *data)
+{
+       struct nfs42_removexattrres *res = data;
+       struct compound_hdr hdr;
+       int status;
+
+       status = decode_compound_hdr(xdr, &hdr);
+       if (status)
+               goto out;
+       status = decode_sequence(xdr, &res->seq_res, req);
+       if (status)
+               goto out;
+       status = decode_putfh(xdr);
+       if (status)
+               goto out;
+
+       status = decode_removexattr(xdr, &res->cinfo);
+out:
+       return status;
+}
+#endif
 #endif /* __LINUX_FS_NFS_NFS4_2XDR_H */
index 2b7f6dc..0c9505d 100644 (file)
@@ -117,7 +117,7 @@ struct nfs4_state_owner {
        unsigned long        so_flags;
        struct list_head     so_states;
        struct nfs_seqid_counter so_seqid;
-       seqcount_t           so_reclaim_seqcount;
+       seqcount_spinlock_t  so_reclaim_seqcount;
        struct mutex         so_delegreturn_mutex;
 };
 
@@ -324,6 +324,13 @@ extern int update_open_stateid(struct nfs4_state *state,
 
 extern int nfs4_proc_get_lease_time(struct nfs_client *clp,
                struct nfs_fsinfo *fsinfo);
+extern void nfs4_update_changeattr(struct inode *dir,
+                                  struct nfs4_change_info *cinfo,
+                                  unsigned long timestamp,
+                                  unsigned long cache_validity);
+extern int nfs4_buf_to_pages_noslab(const void *buf, size_t buflen,
+                                   struct page **pages);
+
 #if defined(CONFIG_NFS_V4_1)
 extern int nfs41_sequence_done(struct rpc_task *, struct nfs4_sequence_res *);
 extern int nfs4_proc_create_session(struct nfs_client *, const struct cred *);
@@ -557,6 +564,12 @@ static inline void nfs4_unregister_sysctl(void)
 /* nfs4xdr.c */
 extern const struct rpc_procinfo nfs4_procedures[];
 
+#ifdef CONFIG_NFS_V4_2
+extern const u32 nfs42_maxsetxattr_overhead;
+extern const u32 nfs42_maxgetxattr_overhead;
+extern const u32 nfs42_maxlistxattrs_overhead;
+#endif
+
 struct nfs4_mount_data;
 
 /* callback_xdr.c */
@@ -613,12 +626,34 @@ static inline bool nfs4_state_match_open_stateid_other(const struct nfs4_state *
                nfs4_stateid_match_other(&state->open_stateid, stateid);
 }
 
+/* nfs42xattr.c */
+#ifdef CONFIG_NFS_V4_2
+extern int __init nfs4_xattr_cache_init(void);
+extern void nfs4_xattr_cache_exit(void);
+extern void nfs4_xattr_cache_add(struct inode *inode, const char *name,
+                                const char *buf, struct page **pages,
+                                ssize_t buflen);
+extern void nfs4_xattr_cache_remove(struct inode *inode, const char *name);
+extern ssize_t nfs4_xattr_cache_get(struct inode *inode, const char *name,
+                               char *buf, ssize_t buflen);
+extern void nfs4_xattr_cache_set_list(struct inode *inode, const char *buf,
+                                     ssize_t buflen);
+extern ssize_t nfs4_xattr_cache_list(struct inode *inode, char *buf,
+                                    ssize_t buflen);
+extern void nfs4_xattr_cache_zap(struct inode *inode);
 #else
+static inline void nfs4_xattr_cache_zap(struct inode *inode)
+{
+}
+#endif /* CONFIG_NFS_V4_2 */
+
+#else /* CONFIG_NFS_V4 */
 
 #define nfs4_close_state(a, b) do { } while (0)
 #define nfs4_close_sync(a, b) do { } while (0)
 #define nfs4_state_protect(a, b, c, d) do { } while (0)
 #define nfs4_state_protect_write(a, b, c, d) do { } while (0)
 
+
 #endif /* CONFIG_NFS_V4 */
 #endif /* __LINUX_FS_NFS_NFS4_FS.H */
index 0bd77cc..daacc78 100644 (file)
@@ -880,7 +880,7 @@ static int nfs4_set_client(struct nfs_server *server,
 
        if (minorversion == 0)
                __set_bit(NFS_CS_REUSEPORT, &cl_init.init_flags);
-       else if (proto == XPRT_TRANSPORT_TCP)
+       if (proto == XPRT_TRANSPORT_TCP)
                cl_init.nconnect = nconnect;
 
        if (server->flags & NFS_MOUNT_NORESVPORT)
@@ -992,6 +992,36 @@ static void nfs4_session_limit_rwsize(struct nfs_server *server)
 #endif /* CONFIG_NFS_V4_1 */
 }
 
+/*
+ * Limit xattr sizes using the channel attributes.
+ */
+static void nfs4_session_limit_xasize(struct nfs_server *server)
+{
+#ifdef CONFIG_NFS_V4_2
+       struct nfs4_session *sess;
+       u32 server_gxa_sz;
+       u32 server_sxa_sz;
+       u32 server_lxa_sz;
+
+       if (!nfs4_has_session(server->nfs_client))
+               return;
+
+       sess = server->nfs_client->cl_session;
+
+       server_gxa_sz = sess->fc_attrs.max_resp_sz - nfs42_maxgetxattr_overhead;
+       server_sxa_sz = sess->fc_attrs.max_rqst_sz - nfs42_maxsetxattr_overhead;
+       server_lxa_sz = sess->fc_attrs.max_resp_sz -
+           nfs42_maxlistxattrs_overhead;
+
+       if (server->gxasize > server_gxa_sz)
+               server->gxasize = server_gxa_sz;
+       if (server->sxasize > server_sxa_sz)
+               server->sxasize = server_sxa_sz;
+       if (server->lxasize > server_lxa_sz)
+               server->lxasize = server_lxa_sz;
+#endif
+}
+
 static int nfs4_server_common_setup(struct nfs_server *server,
                struct nfs_fh *mntfh, bool auth_probe)
 {
@@ -1039,6 +1069,7 @@ static int nfs4_server_common_setup(struct nfs_server *server,
                goto out;
 
        nfs4_session_limit_rwsize(server);
+       nfs4_session_limit_xasize(server);
 
        if (server->namelen == 0 || server->namelen > NFS4_MAXNAMLEN)
                server->namelen = NFS4_MAXNAMLEN;
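
nfs4_session_limit_xasize() above clamps the per-mount xattr transfer sizes so a single GETXATTR, SETXATTR or LISTXATTRS compound still fits within the session's negotiated request/reply sizes once the fixed XDR overhead (the nfs42_max*xattr_overhead constants added in nfs42xdr.c) has been subtracted. A rough standalone sketch of the same clamping follows; the helper name and all numbers are invented for illustration and are not the real channel attributes or overhead values.

/*
 * Toy version of the clamping in nfs4_session_limit_xasize():
 * limit = channel maximum minus fixed per-op overhead.
 */
#include <stdio.h>

static unsigned int clamp_xasize(unsigned int wanted,
                                 unsigned int channel_max,
                                 unsigned int overhead)
{
        unsigned int limit = channel_max - overhead; /* assumes sane channel attrs */

        return wanted > limit ? limit : wanted;
}

int main(void)
{
        unsigned int max_resp_sz = 1048576;   /* made-up fc_attrs.max_resp_sz */
        unsigned int gxa_overhead = 100;      /* stand-in for nfs42_maxgetxattr_overhead */

        printf("gxasize -> %u\n", clamp_xasize(65536, max_resp_sz, gxa_overhead));
        return 0;
}
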
index 8e5d622..a339707 100644 (file)
@@ -110,6 +110,7 @@ static int
 nfs4_file_flush(struct file *file, fl_owner_t id)
 {
        struct inode    *inode = file_inode(file);
+       errseq_t since;
 
        dprintk("NFS: flush(%pD2)\n", file);
 
@@ -125,7 +126,9 @@ nfs4_file_flush(struct file *file, fl_owner_t id)
                return filemap_fdatawrite(file->f_mapping);
 
        /* Flush writes to the server and return any errors */
-       return nfs_wb_all(inode);
+       since = filemap_sample_wb_err(file->f_mapping);
+       nfs_wb_all(inode);
+       return filemap_check_wb_err(file->f_mapping, since);
 }
 
 #ifdef CONFIG_NFS_V4_2
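
The nfs4_file_flush() change above switches from returning nfs_wb_all() directly to the errseq_t sample/check pattern: flush only reports a writeback error raised after this flush sampled the counter, rather than re-reporting an error some other opener has already consumed. The sketch below is a simplified, user-space-flavoured illustration of that sample/check idea; it deliberately does not reproduce the real errseq_t encoding, which packs the error code and a "seen" flag into the counter itself.

/* Toy sample/check pattern, loosely modelled on errseq_t. */
#include <stdio.h>

struct wb_err {
        unsigned long seq;    /* bumped whenever an error is recorded */
        int last_error;
};

static unsigned long sample_wb_err(const struct wb_err *e)
{
        return e->seq;
}

static int check_wb_err(const struct wb_err *e, unsigned long since)
{
        return e->seq != since ? e->last_error : 0;
}

int main(void)
{
        struct wb_err err = { .seq = 3, .last_error = -5 /* pretend -EIO */ };
        unsigned long since = sample_wb_err(&err);

        /* ... writeback runs here; suppose a new error is recorded ... */
        err.seq++;
        err.last_error = -28; /* pretend -ENOSPC */

        printf("flush result: %d\n", check_wb_err(&err, since));
        return 0;
}
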
index 8963062..dbd0154 100644 (file)
@@ -66,6 +66,7 @@
 #include "nfs4idmap.h"
 #include "nfs4session.h"
 #include "fscache.h"
+#include "nfs42.h"
 
 #include "nfs4trace.h"
 
@@ -256,6 +257,7 @@ const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE
                        | FATTR4_WORD1_FS_LAYOUT_TYPES,
                        FATTR4_WORD2_LAYOUT_BLKSIZE
                        | FATTR4_WORD2_CLONE_BLKSIZE
+                       | FATTR4_WORD2_XATTR_SUPPORT
 };
 
 const u32 nfs4_fs_locations_bitmap[3] = {
@@ -1173,37 +1175,49 @@ nfs4_dec_nlink_locked(struct inode *inode)
 }
 
 static void
-update_changeattr_locked(struct inode *dir, struct nfs4_change_info *cinfo,
+nfs4_update_changeattr_locked(struct inode *inode,
+               struct nfs4_change_info *cinfo,
                unsigned long timestamp, unsigned long cache_validity)
 {
-       struct nfs_inode *nfsi = NFS_I(dir);
+       struct nfs_inode *nfsi = NFS_I(inode);
 
        nfsi->cache_validity |= NFS_INO_INVALID_CTIME
                | NFS_INO_INVALID_MTIME
-               | NFS_INO_INVALID_DATA
                | cache_validity;
-       if (cinfo->atomic && cinfo->before == inode_peek_iversion_raw(dir)) {
+
+       if (cinfo->atomic && cinfo->before == inode_peek_iversion_raw(inode)) {
                nfsi->cache_validity &= ~NFS_INO_REVAL_PAGECACHE;
                nfsi->attrtimeo_timestamp = jiffies;
        } else {
-               nfs_force_lookup_revalidate(dir);
-               if (cinfo->before != inode_peek_iversion_raw(dir))
+               if (S_ISDIR(inode->i_mode)) {
+                       nfsi->cache_validity |= NFS_INO_INVALID_DATA;
+                       nfs_force_lookup_revalidate(inode);
+               } else {
+                       if (!NFS_PROTO(inode)->have_delegation(inode,
+                                                              FMODE_READ))
+                               nfsi->cache_validity |= NFS_INO_REVAL_PAGECACHE;
+               }
+
+               if (cinfo->before != inode_peek_iversion_raw(inode))
                        nfsi->cache_validity |= NFS_INO_INVALID_ACCESS |
-                               NFS_INO_INVALID_ACL;
+                                               NFS_INO_INVALID_ACL |
+                                               NFS_INO_INVALID_XATTR;
        }
-       inode_set_iversion_raw(dir, cinfo->after);
+       inode_set_iversion_raw(inode, cinfo->after);
        nfsi->read_cache_jiffies = timestamp;
        nfsi->attr_gencount = nfs_inc_attr_generation_counter();
        nfsi->cache_validity &= ~NFS_INO_INVALID_CHANGE;
-       nfs_fscache_invalidate(dir);
+
+       if (nfsi->cache_validity & NFS_INO_INVALID_DATA)
+               nfs_fscache_invalidate(inode);
 }
 
-static void
-update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo,
+void
+nfs4_update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo,
                unsigned long timestamp, unsigned long cache_validity)
 {
        spin_lock(&dir->i_lock);
-       update_changeattr_locked(dir, cinfo, timestamp, cache_validity);
+       nfs4_update_changeattr_locked(dir, cinfo, timestamp, cache_validity);
        spin_unlock(&dir->i_lock);
 }
 
@@ -1356,6 +1370,12 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
                                NFS4_ACCESS_MODIFY |
                                NFS4_ACCESS_EXTEND |
                                NFS4_ACCESS_EXECUTE;
+#ifdef CONFIG_NFS_V4_2
+                       if (server->caps & NFS_CAP_XATTR)
+                               p->o_arg.access |= NFS4_ACCESS_XAREAD |
+                                   NFS4_ACCESS_XAWRITE |
+                                   NFS4_ACCESS_XALIST;
+#endif
                }
        }
        p->o_arg.clientid = server->nfs_client->cl_clientid;
@@ -2653,8 +2673,9 @@ static int _nfs4_proc_open(struct nfs4_opendata *data,
                        data->file_created = true;
                if (data->file_created ||
                    inode_peek_iversion_raw(dir) != o_res->cinfo.after)
-                       update_changeattr(dir, &o_res->cinfo,
-                                       o_res->f_attr->time_start, 0);
+                       nfs4_update_changeattr(dir, &o_res->cinfo,
+                                       o_res->f_attr->time_start,
+                                       NFS_INO_INVALID_DATA);
        }
        if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0)
                server->caps &= ~NFS_CAP_POSIX_LOCK;
@@ -3756,7 +3777,7 @@ static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync)
 
 #define FATTR4_WORD1_NFS40_MASK (2*FATTR4_WORD1_MOUNTED_ON_FILEID - 1UL)
 #define FATTR4_WORD2_NFS41_MASK (2*FATTR4_WORD2_SUPPATTR_EXCLCREAT - 1UL)
-#define FATTR4_WORD2_NFS42_MASK (2*FATTR4_WORD2_MODE_UMASK - 1UL)
+#define FATTR4_WORD2_NFS42_MASK (2*FATTR4_WORD2_XATTR_SUPPORT - 1UL)
 
 static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
 {
@@ -4540,7 +4561,8 @@ _nfs4_proc_remove(struct inode *dir, const struct qstr *name, u32 ftype)
        status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
        if (status == 0) {
                spin_lock(&dir->i_lock);
-               update_changeattr_locked(dir, &res.cinfo, timestamp, 0);
+               nfs4_update_changeattr_locked(dir, &res.cinfo, timestamp,
+                                             NFS_INO_INVALID_DATA);
                /* Removing a directory decrements nlink in the parent */
                if (ftype == NF4DIR && dir->i_nlink > 2)
                        nfs4_dec_nlink_locked(dir);
@@ -4624,8 +4646,9 @@ static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir)
                                    &data->timeout) == -EAGAIN)
                return 0;
        if (task->tk_status == 0)
-               update_changeattr(dir, &res->cinfo,
-                               res->dir_attr->time_start, 0);
+               nfs4_update_changeattr(dir, &res->cinfo,
+                               res->dir_attr->time_start,
+                               NFS_INO_INVALID_DATA);
        return 1;
 }
 
@@ -4669,16 +4692,18 @@ static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
        if (task->tk_status == 0) {
                if (new_dir != old_dir) {
                        /* Note: If we moved a directory, nlink will change */
-                       update_changeattr(old_dir, &res->old_cinfo,
+                       nfs4_update_changeattr(old_dir, &res->old_cinfo,
                                        res->old_fattr->time_start,
-                                       NFS_INO_INVALID_OTHER);
-                       update_changeattr(new_dir, &res->new_cinfo,
+                                       NFS_INO_INVALID_OTHER |
+                                           NFS_INO_INVALID_DATA);
+                       nfs4_update_changeattr(new_dir, &res->new_cinfo,
                                        res->new_fattr->time_start,
-                                       NFS_INO_INVALID_OTHER);
+                                       NFS_INO_INVALID_OTHER |
+                                           NFS_INO_INVALID_DATA);
                } else
-                       update_changeattr(old_dir, &res->old_cinfo,
+                       nfs4_update_changeattr(old_dir, &res->old_cinfo,
                                        res->old_fattr->time_start,
-                                       0);
+                                       NFS_INO_INVALID_DATA);
        }
        return 1;
 }
@@ -4719,7 +4744,8 @@ static int _nfs4_proc_link(struct inode *inode, struct inode *dir, const struct
 
        status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
        if (!status) {
-               update_changeattr(dir, &res.cinfo, res.fattr->time_start, 0);
+               nfs4_update_changeattr(dir, &res.cinfo, res.fattr->time_start,
+                                      NFS_INO_INVALID_DATA);
                status = nfs_post_op_update_inode(inode, res.fattr);
                if (!status)
                        nfs_setsecurity(inode, res.fattr, res.label);
@@ -4797,8 +4823,9 @@ static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_
                                    &data->arg.seq_args, &data->res.seq_res, 1);
        if (status == 0) {
                spin_lock(&dir->i_lock);
-               update_changeattr_locked(dir, &data->res.dir_cinfo,
-                               data->res.fattr->time_start, 0);
+               nfs4_update_changeattr_locked(dir, &data->res.dir_cinfo,
+                               data->res.fattr->time_start,
+                               NFS_INO_INVALID_DATA);
                /* Creating a directory bumps nlink in the parent */
                if (data->arg.ftype == NF4DIR)
                        nfs4_inc_nlink_locked(dir);
@@ -5531,7 +5558,7 @@ static inline int nfs4_server_supports_acls(struct nfs_server *server)
  */
 #define NFS4ACL_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE)
 
-static int buf_to_pages_noslab(const void *buf, size_t buflen,
+int nfs4_buf_to_pages_noslab(const void *buf, size_t buflen,
                struct page **pages)
 {
        struct page *newpage, **spages;
@@ -5773,7 +5800,7 @@ static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t bufl
                return -EOPNOTSUPP;
        if (npages > ARRAY_SIZE(pages))
                return -ERANGE;
-       i = buf_to_pages_noslab(buf, buflen, arg.acl_pages);
+       i = nfs4_buf_to_pages_noslab(buf, buflen, arg.acl_pages);
        if (i < 0)
                return i;
        nfs4_inode_make_writeable(inode);
@@ -5845,8 +5872,6 @@ static int _nfs4_get_security_label(struct inode *inode, void *buf,
                return ret;
        if (!(fattr.valid & NFS_ATTR_FATTR_V4_SECURITY_LABEL))
                return -ENOENT;
-       if (buflen < label.len)
-               return -ERANGE;
        return 0;
 }
 
@@ -7430,6 +7455,133 @@ nfs4_listxattr_nfs4_label(struct inode *inode, char *list, size_t list_len)
 
 #endif
 
+#ifdef CONFIG_NFS_V4_2
+static int nfs4_xattr_set_nfs4_user(const struct xattr_handler *handler,
+                                   struct dentry *unused, struct inode *inode,
+                                   const char *key, const void *buf,
+                                   size_t buflen, int flags)
+{
+       struct nfs_access_entry cache;
+       int ret;
+
+       if (!nfs_server_capable(inode, NFS_CAP_XATTR))
+               return -EOPNOTSUPP;
+
+       /*
+        * There is no mapping from the MAY_* flags to the NFS_ACCESS_XA*
+        * flags right now. Handling of xattr operations uses the normal
+        * file read/write permissions.
+        *
+        * Just in case the server has other ideas (which RFC 8276 allows),
+        * do a cached access check for the XA* flags to possibly avoid
+        * doing an RPC and getting EACCES back.
+        */
+       if (!nfs_access_get_cached(inode, current_cred(), &cache, true)) {
+               if (!(cache.mask & NFS_ACCESS_XAWRITE))
+                       return -EACCES;
+       }
+
+       if (buf == NULL) {
+               ret = nfs42_proc_removexattr(inode, key);
+               if (!ret)
+                       nfs4_xattr_cache_remove(inode, key);
+       } else {
+               ret = nfs42_proc_setxattr(inode, key, buf, buflen, flags);
+               if (!ret)
+                       nfs4_xattr_cache_add(inode, key, buf, NULL, buflen);
+       }
+
+       return ret;
+}
+
+static int nfs4_xattr_get_nfs4_user(const struct xattr_handler *handler,
+                                   struct dentry *unused, struct inode *inode,
+                                   const char *key, void *buf, size_t buflen)
+{
+       struct nfs_access_entry cache;
+       ssize_t ret;
+
+       if (!nfs_server_capable(inode, NFS_CAP_XATTR))
+               return -EOPNOTSUPP;
+
+       if (!nfs_access_get_cached(inode, current_cred(), &cache, true)) {
+               if (!(cache.mask & NFS_ACCESS_XAREAD))
+                       return -EACCES;
+       }
+
+       ret = nfs_revalidate_inode(NFS_SERVER(inode), inode);
+       if (ret)
+               return ret;
+
+       ret = nfs4_xattr_cache_get(inode, key, buf, buflen);
+       if (ret >= 0 || (ret < 0 && ret != -ENOENT))
+               return ret;
+
+       ret = nfs42_proc_getxattr(inode, key, buf, buflen);
+
+       return ret;
+}
+
+static ssize_t
+nfs4_listxattr_nfs4_user(struct inode *inode, char *list, size_t list_len)
+{
+       u64 cookie;
+       bool eof;
+       ssize_t ret, size;
+       char *buf;
+       size_t buflen;
+       struct nfs_access_entry cache;
+
+       if (!nfs_server_capable(inode, NFS_CAP_XATTR))
+               return 0;
+
+       if (!nfs_access_get_cached(inode, current_cred(), &cache, true)) {
+               if (!(cache.mask & NFS_ACCESS_XALIST))
+                       return 0;
+       }
+
+       ret = nfs_revalidate_inode(NFS_SERVER(inode), inode);
+       if (ret)
+               return ret;
+
+       ret = nfs4_xattr_cache_list(inode, list, list_len);
+       if (ret >= 0 || (ret < 0 && ret != -ENOENT))
+               return ret;
+
+       cookie = 0;
+       eof = false;
+       buflen = list_len ? list_len : XATTR_LIST_MAX;
+       buf = list_len ? list : NULL;
+       size = 0;
+
+       while (!eof) {
+               ret = nfs42_proc_listxattrs(inode, buf, buflen,
+                   &cookie, &eof);
+               if (ret < 0)
+                       return ret;
+
+               if (list_len) {
+                       buf += ret;
+                       buflen -= ret;
+               }
+               size += ret;
+       }
+
+       if (list_len)
+               nfs4_xattr_cache_set_list(inode, list, size);
+
+       return size;
+}
+
+#else
+
+static ssize_t
+nfs4_listxattr_nfs4_user(struct inode *inode, char *list, size_t list_len)
+{
+       return 0;
+}
+#endif /* CONFIG_NFS_V4_2 */
+
 /*
  * nfs_fhget will use either the mounted_on_fileid or the fileid
  */
@@ -10035,7 +10187,7 @@ const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = {
 
 static ssize_t nfs4_listxattr(struct dentry *dentry, char *list, size_t size)
 {
-       ssize_t error, error2;
+       ssize_t error, error2, error3;
 
        error = generic_listxattr(dentry, list, size);
        if (error < 0)
@@ -10048,7 +10200,17 @@ static ssize_t nfs4_listxattr(struct dentry *dentry, char *list, size_t size)
        error2 = nfs4_listxattr_nfs4_label(d_inode(dentry), list, size);
        if (error2 < 0)
                return error2;
-       return error + error2;
+
+       if (list) {
+               list += error2;
+               size -= error2;
+       }
+
+       error3 = nfs4_listxattr_nfs4_user(d_inode(dentry), list, size);
+       if (error3 < 0)
+               return error3;
+
+       return error + error2 + error3;
 }
 
 static const struct inode_operations nfs4_dir_inode_operations = {
@@ -10136,11 +10298,22 @@ static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = {
        .set    = nfs4_xattr_set_nfs4_acl,
 };
 
+#ifdef CONFIG_NFS_V4_2
+static const struct xattr_handler nfs4_xattr_nfs4_user_handler = {
+       .prefix = XATTR_USER_PREFIX,
+       .get    = nfs4_xattr_get_nfs4_user,
+       .set    = nfs4_xattr_set_nfs4_user,
+};
+#endif
+
 const struct xattr_handler *nfs4_xattr_handlers[] = {
        &nfs4_xattr_nfs4_acl_handler,
 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
        &nfs4_xattr_nfs4_label_handler,
 #endif
+#ifdef CONFIG_NFS_V4_2
+       &nfs4_xattr_nfs4_user_handler,
+#endif
        NULL
 };
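
With the user.* handler registered above, ordinary xattr syscalls work on an NFSv4.2 mount whenever the server advertises xattr support; a missing attribute surfaces as ENODATA through the NFS4ERR_NOXATTR mapping added in nfs4xdr.c further below, and a denied access comes back as EACCES. The small user-space example that follows exercises those paths; the mount path and attribute value are placeholders, not anything from this patch.

/* Plain setxattr/getxattr/listxattr calls against a file on an NFSv4.2 mount. */
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/xattr.h>

int main(void)
{
        const char *path = "/mnt/nfs/file";   /* hypothetical NFSv4.2 mount */
        char value[64];
        char names[256];
        ssize_t len;

        if (setxattr(path, "user.comment", "hello", 5, 0) < 0)
                perror("setxattr");   /* EOPNOTSUPP if the server lacks xattr support */

        len = getxattr(path, "user.comment", value, sizeof(value));
        if (len < 0)
                perror("getxattr");   /* ENODATA maps from NFS4ERR_NOXATTR */
        else
                printf("user.comment = %.*s\n", (int)len, value);

        len = listxattr(path, names, sizeof(names));
        for (ssize_t off = 0; off < len; off += strlen(names + off) + 1)
                printf("name: %s\n", names + off);

        return 0;
}
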
 
index a8dc25c..b1dba24 100644 (file)
@@ -509,7 +509,7 @@ nfs4_alloc_state_owner(struct nfs_server *server,
        nfs4_init_seqid_counter(&sp->so_seqid);
        atomic_set(&sp->so_count, 1);
        INIT_LIST_HEAD(&sp->so_lru);
-       seqcount_init(&sp->so_reclaim_seqcount);
+       seqcount_spinlock_init(&sp->so_reclaim_seqcount, &sp->so_lock);
        mutex_init(&sp->so_delegreturn_mutex);
        return sp;
 }
index 1475f93..0c1ab84 100644 (file)
@@ -69,6 +69,7 @@ static void nfs4_evict_inode(struct inode *inode)
        pnfs_destroy_layout(NFS_I(inode));
        /* First call standard NFS clear_inode() code */
        nfs_clear_inode(inode);
+       nfs4_xattr_cache_zap(inode);
 }
 
 struct nfs_referral_count {
@@ -268,6 +269,12 @@ static int __init init_nfs_v4(void)
        if (err)
                goto out1;
 
+#ifdef CONFIG_NFS_V4_2
+       err = nfs4_xattr_cache_init();
+       if (err)
+               goto out2;
+#endif
+
        err = nfs4_register_sysctl();
        if (err)
                goto out2;
@@ -288,6 +295,9 @@ static void __exit exit_nfs_v4(void)
        nfs4_pnfs_v3_ds_connect_unload();
 
        unregister_nfs_version(&nfs_v4);
+#ifdef CONFIG_NFS_V4_2
+       nfs4_xattr_cache_exit();
+#endif
        nfs4_unregister_sysctl();
        nfs_idmap_quit();
        nfs_dns_resolver_destroy();
index 5435411..b4f852d 100644 (file)
@@ -1727,6 +1727,13 @@ DEFINE_NFS4_IDMAP_EVENT(nfs4_map_group_to_gid);
 DEFINE_NFS4_IDMAP_EVENT(nfs4_map_uid_to_name);
 DEFINE_NFS4_IDMAP_EVENT(nfs4_map_gid_to_group);
 
+#ifdef CONFIG_NFS_V4_1
+#define NFS4_LSEG_LAYOUT_STATEID_HASH(lseg) \
+       (lseg ? nfs_stateid_hash(&lseg->pls_layout->plh_stateid) : 0)
+#else
+#define NFS4_LSEG_LAYOUT_STATEID_HASH(lseg) (0)
+#endif
+
 DECLARE_EVENT_CLASS(nfs4_read_event,
                TP_PROTO(
                        const struct nfs_pgio_header *hdr,
@@ -1745,6 +1752,8 @@ DECLARE_EVENT_CLASS(nfs4_read_event,
                        __field(unsigned long, error)
                        __field(int, stateid_seq)
                        __field(u32, stateid_hash)
+                       __field(int, layoutstateid_seq)
+                       __field(u32, layoutstateid_hash)
                ),
 
                TP_fast_assign(
@@ -1754,6 +1763,7 @@ DECLARE_EVENT_CLASS(nfs4_read_event,
                                                  hdr->args.fh : &nfsi->fh;
                        const struct nfs4_state *state =
                                hdr->args.context->state;
+                       const struct pnfs_layout_segment *lseg = hdr->lseg;
 
                        __entry->dev = inode->i_sb->s_dev;
                        __entry->fileid = nfsi->fileid;
@@ -1766,11 +1776,15 @@ DECLARE_EVENT_CLASS(nfs4_read_event,
                                be32_to_cpu(state->stateid.seqid);
                        __entry->stateid_hash =
                                nfs_stateid_hash(&state->stateid);
+                       __entry->layoutstateid_seq = lseg ? lseg->pls_seq : 0;
+                       __entry->layoutstateid_hash =
+                               NFS4_LSEG_LAYOUT_STATEID_HASH(lseg);
                ),
 
                TP_printk(
                        "error=%ld (%s) fileid=%02x:%02x:%llu fhandle=0x%08x "
-                       "offset=%lld count=%u res=%u stateid=%d:0x%08x",
+                       "offset=%lld count=%u res=%u stateid=%d:0x%08x "
+                       "layoutstateid=%d:0x%08x",
                        -__entry->error,
                        show_nfsv4_errors(__entry->error),
                        MAJOR(__entry->dev), MINOR(__entry->dev),
@@ -1778,7 +1792,8 @@ DECLARE_EVENT_CLASS(nfs4_read_event,
                        __entry->fhandle,
                        (long long)__entry->offset,
                        __entry->arg_count, __entry->res_count,
-                       __entry->stateid_seq, __entry->stateid_hash
+                       __entry->stateid_seq, __entry->stateid_hash,
+                       __entry->layoutstateid_seq, __entry->layoutstateid_hash
                )
 );
 #define DEFINE_NFS4_READ_EVENT(name) \
@@ -1811,6 +1826,8 @@ DECLARE_EVENT_CLASS(nfs4_write_event,
                        __field(unsigned long, error)
                        __field(int, stateid_seq)
                        __field(u32, stateid_hash)
+                       __field(int, layoutstateid_seq)
+                       __field(u32, layoutstateid_hash)
                ),
 
                TP_fast_assign(
@@ -1820,6 +1837,7 @@ DECLARE_EVENT_CLASS(nfs4_write_event,
                                                  hdr->args.fh : &nfsi->fh;
                        const struct nfs4_state *state =
                                hdr->args.context->state;
+                       const struct pnfs_layout_segment *lseg = hdr->lseg;
 
                        __entry->dev = inode->i_sb->s_dev;
                        __entry->fileid = nfsi->fileid;
@@ -1832,11 +1850,15 @@ DECLARE_EVENT_CLASS(nfs4_write_event,
                                be32_to_cpu(state->stateid.seqid);
                        __entry->stateid_hash =
                                nfs_stateid_hash(&state->stateid);
+                       __entry->layoutstateid_seq = lseg ? lseg->pls_seq : 0;
+                       __entry->layoutstateid_hash =
+                               NFS4_LSEG_LAYOUT_STATEID_HASH(lseg);
                ),
 
                TP_printk(
                        "error=%ld (%s) fileid=%02x:%02x:%llu fhandle=0x%08x "
-                       "offset=%lld count=%u res=%u stateid=%d:0x%08x",
+                       "offset=%lld count=%u res=%u stateid=%d:0x%08x "
+                       "layoutstateid=%d:0x%08x",
                        -__entry->error,
                        show_nfsv4_errors(__entry->error),
                        MAJOR(__entry->dev), MINOR(__entry->dev),
@@ -1844,7 +1866,8 @@ DECLARE_EVENT_CLASS(nfs4_write_event,
                        __entry->fhandle,
                        (long long)__entry->offset,
                        __entry->arg_count, __entry->res_count,
-                       __entry->stateid_seq, __entry->stateid_hash
+                       __entry->stateid_seq, __entry->stateid_hash,
+                       __entry->layoutstateid_seq, __entry->layoutstateid_hash
                )
 );
 
@@ -1875,6 +1898,8 @@ DECLARE_EVENT_CLASS(nfs4_commit_event,
                        __field(unsigned long, error)
                        __field(loff_t, offset)
                        __field(u32, count)
+                       __field(int, layoutstateid_seq)
+                       __field(u32, layoutstateid_hash)
                ),
 
                TP_fast_assign(
@@ -1882,6 +1907,7 @@ DECLARE_EVENT_CLASS(nfs4_commit_event,
                        const struct nfs_inode *nfsi = NFS_I(inode);
                        const struct nfs_fh *fh = data->args.fh ?
                                                  data->args.fh : &nfsi->fh;
+                       const struct pnfs_layout_segment *lseg = data->lseg;
 
                        __entry->dev = inode->i_sb->s_dev;
                        __entry->fileid = nfsi->fileid;
@@ -1889,18 +1915,22 @@ DECLARE_EVENT_CLASS(nfs4_commit_event,
                        __entry->offset = data->args.offset;
                        __entry->count = data->args.count;
                        __entry->error = error < 0 ? -error : 0;
+                       __entry->layoutstateid_seq = lseg ? lseg->pls_seq : 0;
+                       __entry->layoutstateid_hash =
+                               NFS4_LSEG_LAYOUT_STATEID_HASH(lseg);
                ),
 
                TP_printk(
                        "error=%ld (%s) fileid=%02x:%02x:%llu fhandle=0x%08x "
-                       "offset=%lld count=%u",
+                       "offset=%lld count=%u layoutstateid=%d:0x%08x",
                        -__entry->error,
                        show_nfsv4_errors(__entry->error),
                        MAJOR(__entry->dev), MINOR(__entry->dev),
                        (unsigned long long)__entry->fileid,
                        __entry->fhandle,
                        (long long)__entry->offset,
-                       __entry->count
+                       __entry->count,
+                       __entry->layoutstateid_seq, __entry->layoutstateid_hash
                )
 );
 #define DEFINE_NFS4_COMMIT_EVENT(name) \
@@ -1993,7 +2023,9 @@ TRACE_EVENT(nfs4_layoutget,
 
 DEFINE_NFS4_INODE_STATEID_EVENT(nfs4_layoutcommit);
 DEFINE_NFS4_INODE_STATEID_EVENT(nfs4_layoutreturn);
-DEFINE_NFS4_INODE_EVENT(nfs4_layoutreturn_on_close);
+DEFINE_NFS4_INODE_STATEID_EVENT(nfs4_layoutreturn_on_close);
+DEFINE_NFS4_INODE_STATEID_EVENT(nfs4_layouterror);
+DEFINE_NFS4_INODE_STATEID_EVENT(nfs4_layoutstats);
 
 TRACE_DEFINE_ENUM(PNFS_UPDATE_LAYOUT_UNKNOWN);
 TRACE_DEFINE_ENUM(PNFS_UPDATE_LAYOUT_NO_PNFS);
index 47817ef..0b3510f 100644 (file)
@@ -4166,7 +4166,11 @@ static int decode_attr_security_label(struct xdr_stream *xdr, uint32_t *bitmap,
                        return -EIO;
                if (len < NFS4_MAXLABELLEN) {
                        if (label) {
-                               memcpy(label->label, p, len);
+                               if (label->len) {
+                                       if (label->len < len)
+                                               return -ERANGE;
+                                       memcpy(label->label, p, len);
+                               }
                                label->len = len;
                                label->pi = pi;
                                label->lfs = lfs;
@@ -4201,6 +4205,26 @@ static int decode_attr_time_modify(struct xdr_stream *xdr, uint32_t *bitmap, str
        return status;
 }
 
+static int decode_attr_xattrsupport(struct xdr_stream *xdr, uint32_t *bitmap,
+                                   uint32_t *res)
+{
+       __be32 *p;
+
+       *res = 0;
+       if (unlikely(bitmap[2] & (FATTR4_WORD2_XATTR_SUPPORT - 1U)))
+               return -EIO;
+       if (likely(bitmap[2] & FATTR4_WORD2_XATTR_SUPPORT)) {
+               p = xdr_inline_decode(xdr, 4);
+               if (unlikely(!p))
+                       return -EIO;
+               *res = be32_to_cpup(p);
+               bitmap[2] &= ~FATTR4_WORD2_XATTR_SUPPORT;
+       }
+       dprintk("%s: XATTR support=%s\n", __func__,
+               *res == 0 ? "false" : "true");
+       return 0;
+}
+
 static int verify_attr_len(struct xdr_stream *xdr, unsigned int savep, uint32_t attrlen)
 {
        unsigned int attrwords = XDR_QUADLEN(attrlen);
@@ -4855,6 +4879,11 @@ static int decode_fsinfo(struct xdr_stream *xdr, struct nfs_fsinfo *fsinfo)
        if (status)
                goto xdr_error;
 
+       status = decode_attr_xattrsupport(xdr, bitmap,
+                                         &fsinfo->xattr_support);
+       if (status)
+               goto xdr_error;
+
        status = verify_attr_len(xdr, savep, attrlen);
 xdr_error:
        dprintk("%s: xdr returned %d!\n", __func__, -status);
@@ -5227,7 +5256,7 @@ static int decode_readlink(struct xdr_stream *xdr, struct rpc_rqst *req)
         * The XDR encode routine has set things up so that
         * the link text will be copied directly into the
         * buffer.  We just have to do overflow-checking,
-        * and and null-terminate the text (the VFS expects
+        * and null-terminate the text (the VFS expects
         * null-termination).
         */
        xdr_terminate_string(rcvbuf, len);
@@ -7456,6 +7485,8 @@ static struct {
        { NFS4ERR_SYMLINK,      -ELOOP          },
        { NFS4ERR_OP_ILLEGAL,   -EOPNOTSUPP     },
        { NFS4ERR_DEADLOCK,     -EDEADLK        },
+       { NFS4ERR_NOXATTR,      -ENODATA        },
+       { NFS4ERR_XATTR2BIG,    -E2BIG          },
        { -1,                   -EIO            }
 };
 
@@ -7584,6 +7615,10 @@ const struct rpc_procinfo nfs4_procedures[] = {
        PROC42(COPY_NOTIFY,     enc_copy_notify,        dec_copy_notify),
        PROC(LOOKUPP,           enc_lookupp,            dec_lookupp),
        PROC42(LAYOUTERROR,     enc_layouterror,        dec_layouterror),
+       PROC42(GETXATTR,        enc_getxattr,           dec_getxattr),
+       PROC42(SETXATTR,        enc_setxattr,           dec_setxattr),
+       PROC42(LISTXATTRS,      enc_listxattrs,         dec_listxattrs),
+       PROC42(REMOVEXATTR,     enc_removexattr,        dec_removexattr),
 };
 
 static unsigned int nfs_version4_counts[ARRAY_SIZE(nfs4_procedures)];
index 547cec7..5a59dcd 100644 (file)
@@ -59,7 +59,8 @@ TRACE_DEFINE_ENUM(NFS_INO_INVALID_OTHER);
                        { NFS_INO_INVALID_CTIME, "INVALID_CTIME" }, \
                        { NFS_INO_INVALID_MTIME, "INVALID_MTIME" }, \
                        { NFS_INO_INVALID_SIZE, "INVALID_SIZE" }, \
-                       { NFS_INO_INVALID_OTHER, "INVALID_OTHER" })
+                       { NFS_INO_INVALID_OTHER, "INVALID_OTHER" }, \
+                       { NFS_INO_INVALID_XATTR, "INVALID_XATTR" })
 
 TRACE_DEFINE_ENUM(NFS_INO_ADVISE_RDPLUS);
 TRACE_DEFINE_ENUM(NFS_INO_STALE);
index dd2e14f..40332c7 100644 (file)
@@ -1226,31 +1226,27 @@ out:
        return status;
 }
 
+static bool
+pnfs_layout_segments_returnable(struct pnfs_layout_hdr *lo,
+                               enum pnfs_iomode iomode,
+                               u32 seq)
+{
+       struct pnfs_layout_range recall_range = {
+               .length = NFS4_MAX_UINT64,
+               .iomode = iomode,
+       };
+       return pnfs_mark_matching_lsegs_return(lo, &lo->plh_return_segs,
+                                              &recall_range, seq) != -EBUSY;
+}
+
 /* Return true if layoutreturn is needed */
 static bool
 pnfs_layout_need_return(struct pnfs_layout_hdr *lo)
 {
-       struct pnfs_layout_segment *s;
-       enum pnfs_iomode iomode;
-       u32 seq;
-
        if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
                return false;
-
-       seq = lo->plh_return_seq;
-       iomode = lo->plh_return_iomode;
-
-       /* Defer layoutreturn until all recalled lsegs are done */
-       list_for_each_entry(s, &lo->plh_segs, pls_list) {
-               if (seq && pnfs_seqid_is_newer(s->pls_seq, seq))
-                       continue;
-               if (iomode != IOMODE_ANY && s->pls_range.iomode != iomode)
-                       continue;
-               if (test_bit(NFS_LSEG_LAYOUTRETURN, &s->pls_flags))
-                       return false;
-       }
-
-       return true;
+       return pnfs_layout_segments_returnable(lo, lo->plh_return_iomode,
+                                              lo->plh_return_seq);
 }
 
 static void pnfs_layoutreturn_before_put_layout_hdr(struct pnfs_layout_hdr *lo)
@@ -1549,12 +1545,12 @@ void pnfs_roc_release(struct nfs4_layoutreturn_args *args,
        default:
                arg_stateid = &args->stateid;
        }
+       trace_nfs4_layoutreturn_on_close(args->inode, &args->stateid, ret);
        pnfs_layoutreturn_free_lsegs(lo, arg_stateid, &args->range,
                        res_stateid);
        if (ld_private && ld_private->ops && ld_private->ops->free)
                ld_private->ops->free(ld_private);
        pnfs_put_layout_hdr(lo);
-       trace_nfs4_layoutreturn_on_close(args->inode, 0);
 }
 
 bool pnfs_wait_on_layoutreturn(struct inode *ino, struct rpc_task *task)
@@ -2392,16 +2388,6 @@ out_forget:
        return ERR_PTR(-EAGAIN);
 }
 
-static int
-mark_lseg_invalid_or_return(struct pnfs_layout_segment *lseg,
-               struct list_head *tmp_list)
-{
-       if (!mark_lseg_invalid(lseg, tmp_list))
-               return 0;
-       pnfs_cache_lseg_for_layoutreturn(lseg->pls_layout, lseg);
-       return 1;
-}
-
 /**
  * pnfs_mark_matching_lsegs_return - Free or return matching layout segments
  * @lo: pointer to layout header
@@ -2438,7 +2424,7 @@ pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo,
                                lseg, lseg->pls_range.iomode,
                                lseg->pls_range.offset,
                                lseg->pls_range.length);
-                       if (mark_lseg_invalid_or_return(lseg, tmp_list))
+                       if (mark_lseg_invalid(lseg, tmp_list))
                                continue;
                        remaining++;
                        set_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags);
@@ -2953,7 +2939,8 @@ pnfs_try_to_read_data(struct nfs_pgio_header *hdr,
 }
 
 /* Resend all requests through pnfs. */
-void pnfs_read_resend_pnfs(struct nfs_pgio_header *hdr)
+void pnfs_read_resend_pnfs(struct nfs_pgio_header *hdr,
+                          unsigned int mirror_idx)
 {
        struct nfs_pageio_descriptor pgio;
 
@@ -2964,6 +2951,7 @@ void pnfs_read_resend_pnfs(struct nfs_pgio_header *hdr)
 
                nfs_pageio_init_read(&pgio, hdr->inode, false,
                                        hdr->completion_ops);
+               pgio.pg_mirror_idx = mirror_idx;
                hdr->task.tk_status = nfs_pageio_resend(&pgio, hdr);
        }
 }
index 8e0ada5..2661c44 100644 (file)
@@ -311,7 +311,7 @@ int _pnfs_return_layout(struct inode *);
 int pnfs_commit_and_return_layout(struct inode *);
 void pnfs_ld_write_done(struct nfs_pgio_header *);
 void pnfs_ld_read_done(struct nfs_pgio_header *);
-void pnfs_read_resend_pnfs(struct nfs_pgio_header *);
+void pnfs_read_resend_pnfs(struct nfs_pgio_header *, unsigned int mirror_idx);
 struct pnfs_layout_segment *pnfs_update_layout(struct inode *ino,
                                               struct nfs_open_context *ctx,
                                               loff_t pos,
index 9217cb6..7346acd 100644 (file)
@@ -171,7 +171,7 @@ struct nfsd_net {
        unsigned int             longest_chain_cachesize;
 
        struct shrinker         nfsd_reply_cache_shrinker;
-       /* utsname taken from the the process that starts the server */
+       /* utsname taken from the process that starts the server */
        char                    nfsd_name[UNX_MAXNODENAME+1];
 };
 
index 9460be8..f92161c 100644 (file)
@@ -168,7 +168,7 @@ idtoname_show(struct seq_file *m, struct cache_detail *cd, struct cache_head *h)
                        ent->id);
        if (test_bit(CACHE_VALID, &h->flags))
                seq_printf(m, " %s", ent->name);
-       seq_printf(m, "\n");
+       seq_putc(m, '\n');
        return 0;
 }
 
@@ -346,7 +346,7 @@ nametoid_show(struct seq_file *m, struct cache_detail *cd, struct cache_head *h)
                        ent->name);
        if (test_bit(CACHE_VALID, &h->flags))
                seq_printf(m, " %u", ent->id);
-       seq_printf(m, "\n");
+       seq_putc(m, '\n');
        return 0;
 }
 
index a09c35f..a527da3 100644 (file)
@@ -566,8 +566,14 @@ nfsd4_access(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
             union nfsd4_op_u *u)
 {
        struct nfsd4_access *access = &u->access;
+       u32 access_full;
 
-       if (access->ac_req_access & ~NFS3_ACCESS_FULL)
+       access_full = NFS3_ACCESS_FULL;
+       if (cstate->minorversion >= 2)
+               access_full |= NFS4_ACCESS_XALIST | NFS4_ACCESS_XAREAD |
+                              NFS4_ACCESS_XAWRITE;
+
+       if (access->ac_req_access & ~access_full)
                return nfserr_inval;
 
        access->ac_resp_access = access->ac_req_access;
@@ -2091,6 +2097,68 @@ out:
 }
 #endif /* CONFIG_NFSD_PNFS */
 
+static __be32
+nfsd4_getxattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+              union nfsd4_op_u *u)
+{
+       struct nfsd4_getxattr *getxattr = &u->getxattr;
+
+       return nfsd_getxattr(rqstp, &cstate->current_fh,
+                            getxattr->getxa_name, &getxattr->getxa_buf,
+                            &getxattr->getxa_len);
+}
+
+static __be32
+nfsd4_setxattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+          union nfsd4_op_u *u)
+{
+       struct nfsd4_setxattr *setxattr = &u->setxattr;
+       __be32 ret;
+
+       if (opens_in_grace(SVC_NET(rqstp)))
+               return nfserr_grace;
+
+       ret = nfsd_setxattr(rqstp, &cstate->current_fh, setxattr->setxa_name,
+                           setxattr->setxa_buf, setxattr->setxa_len,
+                           setxattr->setxa_flags);
+
+       if (!ret)
+               set_change_info(&setxattr->setxa_cinfo, &cstate->current_fh);
+
+       return ret;
+}
+
+static __be32
+nfsd4_listxattrs(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+          union nfsd4_op_u *u)
+{
+       /*
+        * Get the entire list, then copy out only the user attributes
+        * in the encode function.
+        */
+       return nfsd_listxattr(rqstp, &cstate->current_fh,
+                            &u->listxattrs.lsxa_buf, &u->listxattrs.lsxa_len);
+}
+
+static __be32
+nfsd4_removexattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+          union nfsd4_op_u *u)
+{
+       struct nfsd4_removexattr *removexattr = &u->removexattr;
+       __be32 ret;
+
+       if (opens_in_grace(SVC_NET(rqstp)))
+               return nfserr_grace;
+
+       ret = nfsd_removexattr(rqstp, &cstate->current_fh,
+           removexattr->rmxa_name);
+
+       if (!ret)
+               set_change_info(&removexattr->rmxa_cinfo, &cstate->current_fh);
+
+       return ret;
+}
+
 /*
  * NULL call.
  */
@@ -2700,6 +2768,42 @@ static inline u32 nfsd4_seek_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
        return (op_encode_hdr_size + 3) * sizeof(__be32);
 }
 
+static inline u32 nfsd4_getxattr_rsize(struct svc_rqst *rqstp,
+                                      struct nfsd4_op *op)
+{
+       u32 maxcount, rlen;
+
+       maxcount = svc_max_payload(rqstp);
+       rlen = min_t(u32, XATTR_SIZE_MAX, maxcount);
+
+       return (op_encode_hdr_size + 1 + XDR_QUADLEN(rlen)) * sizeof(__be32);
+}
+
+static inline u32 nfsd4_setxattr_rsize(struct svc_rqst *rqstp,
+                                      struct nfsd4_op *op)
+{
+       return (op_encode_hdr_size + op_encode_change_info_maxsz)
+               * sizeof(__be32);
+}
+static inline u32 nfsd4_listxattrs_rsize(struct svc_rqst *rqstp,
+                                        struct nfsd4_op *op)
+{
+       u32 maxcount, rlen;
+
+       maxcount = svc_max_payload(rqstp);
+       rlen = min(op->u.listxattrs.lsxa_maxcount, maxcount);
+
+       return (op_encode_hdr_size + 4 + XDR_QUADLEN(rlen)) * sizeof(__be32);
+}
+
+static inline u32 nfsd4_removexattr_rsize(struct svc_rqst *rqstp,
+                                         struct nfsd4_op *op)
+{
+       return (op_encode_hdr_size + op_encode_change_info_maxsz)
+               * sizeof(__be32);
+}
+
+
 static const struct nfsd4_operation nfsd4_ops[] = {
        [OP_ACCESS] = {
                .op_func = nfsd4_access,
@@ -3081,6 +3185,28 @@ static const struct nfsd4_operation nfsd4_ops[] = {
                .op_name = "OP_COPY_NOTIFY",
                .op_rsize_bop = nfsd4_copy_notify_rsize,
        },
+       [OP_GETXATTR] = {
+               .op_func = nfsd4_getxattr,
+               .op_name = "OP_GETXATTR",
+               .op_rsize_bop = nfsd4_getxattr_rsize,
+       },
+       [OP_SETXATTR] = {
+               .op_func = nfsd4_setxattr,
+               .op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME,
+               .op_name = "OP_SETXATTR",
+               .op_rsize_bop = nfsd4_setxattr_rsize,
+       },
+       [OP_LISTXATTRS] = {
+               .op_func = nfsd4_listxattrs,
+               .op_name = "OP_LISTXATTRS",
+               .op_rsize_bop = nfsd4_listxattrs_rsize,
+       },
+       [OP_REMOVEXATTR] = {
+               .op_func = nfsd4_removexattr,
+               .op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME,
+               .op_name = "OP_REMOVEXATTR",
+               .op_rsize_bop = nfsd4_removexattr_rsize,
+       },
 };
 
 /**
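
The nfsd reply-size estimators added above budget each operation in 4-byte XDR words: for LISTXATTRS, an op header plus a cookie (2 words), a name count (1 word), an EOF flag (1 word), and XDR_QUADLEN(rlen) words for the rounded-up name data. The sketch below reproduces that arithmetic on its own; XDR_QUADLEN matches the kernel's definition, while OP_HDR_WORDS is an assumed placeholder for op_encode_hdr_size rather than the real value.

/* Standalone illustration of the nfsd4_listxattrs_rsize() estimate. */
#include <stdio.h>
#include <stdint.h>

#define XDR_QUADLEN(l)  (((l) + 3) >> 2)   /* bytes -> 4-byte XDR words, rounded up */
#define OP_HDR_WORDS    2U                 /* assumed op header size in words */

static uint32_t listxattrs_rsize(uint32_t lsxa_maxcount, uint32_t max_payload)
{
        uint32_t rlen = lsxa_maxcount < max_payload ? lsxa_maxcount : max_payload;

        /* header + cookie (2 words) + count (1) + eof (1) + rounded name data */
        return (OP_HDR_WORDS + 4 + XDR_QUADLEN(rlen)) * (uint32_t)sizeof(uint32_t);
}

int main(void)
{
        printf("estimate for a 1000-byte budget: %u bytes\n",
               (unsigned int)listxattrs_rsize(1000, 4096));
        return 0;
}
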
index 9e40dfe..186fa2c 100644 (file)
@@ -747,13 +747,11 @@ struct cld_upcall {
 };
 
 static int
-__cld_pipe_upcall(struct rpc_pipe *pipe, void *cmsg)
+__cld_pipe_upcall(struct rpc_pipe *pipe, void *cmsg, struct nfsd_net *nn)
 {
        int ret;
        struct rpc_pipe_msg msg;
        struct cld_upcall *cup = container_of(cmsg, struct cld_upcall, cu_u);
-       struct nfsd_net *nn = net_generic(pipe->dentry->d_sb->s_fs_info,
-                                         nfsd_net_id);
 
        memset(&msg, 0, sizeof(msg));
        msg.data = cmsg;
@@ -773,7 +771,7 @@ out:
 }
 
 static int
-cld_pipe_upcall(struct rpc_pipe *pipe, void *cmsg)
+cld_pipe_upcall(struct rpc_pipe *pipe, void *cmsg, struct nfsd_net *nn)
 {
        int ret;
 
@@ -782,7 +780,7 @@ cld_pipe_upcall(struct rpc_pipe *pipe, void *cmsg)
         *  upcalls queued.
         */
        do {
-               ret = __cld_pipe_upcall(pipe, cmsg);
+               ret = __cld_pipe_upcall(pipe, cmsg, nn);
        } while (ret == -EAGAIN);
 
        return ret;
@@ -1115,7 +1113,7 @@ nfsd4_cld_create(struct nfs4_client *clp)
        memcpy(cup->cu_u.cu_msg.cm_u.cm_name.cn_id, clp->cl_name.data,
                        clp->cl_name.len);
 
-       ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_u.cu_msg);
+       ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_u.cu_msg, nn);
        if (!ret) {
                ret = cup->cu_u.cu_msg.cm_status;
                set_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags);
@@ -1180,7 +1178,7 @@ nfsd4_cld_create_v2(struct nfs4_client *clp)
        } else
                cmsg->cm_u.cm_clntinfo.cc_princhash.cp_len = 0;
 
-       ret = cld_pipe_upcall(cn->cn_pipe, cmsg);
+       ret = cld_pipe_upcall(cn->cn_pipe, cmsg, nn);
        if (!ret) {
                ret = cmsg->cm_status;
                set_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags);
@@ -1218,7 +1216,7 @@ nfsd4_cld_remove(struct nfs4_client *clp)
        memcpy(cup->cu_u.cu_msg.cm_u.cm_name.cn_id, clp->cl_name.data,
                        clp->cl_name.len);
 
-       ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_u.cu_msg);
+       ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_u.cu_msg, nn);
        if (!ret) {
                ret = cup->cu_u.cu_msg.cm_status;
                clear_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags);
@@ -1261,7 +1259,7 @@ nfsd4_cld_check_v0(struct nfs4_client *clp)
        memcpy(cup->cu_u.cu_msg.cm_u.cm_name.cn_id, clp->cl_name.data,
                        clp->cl_name.len);
 
-       ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_u.cu_msg);
+       ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_u.cu_msg, nn);
        if (!ret) {
                ret = cup->cu_u.cu_msg.cm_status;
                set_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags);
@@ -1404,7 +1402,7 @@ nfsd4_cld_grace_start(struct nfsd_net *nn)
        }
 
        cup->cu_u.cu_msg.cm_cmd = Cld_GraceStart;
-       ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_u.cu_msg);
+       ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_u.cu_msg, nn);
        if (!ret)
                ret = cup->cu_u.cu_msg.cm_status;
 
@@ -1432,7 +1430,7 @@ nfsd4_cld_grace_done_v0(struct nfsd_net *nn)
 
        cup->cu_u.cu_msg.cm_cmd = Cld_GraceDone;
        cup->cu_u.cu_msg.cm_u.cm_gracetime = nn->boot_time;
-       ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_u.cu_msg);
+       ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_u.cu_msg, nn);
        if (!ret)
                ret = cup->cu_u.cu_msg.cm_status;
 
@@ -1460,7 +1458,7 @@ nfsd4_cld_grace_done(struct nfsd_net *nn)
        }
 
        cup->cu_u.cu_msg.cm_cmd = Cld_GraceDone;
-       ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_u.cu_msg);
+       ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_u.cu_msg, nn);
        if (!ret)
                ret = cup->cu_u.cu_msg.cm_status;
 
@@ -1524,7 +1522,7 @@ nfsd4_cld_get_version(struct nfsd_net *nn)
                goto out_err;
        }
        cup->cu_u.cu_msg.cm_cmd = Cld_GetVersion;
-       ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_u.cu_msg);
+       ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_u.cu_msg, nn);
        if (!ret) {
                ret = cup->cu_u.cu_msg.cm_status;
                if (ret)
index c905631..81ed8e8 100644 (file)
@@ -4940,6 +4940,32 @@ static struct file_lock *nfs4_alloc_init_lease(struct nfs4_delegation *dp,
        return fl;
 }
 
+static int nfsd4_check_conflicting_opens(struct nfs4_client *clp,
+                                               struct nfs4_file *fp)
+{
+       struct nfs4_clnt_odstate *co;
+       struct file *f = fp->fi_deleg_file->nf_file;
+       struct inode *ino = locks_inode(f);
+       int writes = atomic_read(&ino->i_writecount);
+
+       if (fp->fi_fds[O_WRONLY])
+               writes--;
+       if (fp->fi_fds[O_RDWR])
+               writes--;
+       WARN_ON_ONCE(writes < 0);
+       if (writes > 0)
+               return -EAGAIN;
+       spin_lock(&fp->fi_lock);
+       list_for_each_entry(co, &fp->fi_clnt_odstate, co_perfile) {
+               if (co->co_client != clp) {
+                       spin_unlock(&fp->fi_lock);
+                       return -EAGAIN;
+               }
+       }
+       spin_unlock(&fp->fi_lock);
+       return 0;
+}
+
 static struct nfs4_delegation *
 nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
                    struct nfs4_file *fp, struct nfs4_clnt_odstate *odstate)
@@ -4959,9 +4985,12 @@ nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
 
        nf = find_readable_file(fp);
        if (!nf) {
-               /* We should always have a readable file here */
-               WARN_ON_ONCE(1);
-               return ERR_PTR(-EBADF);
+               /*
+                * We probably could attempt another open and get a read
+                * delegation, but for now, don't bother until the
+                * client actually sends us one.
+                */
+               return ERR_PTR(-EAGAIN);
        }
        spin_lock(&state_lock);
        spin_lock(&fp->fi_lock);
@@ -4991,11 +5020,19 @@ nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
        if (!fl)
                goto out_clnt_odstate;
 
+       status = nfsd4_check_conflicting_opens(clp, fp);
+       if (status) {
+               locks_free_lock(fl);
+               goto out_clnt_odstate;
+       }
        status = vfs_setlease(fp->fi_deleg_file->nf_file, fl->fl_type, &fl, NULL);
        if (fl)
                locks_free_lock(fl);
        if (status)
                goto out_clnt_odstate;
+       status = nfsd4_check_conflicting_opens(clp, fp);
+       if (status)
+               goto out_clnt_odstate;
 
        spin_lock(&state_lock);
        spin_lock(&fp->fi_lock);
@@ -5077,17 +5114,6 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open,
                                goto out_no_deleg;
                        if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED))
                                goto out_no_deleg;
-                       /*
-                        * Also, if the file was opened for write or
-                        * create, there's a good chance the client's
-                        * about to write to it, resulting in an
-                        * immediate recall (since we don't support
-                        * write delegations):
-                        */
-                       if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE)
-                               goto out_no_deleg;
-                       if (open->op_create == NFS4_OPEN_CREATE)
-                               goto out_no_deleg;
                        break;
                default:
                        goto out_no_deleg;
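The delegation change above has a check / acquire / re-check shape: nfsd4_check_conflicting_opens() compares i_writecount against the server's own writable opens (and walks the per-file odstate list) before the lease is requested, and is called again once vfs_setlease() has installed the lease, so a conflicting open that races in between is still caught. The userspace toy below only illustrates that double-check pattern; struct toy_file, try_grant_read_delegation() and the "leased" flag are made up and stand in very loosely for the nfsd structures and vfs_setlease().

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Toy model: writers plays the role of i_writecount, our_writers the
     * writable opens the server holds itself. */
    struct toy_file {
            atomic_int writers;
            int        our_writers;
            bool       leased;              /* stands in for vfs_setlease() */
    };

    static bool conflicting_writers(struct toy_file *f)
    {
            return atomic_load(&f->writers) - f->our_writers > 0;
    }

    static bool try_grant_read_delegation(struct toy_file *f)
    {
            if (conflicting_writers(f))         /* cheap early check */
                    return false;
            f->leased = true;                   /* "take the lease" */
            if (conflicting_writers(f)) {       /* re-check: catch a racing open */
                    f->leased = false;
                    return false;
            }
            return true;
    }

    int main(void)
    {
            struct toy_file f = { .writers = 1, .our_writers = 1 };

            printf("delegation granted: %d\n", try_grant_read_delegation(&f));
            return 0;
    }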
index 996ac01..259d5ad 100644 (file)
@@ -41,6 +41,8 @@
 #include <linux/pagemap.h>
 #include <linux/sunrpc/svcauth_gss.h>
 #include <linux/sunrpc/addr.h>
+#include <linux/xattr.h>
+#include <uapi/linux/xattr.h>
 
 #include "idmap.h"
 #include "acl.h"
@@ -257,6 +259,44 @@ svcxdr_dupstr(struct nfsd4_compoundargs *argp, void *buf, u32 len)
        return p;
 }
 
+static __be32
+svcxdr_construct_vector(struct nfsd4_compoundargs *argp, struct kvec *head,
+                       struct page ***pagelist, u32 buflen)
+{
+       int avail;
+       int len;
+       int pages;
+
+       /* Sorry .. no magic macros for this.. *
+        * READ_BUF(write->wr_buflen);
+        * SAVEMEM(write->wr_buf, write->wr_buflen);
+        */
+       avail = (char *)argp->end - (char *)argp->p;
+       if (avail + argp->pagelen < buflen) {
+               dprintk("NFSD: xdr error (%s:%d)\n",
+                              __FILE__, __LINE__);
+               return nfserr_bad_xdr;
+       }
+       head->iov_base = argp->p;
+       head->iov_len = avail;
+       *pagelist = argp->pagelist;
+
+       len = XDR_QUADLEN(buflen) << 2;
+       if (len >= avail) {
+               len -= avail;
+
+               pages = len >> PAGE_SHIFT;
+               argp->pagelist += pages;
+               argp->pagelen -= pages * PAGE_SIZE;
+               len -= pages * PAGE_SIZE;
+
+               next_decode_page(argp);
+       }
+       argp->p += XDR_QUADLEN(len);
+
+       return 0;
+}
+
 /**
  * savemem - duplicate a chunk of memory for later processing
  * @argp: NFSv4 compound argument structure to be freed with
@@ -1265,8 +1305,6 @@ nfsd4_decode_verify(struct nfsd4_compoundargs *argp, struct nfsd4_verify *verify
 static __be32
 nfsd4_decode_write(struct nfsd4_compoundargs *argp, struct nfsd4_write *write)
 {
-       int avail;
-       int len;
        DECODE_HEAD;
 
        status = nfsd4_decode_stateid(argp, &write->wr_stateid);
@@ -1279,34 +1317,10 @@ nfsd4_decode_write(struct nfsd4_compoundargs *argp, struct nfsd4_write *write)
                goto xdr_error;
        write->wr_buflen = be32_to_cpup(p++);
 
-       /* Sorry .. no magic macros for this.. *
-        * READ_BUF(write->wr_buflen);
-        * SAVEMEM(write->wr_buf, write->wr_buflen);
-        */
-       avail = (char*)argp->end - (char*)argp->p;
-       if (avail + argp->pagelen < write->wr_buflen) {
-               dprintk("NFSD: xdr error (%s:%d)\n",
-                               __FILE__, __LINE__);
-               goto xdr_error;
-       }
-       write->wr_head.iov_base = p;
-       write->wr_head.iov_len = avail;
-       write->wr_pagelist = argp->pagelist;
-
-       len = XDR_QUADLEN(write->wr_buflen) << 2;
-       if (len >= avail) {
-               int pages;
-
-               len -= avail;
-
-               pages = len >> PAGE_SHIFT;
-               argp->pagelist += pages;
-               argp->pagelen -= pages * PAGE_SIZE;
-               len -= pages * PAGE_SIZE;
-
-               next_decode_page(argp);
-       }
-       argp->p += XDR_QUADLEN(len);
+       status = svcxdr_construct_vector(argp, &write->wr_head,
+                                        &write->wr_pagelist, write->wr_buflen);
+       if (status)
+               return status;
 
        DECODE_TAIL;
 }
@@ -1865,6 +1879,208 @@ nfsd4_decode_seek(struct nfsd4_compoundargs *argp, struct nfsd4_seek *seek)
        DECODE_TAIL;
 }
 
+/*
+ * XDR data that is more than PAGE_SIZE in size is normally part of a
+ * read or write. However, the size of extended attributes is limited
+ * by the maximum request size, and then further limited by the underlying
+ * filesystem limits. This can exceed PAGE_SIZE (currently, XATTR_SIZE_MAX
+ * is 64k). Since there is no kvec- or page-based interface to xattrs,
+ * and we're not dealing with contiguous pages, we need to do some copying.
+ */
+
+/*
+ * Decode data into buffer. Uses head and pages constructed by
+ * svcxdr_construct_vector.
+ */
+static __be32
+nfsd4_vbuf_from_vector(struct nfsd4_compoundargs *argp, struct kvec *head,
+                      struct page **pages, char **bufp, u32 buflen)
+{
+       char *tmp, *dp;
+       u32 len;
+
+       if (buflen <= head->iov_len) {
+               /*
+                * We're in luck, the head has enough space. Just return
+                * the head, no need for copying.
+                */
+               *bufp = head->iov_base;
+               return 0;
+       }
+
+       tmp = svcxdr_tmpalloc(argp, buflen);
+       if (tmp == NULL)
+               return nfserr_jukebox;
+
+       dp = tmp;
+       memcpy(dp, head->iov_base, head->iov_len);
+       buflen -= head->iov_len;
+       dp += head->iov_len;
+
+       while (buflen > 0) {
+               len = min_t(u32, buflen, PAGE_SIZE);
+               memcpy(dp, page_address(*pages), len);
+
+               buflen -= len;
+               dp += len;
+               pages++;
+       }
+
+       *bufp = tmp;
+       return 0;
+}
+
+/*
+ * Get a user extended attribute name from the XDR buffer.
+ * It will not have the "user." prefix, so prepend it.
+ * Lastly, check for nul characters in the name.
+ */
+static __be32
+nfsd4_decode_xattr_name(struct nfsd4_compoundargs *argp, char **namep)
+{
+       DECODE_HEAD;
+       char *name, *sp, *dp;
+       u32 namelen, cnt;
+
+       READ_BUF(4);
+       namelen = be32_to_cpup(p++);
+
+       if (namelen > (XATTR_NAME_MAX - XATTR_USER_PREFIX_LEN))
+               return nfserr_nametoolong;
+
+       if (namelen == 0)
+               goto xdr_error;
+
+       READ_BUF(namelen);
+
+       name = svcxdr_tmpalloc(argp, namelen + XATTR_USER_PREFIX_LEN + 1);
+       if (!name)
+               return nfserr_jukebox;
+
+       memcpy(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN);
+
+       /*
+        * Copy the extended attribute name over while checking for 0
+        * characters.
+        */
+       sp = (char *)p;
+       dp = name + XATTR_USER_PREFIX_LEN;
+       cnt = namelen;
+
+       while (cnt-- > 0) {
+               if (*sp == '\0')
+                       goto xdr_error;
+               *dp++ = *sp++;
+       }
+       *dp = '\0';
+
+       *namep = name;
+
+       DECODE_TAIL;
+}
+
+/*
+ * A GETXATTR op request comes without a length specifier. We just set the
+ * maximum length for the reply based on XATTR_SIZE_MAX and the maximum
+ * channel reply size. nfsd_getxattr will probe the length of the xattr,
+ * check it against getxa_len, and allocate + return the value.
+ */
+static __be32
+nfsd4_decode_getxattr(struct nfsd4_compoundargs *argp,
+                     struct nfsd4_getxattr *getxattr)
+{
+       __be32 status;
+       u32 maxcount;
+
+       status = nfsd4_decode_xattr_name(argp, &getxattr->getxa_name);
+       if (status)
+               return status;
+
+       maxcount = svc_max_payload(argp->rqstp);
+       maxcount = min_t(u32, XATTR_SIZE_MAX, maxcount);
+
+       getxattr->getxa_len = maxcount;
+
+       return status;
+}
+
+static __be32
+nfsd4_decode_setxattr(struct nfsd4_compoundargs *argp,
+                     struct nfsd4_setxattr *setxattr)
+{
+       DECODE_HEAD;
+       u32 flags, maxcount, size;
+       struct kvec head;
+       struct page **pagelist;
+
+       READ_BUF(4);
+       flags = be32_to_cpup(p++);
+
+       if (flags > SETXATTR4_REPLACE)
+               return nfserr_inval;
+       setxattr->setxa_flags = flags;
+
+       status = nfsd4_decode_xattr_name(argp, &setxattr->setxa_name);
+       if (status)
+               return status;
+
+       maxcount = svc_max_payload(argp->rqstp);
+       maxcount = min_t(u32, XATTR_SIZE_MAX, maxcount);
+
+       READ_BUF(4);
+       size = be32_to_cpup(p++);
+       if (size > maxcount)
+               return nfserr_xattr2big;
+
+       setxattr->setxa_len = size;
+       if (size > 0) {
+               status = svcxdr_construct_vector(argp, &head, &pagelist, size);
+               if (status)
+                       return status;
+
+               status = nfsd4_vbuf_from_vector(argp, &head, pagelist,
+                   &setxattr->setxa_buf, size);
+       }
+
+       DECODE_TAIL;
+}
+
+static __be32
+nfsd4_decode_listxattrs(struct nfsd4_compoundargs *argp,
+                       struct nfsd4_listxattrs *listxattrs)
+{
+       DECODE_HEAD;
+       u32 maxcount;
+
+       READ_BUF(12);
+       p = xdr_decode_hyper(p, &listxattrs->lsxa_cookie);
+
+       /*
+        * If the cookie is too large to have even one user.x attribute
+        * plus trailing '\0' left in a maximum size buffer, it's invalid.
+        */
+       if (listxattrs->lsxa_cookie >=
+           (XATTR_LIST_MAX / (XATTR_USER_PREFIX_LEN + 2)))
+               return nfserr_badcookie;
+
+       maxcount = be32_to_cpup(p++);
+       if (maxcount < 8)
+               /* Always need at least 2 words (length and one character) */
+               return nfserr_inval;
+
+       maxcount = min(maxcount, svc_max_payload(argp->rqstp));
+       listxattrs->lsxa_maxcount = maxcount;
+
+       DECODE_TAIL;
+}
+
+static __be32
+nfsd4_decode_removexattr(struct nfsd4_compoundargs *argp,
+                        struct nfsd4_removexattr *removexattr)
+{
+       return nfsd4_decode_xattr_name(argp, &removexattr->rmxa_name);
+}
+
 static __be32
 nfsd4_decode_noop(struct nfsd4_compoundargs *argp, void *p)
 {
@@ -1961,6 +2177,11 @@ static const nfsd4_dec nfsd4_dec_ops[] = {
        [OP_SEEK]               = (nfsd4_dec)nfsd4_decode_seek,
        [OP_WRITE_SAME]         = (nfsd4_dec)nfsd4_decode_notsupp,
        [OP_CLONE]              = (nfsd4_dec)nfsd4_decode_clone,
+       /* RFC 8276 extended attributes operations */
+       [OP_GETXATTR]           = (nfsd4_dec)nfsd4_decode_getxattr,
+       [OP_SETXATTR]           = (nfsd4_dec)nfsd4_decode_setxattr,
+       [OP_LISTXATTRS]         = (nfsd4_dec)nfsd4_decode_listxattrs,
+       [OP_REMOVEXATTR]        = (nfsd4_dec)nfsd4_decode_removexattr,
 };
 
 static inline bool
@@ -2992,6 +3213,15 @@ out_acl:
        }
 #endif
 
+       if (bmval2 & FATTR4_WORD2_XATTR_SUPPORT) {
+               p = xdr_reserve_space(xdr, 4);
+               if (!p)
+                       goto out_resource;
+               err = xattr_supported_namespace(d_inode(dentry),
+                                               XATTR_USER_PREFIX);
+               *p++ = cpu_to_be32(err == 0);
+       }
+
        attrlen = htonl(xdr->buf->len - attrlen_offset - 4);
        write_bytes_to_xdr_buf(xdr->buf, attrlen_offset, &attrlen, 4);
        status = nfs_ok;
@@ -4446,6 +4676,241 @@ nfsd4_encode_noop(struct nfsd4_compoundres *resp, __be32 nfserr, void *p)
        return nfserr;
 }
 
+/*
+ * Encode a kmalloc-ed buffer into the XDR stream.
+ */
+static __be32
+nfsd4_vbuf_to_stream(struct xdr_stream *xdr, char *buf, u32 buflen)
+{
+       u32 cplen;
+       __be32 *p;
+
+       cplen = min_t(unsigned long, buflen,
+                     ((void *)xdr->end - (void *)xdr->p));
+       p = xdr_reserve_space(xdr, cplen);
+       if (!p)
+               return nfserr_resource;
+
+       memcpy(p, buf, cplen);
+       buf += cplen;
+       buflen -= cplen;
+
+       while (buflen) {
+               cplen = min_t(u32, buflen, PAGE_SIZE);
+               p = xdr_reserve_space(xdr, cplen);
+               if (!p)
+                       return nfserr_resource;
+
+               memcpy(p, buf, cplen);
+
+               if (cplen < PAGE_SIZE) {
+                       /*
+                        * We're done, with a length that wasn't page
+                        * aligned, so possibly not word aligned. Pad
+                        * any trailing bytes with 0.
+                        */
+                       xdr_encode_opaque_fixed(p, NULL, cplen);
+                       break;
+               }
+
+               buflen -= PAGE_SIZE;
+               buf += PAGE_SIZE;
+       }
+
+       return 0;
+}
+
+static __be32
+nfsd4_encode_getxattr(struct nfsd4_compoundres *resp, __be32 nfserr,
+                     struct nfsd4_getxattr *getxattr)
+{
+       struct xdr_stream *xdr = &resp->xdr;
+       __be32 *p, err;
+
+       p = xdr_reserve_space(xdr, 4);
+       if (!p)
+               return nfserr_resource;
+
+       *p = cpu_to_be32(getxattr->getxa_len);
+
+       if (getxattr->getxa_len == 0)
+               return 0;
+
+       err = nfsd4_vbuf_to_stream(xdr, getxattr->getxa_buf,
+                                   getxattr->getxa_len);
+
+       kvfree(getxattr->getxa_buf);
+
+       return err;
+}
+
+static __be32
+nfsd4_encode_setxattr(struct nfsd4_compoundres *resp, __be32 nfserr,
+                     struct nfsd4_setxattr *setxattr)
+{
+       struct xdr_stream *xdr = &resp->xdr;
+       __be32 *p;
+
+       p = xdr_reserve_space(xdr, 20);
+       if (!p)
+               return nfserr_resource;
+
+       encode_cinfo(p, &setxattr->setxa_cinfo);
+
+       return 0;
+}
+
+/*
+ * See if there are cookie values that can be rejected outright.
+ */
+static __be32
+nfsd4_listxattr_validate_cookie(struct nfsd4_listxattrs *listxattrs,
+                               u32 *offsetp)
+{
+       u64 cookie = listxattrs->lsxa_cookie;
+
+       /*
+        * If the cookie is larger than the maximum number of attributes
+        * that could fit either in the buffer we just got back from
+        * vfs_listxattr or, XDR-encoded, in the return buffer, it's invalid.
+        */
+       if (cookie > (listxattrs->lsxa_len) / (XATTR_USER_PREFIX_LEN + 2))
+               return nfserr_badcookie;
+
+       if (cookie > (listxattrs->lsxa_maxcount /
+                     (XDR_QUADLEN(XATTR_USER_PREFIX_LEN + 2) + 4)))
+               return nfserr_badcookie;
+
+       *offsetp = (u32)cookie;
+       return 0;
+}
+
+static __be32
+nfsd4_encode_listxattrs(struct nfsd4_compoundres *resp, __be32 nfserr,
+                       struct nfsd4_listxattrs *listxattrs)
+{
+       struct xdr_stream *xdr = &resp->xdr;
+       u32 cookie_offset, count_offset, eof;
+       u32 left, xdrleft, slen, count;
+       u32 xdrlen, offset;
+       u64 cookie;
+       char *sp;
+       __be32 status;
+       __be32 *p;
+       u32 nuser;
+
+       eof = 1;
+
+       status = nfsd4_listxattr_validate_cookie(listxattrs, &offset);
+       if (status)
+               goto out;
+
+       /*
+        * Reserve space for the cookie and the name array count. Record
+        * the offsets to save them later.
+        */
+       cookie_offset = xdr->buf->len;
+       count_offset = cookie_offset + 8;
+       p = xdr_reserve_space(xdr, 12);
+       if (!p) {
+               status = nfserr_resource;
+               goto out;
+       }
+
+       count = 0;
+       left = listxattrs->lsxa_len;
+       sp = listxattrs->lsxa_buf;
+       nuser = 0;
+
+       xdrleft = listxattrs->lsxa_maxcount;
+
+       while (left > 0 && xdrleft > 0) {
+               slen = strlen(sp);
+
+               /*
+                * Check if this is a user. attribute; skip it if not.
+                */
+               if (strncmp(sp, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN))
+                       goto contloop;
+
+               slen -= XATTR_USER_PREFIX_LEN;
+               xdrlen = 4 + ((slen + 3) & ~3);
+               if (xdrlen > xdrleft) {
+                       if (count == 0) {
+                               /*
+                                * Can't even fit the first attribute name.
+                                */
+                               status = nfserr_toosmall;
+                               goto out;
+                       }
+                       eof = 0;
+                       goto wreof;
+               }
+
+               left -= XATTR_USER_PREFIX_LEN;
+               sp += XATTR_USER_PREFIX_LEN;
+               if (nuser++ < offset)
+                       goto contloop;
+
+
+               p = xdr_reserve_space(xdr, xdrlen);
+               if (!p) {
+                       status = nfserr_resource;
+                       goto out;
+               }
+
+               p = xdr_encode_opaque(p, sp, slen);
+
+               xdrleft -= xdrlen;
+               count++;
+contloop:
+               sp += slen + 1;
+               left -= slen + 1;
+       }
+
+       /*
+        * If there were user attributes to copy, but we didn't copy
+        * any, the offset was too large (e.g. the cookie was invalid).
+        */
+       if (nuser > 0 && count == 0) {
+               status = nfserr_badcookie;
+               goto out;
+       }
+
+wreof:
+       p = xdr_reserve_space(xdr, 4);
+       if (!p) {
+               status = nfserr_resource;
+               goto out;
+       }
+       *p = cpu_to_be32(eof);
+
+       cookie = cpu_to_be64(offset + count);
+
+       write_bytes_to_xdr_buf(xdr->buf, cookie_offset, &cookie, 8);
+       count = htonl(count);
+       write_bytes_to_xdr_buf(xdr->buf, count_offset, &count, 4);
+out:
+       if (listxattrs->lsxa_len)
+               kvfree(listxattrs->lsxa_buf);
+       return status;
+}
+
+static __be32
+nfsd4_encode_removexattr(struct nfsd4_compoundres *resp, __be32 nfserr,
+                        struct nfsd4_removexattr *removexattr)
+{
+       struct xdr_stream *xdr = &resp->xdr;
+       __be32 *p;
+
+       p = xdr_reserve_space(xdr, 20);
+       if (!p)
+               return nfserr_resource;
+
+       p = encode_cinfo(p, &removexattr->rmxa_cinfo);
+       return 0;
+}
+
 typedef __be32(* nfsd4_enc)(struct nfsd4_compoundres *, __be32, void *);
 
 /*
@@ -4535,6 +5000,12 @@ static const nfsd4_enc nfsd4_enc_ops[] = {
        [OP_SEEK]               = (nfsd4_enc)nfsd4_encode_seek,
        [OP_WRITE_SAME]         = (nfsd4_enc)nfsd4_encode_noop,
        [OP_CLONE]              = (nfsd4_enc)nfsd4_encode_noop,
+
+       /* RFC 8276 extended attributes operations */
+       [OP_GETXATTR]           = (nfsd4_enc)nfsd4_encode_getxattr,
+       [OP_SETXATTR]           = (nfsd4_enc)nfsd4_encode_setxattr,
+       [OP_LISTXATTRS]         = (nfsd4_enc)nfsd4_encode_listxattrs,
+       [OP_REMOVEXATTR]        = (nfsd4_enc)nfsd4_encode_removexattr,
 };
 
 /*
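On the encode side above, the LISTXATTRS cookie is treated as an index into the sequence of "user." names found in the raw vfs_listxattr() buffer; each returned name has the prefix stripped, and an EOF flag reports whether the whole list fit. The stand-alone sketch below shows only that filter-and-resume step over a made-up buffer; the real encoder additionally enforces the XDR space limits (nfserr_toosmall when not even the first name fits) and emits names as XDR opaques rather than printing them.

    #include <stdio.h>
    #include <string.h>

    /* vfs_listxattr()-style buffer: a run of NUL-terminated names. */
    #define USER_PREFIX     "user."
    #define USER_PREFIX_LEN (sizeof(USER_PREFIX) - 1)

    static void list_user_xattrs(const char *buf, size_t len, unsigned long cookie)
    {
            const char *sp = buf;
            unsigned long nuser = 0;

            while (len > 0) {
                    size_t slen = strlen(sp);

                    /* keep only "user." names, starting at the cookie index */
                    if (strncmp(sp, USER_PREFIX, USER_PREFIX_LEN) == 0 &&
                        nuser++ >= cookie)
                            printf("%s\n", sp + USER_PREFIX_LEN);

                    sp  += slen + 1;    /* skip the name and its NUL */
                    len -= slen + 1;
            }
    }

    int main(void)
    {
            const char raw[] = "security.selinux\0user.comment\0user.origin\0";

            list_user_xattrs(raw, sizeof(raw) - 1, 0);  /* cookie 0: both names  */
            puts("--");
            list_user_xattrs(raw, sizeof(raw) - 1, 1);  /* cookie 1: resume at 2nd */
            return 0;
    }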
index 57c832d..cb742e1 100644 (file)
@@ -286,6 +286,8 @@ void                nfsd_lockd_shutdown(void);
 #define nfserr_wrong_lfs               cpu_to_be32(NFS4ERR_WRONG_LFS)
 #define nfserr_badlabel                        cpu_to_be32(NFS4ERR_BADLABEL)
 #define nfserr_file_open               cpu_to_be32(NFS4ERR_FILE_OPEN)
+#define nfserr_xattr2big               cpu_to_be32(NFS4ERR_XATTR2BIG)
+#define nfserr_noxattr                 cpu_to_be32(NFS4ERR_NOXATTR)
 
 /* error codes for internal use */
 /* if a request fails due to kmalloc failure, it gets dropped.
@@ -387,7 +389,8 @@ void                nfsd_lockd_shutdown(void);
        (NFSD4_1_SUPPORTED_ATTRS_WORD2 | \
        FATTR4_WORD2_CHANGE_ATTR_TYPE | \
        FATTR4_WORD2_MODE_UMASK | \
-       NFSD4_2_SECURITY_ATTRS)
+       NFSD4_2_SECURITY_ATTRS | \
+       FATTR4_WORD2_XATTR_SUPPORT)
 
 extern const u32 nfsd_suppattrs[3][3];
 
index d22a056..7d2933b 100644 (file)
@@ -612,6 +612,12 @@ static struct accessmap    nfs3_regaccess[] = {
     {  NFS3_ACCESS_MODIFY,     NFSD_MAY_WRITE|NFSD_MAY_TRUNC   },
     {  NFS3_ACCESS_EXTEND,     NFSD_MAY_WRITE                  },
 
+#ifdef CONFIG_NFSD_V4
+    {  NFS4_ACCESS_XAREAD,     NFSD_MAY_READ                   },
+    {  NFS4_ACCESS_XAWRITE,    NFSD_MAY_WRITE                  },
+    {  NFS4_ACCESS_XALIST,     NFSD_MAY_READ                   },
+#endif
+
     {  0,                      0                               }
 };
 
@@ -622,6 +628,12 @@ static struct accessmap    nfs3_diraccess[] = {
     {  NFS3_ACCESS_EXTEND,     NFSD_MAY_EXEC|NFSD_MAY_WRITE    },
     {  NFS3_ACCESS_DELETE,     NFSD_MAY_REMOVE                 },
 
+#ifdef CONFIG_NFSD_V4
+    {  NFS4_ACCESS_XAREAD,     NFSD_MAY_READ                   },
+    {  NFS4_ACCESS_XAWRITE,    NFSD_MAY_WRITE                  },
+    {  NFS4_ACCESS_XALIST,     NFSD_MAY_READ                   },
+#endif
+
     {  0,                      0                               }
 };
 
@@ -2065,6 +2077,233 @@ static int exp_rdonly(struct svc_rqst *rqstp, struct svc_export *exp)
        return nfsexp_flags(rqstp, exp) & NFSEXP_READONLY;
 }
 
+#ifdef CONFIG_NFSD_V4
+/*
+ * Helper function to translate error numbers. In the case of xattr operations,
+ * some error codes need to be translated outside of the standard translations.
+ *
+ * ENODATA needs to be translated to nfserr_noxattr.
+ * E2BIG to nfserr_xattr2big.
+ *
+ * Additionally, vfs_listxattr can return -ERANGE. This means that the
+ * file has too many extended attributes to retrieve inside an
+ * XATTR_LIST_MAX sized buffer. This is a bug in the xattr implementation:
+ * filesystems will allow the adding of extended attributes until they hit
+ * their own internal limit. This limit may be larger than XATTR_LIST_MAX.
+ * So, at that point, the attributes are present and valid, but can't
+ * be retrieved using listxattr, since the upper level xattr code enforces
+ * the XATTR_LIST_MAX limit.
+ *
+ * This bug means that we need to deal with listxattr returning -ERANGE. The
+ * best mapping is to return TOOSMALL.
+ */
+static __be32
+nfsd_xattr_errno(int err)
+{
+       switch (err) {
+       case -ENODATA:
+               return nfserr_noxattr;
+       case -E2BIG:
+               return nfserr_xattr2big;
+       case -ERANGE:
+               return nfserr_toosmall;
+       }
+       return nfserrno(err);
+}
+
+/*
+ * Retrieve the specified user extended attribute. To avoid always
+ * having to allocate the maximum size (since we are not getting
+ * a maximum size from the RPC), do a probe + alloc. Hold a reader
+ * lock on i_rwsem to prevent the extended attribute from changing
+ * size while we're doing this.
+ */
+__be32
+nfsd_getxattr(struct svc_rqst *rqstp, struct svc_fh *fhp, char *name,
+             void **bufp, int *lenp)
+{
+       ssize_t len;
+       __be32 err;
+       char *buf;
+       struct inode *inode;
+       struct dentry *dentry;
+
+       err = fh_verify(rqstp, fhp, 0, NFSD_MAY_READ);
+       if (err)
+               return err;
+
+       err = nfs_ok;
+       dentry = fhp->fh_dentry;
+       inode = d_inode(dentry);
+
+       inode_lock_shared(inode);
+
+       len = vfs_getxattr(dentry, name, NULL, 0);
+
+       /*
+        * Zero-length attribute, just return.
+        */
+       if (len == 0) {
+               *bufp = NULL;
+               *lenp = 0;
+               goto out;
+       }
+
+       if (len < 0) {
+               err = nfsd_xattr_errno(len);
+               goto out;
+       }
+
+       if (len > *lenp) {
+               err = nfserr_toosmall;
+               goto out;
+       }
+
+       buf = kvmalloc(len, GFP_NOFS);
+       if (buf == NULL) {
+               err = nfserr_jukebox;
+               goto out;
+       }
+
+       len = vfs_getxattr(dentry, name, buf, len);
+       if (len <= 0) {
+               kvfree(buf);
+               buf = NULL;
+               err = nfsd_xattr_errno(len);
+       }
+
+       *lenp = len;
+       *bufp = buf;
+
+out:
+       inode_unlock_shared(inode);
+
+       return err;
+}
+
+/*
+ * Retrieve the xattr names. Since we can't know how many are
+ * user extended attributes, we must get all attributes here,
+ * and have the XDR encoder filter out everything but the "user." ones.
+ *
+ * While this could always just allocate an XATTR_LIST_MAX
+ * buffer, that's a waste, so do a probe + allocate. To
+ * avoid any changes between the probe and allocate, wrap
+ * this in inode_lock.
+ */
+__be32
+nfsd_listxattr(struct svc_rqst *rqstp, struct svc_fh *fhp, char **bufp,
+              int *lenp)
+{
+       ssize_t len;
+       __be32 err;
+       char *buf;
+       struct inode *inode;
+       struct dentry *dentry;
+
+       err = fh_verify(rqstp, fhp, 0, NFSD_MAY_READ);
+       if (err)
+               return err;
+
+       dentry = fhp->fh_dentry;
+       inode = d_inode(dentry);
+       *lenp = 0;
+
+       inode_lock_shared(inode);
+
+       len = vfs_listxattr(dentry, NULL, 0);
+       if (len <= 0) {
+               err = nfsd_xattr_errno(len);
+               goto out;
+       }
+
+       if (len > XATTR_LIST_MAX) {
+               err = nfserr_xattr2big;
+               goto out;
+       }
+
+       /*
+        * We're holding i_rwsem - use GFP_NOFS.
+        */
+       buf = kvmalloc(len, GFP_NOFS);
+       if (buf == NULL) {
+               err = nfserr_jukebox;
+               goto out;
+       }
+
+       len = vfs_listxattr(dentry, buf, len);
+       if (len <= 0) {
+               kvfree(buf);
+               err = nfsd_xattr_errno(len);
+               goto out;
+       }
+
+       *lenp = len;
+       *bufp = buf;
+
+       err = nfs_ok;
+out:
+       inode_unlock_shared(inode);
+
+       return err;
+}
+
+/*
+ * Removexattr and setxattr need to call fh_lock to both lock the inode
+ * and set the change attribute. Since the top-level vfs_removexattr
+ * and vfs_setxattr calls already do their own inode_lock calls, call
+ * the _locked variant. Pass in a NULL pointer for delegated_inode,
+ * and let the client deal with NFS4ERR_DELAY (same as with e.g.
+ * setattr and remove).
+ */
+__be32
+nfsd_removexattr(struct svc_rqst *rqstp, struct svc_fh *fhp, char *name)
+{
+       int err, ret;
+
+       err = fh_verify(rqstp, fhp, 0, NFSD_MAY_WRITE);
+       if (err)
+               return err;
+
+       ret = fh_want_write(fhp);
+       if (ret)
+               return nfserrno(ret);
+
+       fh_lock(fhp);
+
+       ret = __vfs_removexattr_locked(fhp->fh_dentry, name, NULL);
+
+       fh_unlock(fhp);
+       fh_drop_write(fhp);
+
+       return nfsd_xattr_errno(ret);
+}
+
+__be32
+nfsd_setxattr(struct svc_rqst *rqstp, struct svc_fh *fhp, char *name,
+             void *buf, u32 len, u32 flags)
+{
+       int err, ret;
+
+       err = fh_verify(rqstp, fhp, 0, NFSD_MAY_WRITE);
+       if (err)
+               return err;
+
+       ret = fh_want_write(fhp);
+       if (ret)
+               return nfserrno(ret);
+       fh_lock(fhp);
+
+       ret = __vfs_setxattr_locked(fhp->fh_dentry, name, buf, len, flags,
+                                   NULL);
+
+       fh_unlock(fhp);
+       fh_drop_write(fhp);
+
+       return nfsd_xattr_errno(ret);
+}
+#endif
+
 /*
  * Check for a user's access permissions to this inode.
  */
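nfsd_getxattr() above uses a probe-then-allocate pattern: ask vfs_getxattr() for the size with a NULL buffer, allocate exactly that much, then fetch the value, all under inode_lock_shared() so the attribute cannot change size in between. The same two-step shape is visible from userspace through the getxattr(2) syscall; in the sketch below the path and attribute name are made up, and of course no inode lock is held, so the size can legitimately race between the two calls.

    #include <sys/xattr.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Probe the size, then fetch the value of one extended attribute. */
    static void *read_xattr(const char *path, const char *name, ssize_t *lenp)
    {
            ssize_t len = getxattr(path, name, NULL, 0);   /* probe */
            void *buf;

            *lenp = len;
            if (len <= 0)
                    return NULL;

            buf = malloc(len);
            if (!buf) {
                    *lenp = -1;
                    return NULL;
            }

            len = getxattr(path, name, buf, len);          /* fetch */
            if (len < 0) {
                    free(buf);
                    buf = NULL;
            }
            *lenp = len;
            return buf;
    }

    int main(int argc, char **argv)
    {
            ssize_t len;
            void *val = read_xattr(argc > 1 ? argv[1] : ".", "user.comment", &len);

            printf("user.comment: %zd bytes\n", len);
            free(val);
            return 0;
    }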
index 3eb660a..a2442eb 100644 (file)
@@ -76,6 +76,16 @@ __be32               do_nfsd_create(struct svc_rqst *, struct svc_fh *,
 __be32         nfsd_commit(struct svc_rqst *, struct svc_fh *,
                                loff_t, unsigned long, __be32 *verf);
 #endif /* CONFIG_NFSD_V3 */
+#ifdef CONFIG_NFSD_V4
+__be32         nfsd_getxattr(struct svc_rqst *rqstp, struct svc_fh *fhp,
+                           char *name, void **bufp, int *lenp);
+__be32         nfsd_listxattr(struct svc_rqst *rqstp, struct svc_fh *fhp,
+                           char **bufp, int *lenp);
+__be32         nfsd_removexattr(struct svc_rqst *rqstp, struct svc_fh *fhp,
+                           char *name);
+__be32         nfsd_setxattr(struct svc_rqst *rqstp, struct svc_fh *fhp,
+                           char *name, void *buf, u32 len, u32 flags);
+#endif
 int            nfsd_open_break_lease(struct inode *, int);
 __be32         nfsd_open(struct svc_rqst *, struct svc_fh *, umode_t,
                                int, struct file **);
index db63d39..66499fb 100644 (file)
@@ -224,6 +224,32 @@ struct nfsd4_putfh {
        bool            no_verify;          /* represents foreign fh */
 };
 
+struct nfsd4_getxattr {
+       char            *getxa_name;            /* request */
+       u32             getxa_len;              /* request */
+       void            *getxa_buf;
+};
+
+struct nfsd4_setxattr {
+       u32             setxa_flags;            /* request */
+       char            *setxa_name;            /* request */
+       char            *setxa_buf;             /* request */
+       u32             setxa_len;              /* request */
+       struct nfsd4_change_info  setxa_cinfo;  /* response */
+};
+
+struct nfsd4_removexattr {
+       char            *rmxa_name;             /* request */
+       struct nfsd4_change_info  rmxa_cinfo;   /* response */
+};
+
+struct nfsd4_listxattrs {
+       u64             lsxa_cookie;            /* request */
+       u32             lsxa_maxcount;          /* request */
+       char            *lsxa_buf;              /* unfiltered buffer (reply) */
+       u32             lsxa_len;               /* unfiltered len (reply) */
+};
+
 struct nfsd4_open {
        u32             op_claim_type;      /* request */
        struct xdr_netobj op_fname;         /* request - everything but CLAIM_PREV */
@@ -649,6 +675,11 @@ struct nfsd4_op {
                struct nfsd4_offload_status     offload_status;
                struct nfsd4_copy_notify        copy_notify;
                struct nfsd4_seek               seek;
+
+               struct nfsd4_getxattr           getxattr;
+               struct nfsd4_setxattr           setxattr;
+               struct nfsd4_listxattrs         listxattrs;
+               struct nfsd4_removexattr        removexattr;
        } u;
        struct nfs4_replay *                    replay;
 };
index 235b959..adf3bb0 100644 (file)
@@ -613,10 +613,10 @@ void nilfs_palloc_commit_free_entry(struct inode *inode,
        lock = nilfs_mdt_bgl_lock(inode, group);
 
        if (!nilfs_clear_bit_atomic(lock, group_offset, bitmap))
-               nilfs_msg(inode->i_sb, KERN_WARNING,
-                         "%s (ino=%lu): entry number %llu already freed",
-                         __func__, inode->i_ino,
-                         (unsigned long long)req->pr_entry_nr);
+               nilfs_warn(inode->i_sb,
+                          "%s (ino=%lu): entry number %llu already freed",
+                          __func__, inode->i_ino,
+                          (unsigned long long)req->pr_entry_nr);
        else
                nilfs_palloc_group_desc_add_entries(desc, lock, 1);
 
@@ -654,10 +654,10 @@ void nilfs_palloc_abort_alloc_entry(struct inode *inode,
        lock = nilfs_mdt_bgl_lock(inode, group);
 
        if (!nilfs_clear_bit_atomic(lock, group_offset, bitmap))
-               nilfs_msg(inode->i_sb, KERN_WARNING,
-                         "%s (ino=%lu): entry number %llu already freed",
-                         __func__, inode->i_ino,
-                         (unsigned long long)req->pr_entry_nr);
+               nilfs_warn(inode->i_sb,
+                          "%s (ino=%lu): entry number %llu already freed",
+                          __func__, inode->i_ino,
+                          (unsigned long long)req->pr_entry_nr);
        else
                nilfs_palloc_group_desc_add_entries(desc, lock, 1);
 
@@ -763,10 +763,10 @@ int nilfs_palloc_freev(struct inode *inode, __u64 *entry_nrs, size_t nitems)
                do {
                        if (!nilfs_clear_bit_atomic(lock, group_offset,
                                                    bitmap)) {
-                               nilfs_msg(inode->i_sb, KERN_WARNING,
-                                         "%s (ino=%lu): entry number %llu already freed",
-                                         __func__, inode->i_ino,
-                                         (unsigned long long)entry_nrs[j]);
+                               nilfs_warn(inode->i_sb,
+                                          "%s (ino=%lu): entry number %llu already freed",
+                                          __func__, inode->i_ino,
+                                          (unsigned long long)entry_nrs[j]);
                        } else {
                                n++;
                        }
@@ -808,10 +808,10 @@ int nilfs_palloc_freev(struct inode *inode, __u64 *entry_nrs, size_t nitems)
                        ret = nilfs_palloc_delete_entry_block(inode,
                                                              last_nrs[k]);
                        if (ret && ret != -ENOENT)
-                               nilfs_msg(inode->i_sb, KERN_WARNING,
-                                         "error %d deleting block that object (entry=%llu, ino=%lu) belongs to",
-                                         ret, (unsigned long long)last_nrs[k],
-                                         inode->i_ino);
+                               nilfs_warn(inode->i_sb,
+                                          "error %d deleting block that object (entry=%llu, ino=%lu) belongs to",
+                                          ret, (unsigned long long)last_nrs[k],
+                                          inode->i_ino);
                }
 
                desc_kaddr = kmap_atomic(desc_bh->b_page);
@@ -826,9 +826,9 @@ int nilfs_palloc_freev(struct inode *inode, __u64 *entry_nrs, size_t nitems)
                if (nfree == nilfs_palloc_entries_per_group(inode)) {
                        ret = nilfs_palloc_delete_bitmap_block(inode, group);
                        if (ret && ret != -ENOENT)
-                               nilfs_msg(inode->i_sb, KERN_WARNING,
-                                         "error %d deleting bitmap block of group=%lu, ino=%lu",
-                                         ret, group, inode->i_ino);
+                               nilfs_warn(inode->i_sb,
+                                          "error %d deleting bitmap block of group=%lu, ino=%lu",
+                                          ret, group, inode->i_ino);
                }
        }
        return 0;
index 23e043e..f42ab57 100644 (file)
@@ -351,10 +351,10 @@ static int nilfs_btree_node_broken(const struct nilfs_btree_node *node,
                     (flags & NILFS_BTREE_NODE_ROOT) ||
                     nchildren < 0 ||
                     nchildren > NILFS_BTREE_NODE_NCHILDREN_MAX(size))) {
-               nilfs_msg(inode->i_sb, KERN_CRIT,
-                         "bad btree node (ino=%lu, blocknr=%llu): level = %d, flags = 0x%x, nchildren = %d",
-                         inode->i_ino, (unsigned long long)blocknr, level,
-                         flags, nchildren);
+               nilfs_crit(inode->i_sb,
+                          "bad btree node (ino=%lu, blocknr=%llu): level = %d, flags = 0x%x, nchildren = %d",
+                          inode->i_ino, (unsigned long long)blocknr, level,
+                          flags, nchildren);
                ret = 1;
        }
        return ret;
@@ -381,9 +381,9 @@ static int nilfs_btree_root_broken(const struct nilfs_btree_node *node,
                     level >= NILFS_BTREE_LEVEL_MAX ||
                     nchildren < 0 ||
                     nchildren > NILFS_BTREE_ROOT_NCHILDREN_MAX)) {
-               nilfs_msg(inode->i_sb, KERN_CRIT,
-                         "bad btree root (ino=%lu): level = %d, flags = 0x%x, nchildren = %d",
-                         inode->i_ino, level, flags, nchildren);
+               nilfs_crit(inode->i_sb,
+                          "bad btree root (ino=%lu): level = %d, flags = 0x%x, nchildren = %d",
+                          inode->i_ino, level, flags, nchildren);
                ret = 1;
        }
        return ret;
@@ -450,10 +450,10 @@ static int nilfs_btree_bad_node(const struct nilfs_bmap *btree,
 {
        if (unlikely(nilfs_btree_node_get_level(node) != level)) {
                dump_stack();
-               nilfs_msg(btree->b_inode->i_sb, KERN_CRIT,
-                         "btree level mismatch (ino=%lu): %d != %d",
-                         btree->b_inode->i_ino,
-                         nilfs_btree_node_get_level(node), level);
+               nilfs_crit(btree->b_inode->i_sb,
+                          "btree level mismatch (ino=%lu): %d != %d",
+                          btree->b_inode->i_ino,
+                          nilfs_btree_node_get_level(node), level);
                return 1;
        }
        return 0;
@@ -508,7 +508,7 @@ static int __nilfs_btree_get_block(const struct nilfs_bmap *btree, __u64 ptr,
 
  out_no_wait:
        if (!buffer_uptodate(bh)) {
-               nilfs_msg(btree->b_inode->i_sb, KERN_ERR,
+               nilfs_err(btree->b_inode->i_sb,
                          "I/O error reading b-tree node block (ino=%lu, blocknr=%llu)",
                          btree->b_inode->i_ino, (unsigned long long)ptr);
                brelse(bh);
@@ -2074,10 +2074,10 @@ static int nilfs_btree_propagate(struct nilfs_bmap *btree,
        ret = nilfs_btree_do_lookup(btree, path, key, NULL, level + 1, 0);
        if (ret < 0) {
                if (unlikely(ret == -ENOENT))
-                       nilfs_msg(btree->b_inode->i_sb, KERN_CRIT,
-                                 "writing node/leaf block does not appear in b-tree (ino=%lu) at key=%llu, level=%d",
-                                 btree->b_inode->i_ino,
-                                 (unsigned long long)key, level);
+                       nilfs_crit(btree->b_inode->i_sb,
+                                  "writing node/leaf block does not appear in b-tree (ino=%lu) at key=%llu, level=%d",
+                                  btree->b_inode->i_ino,
+                                  (unsigned long long)key, level);
                goto out;
        }
 
@@ -2114,11 +2114,11 @@ static void nilfs_btree_add_dirty_buffer(struct nilfs_bmap *btree,
        if (level < NILFS_BTREE_LEVEL_NODE_MIN ||
            level >= NILFS_BTREE_LEVEL_MAX) {
                dump_stack();
-               nilfs_msg(btree->b_inode->i_sb, KERN_WARNING,
-                         "invalid btree level: %d (key=%llu, ino=%lu, blocknr=%llu)",
-                         level, (unsigned long long)key,
-                         btree->b_inode->i_ino,
-                         (unsigned long long)bh->b_blocknr);
+               nilfs_warn(btree->b_inode->i_sb,
+                          "invalid btree level: %d (key=%llu, ino=%lu, blocknr=%llu)",
+                          level, (unsigned long long)key,
+                          btree->b_inode->i_ino,
+                          (unsigned long long)bh->b_blocknr);
                return;
        }
 
index 8d41311..86d4d85 100644 (file)
@@ -322,7 +322,7 @@ int nilfs_cpfile_delete_checkpoints(struct inode *cpfile,
        int ret, ncps, nicps, nss, count, i;
 
        if (unlikely(start == 0 || start > end)) {
-               nilfs_msg(cpfile->i_sb, KERN_ERR,
+               nilfs_err(cpfile->i_sb,
                          "cannot delete checkpoints: invalid range [%llu, %llu)",
                          (unsigned long long)start, (unsigned long long)end);
                return -EINVAL;
@@ -376,7 +376,7 @@ int nilfs_cpfile_delete_checkpoints(struct inode *cpfile,
                                                                   cpfile, cno);
                                        if (ret == 0)
                                                continue;
-                                       nilfs_msg(cpfile->i_sb, KERN_ERR,
+                                       nilfs_err(cpfile->i_sb,
                                                  "error %d deleting checkpoint block",
                                                  ret);
                                        break;
@@ -981,12 +981,10 @@ int nilfs_cpfile_read(struct super_block *sb, size_t cpsize,
        int err;
 
        if (cpsize > sb->s_blocksize) {
-               nilfs_msg(sb, KERN_ERR,
-                         "too large checkpoint size: %zu bytes", cpsize);
+               nilfs_err(sb, "too large checkpoint size: %zu bytes", cpsize);
                return -EINVAL;
        } else if (cpsize < NILFS_MIN_CHECKPOINT_SIZE) {
-               nilfs_msg(sb, KERN_ERR,
-                         "too small checkpoint size: %zu bytes", cpsize);
+               nilfs_err(sb, "too small checkpoint size: %zu bytes", cpsize);
                return -EINVAL;
        }
 
index 6f40666..8bccdf1 100644 (file)
@@ -340,11 +340,11 @@ int nilfs_dat_move(struct inode *dat, __u64 vblocknr, sector_t blocknr)
        kaddr = kmap_atomic(entry_bh->b_page);
        entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr);
        if (unlikely(entry->de_blocknr == cpu_to_le64(0))) {
-               nilfs_msg(dat->i_sb, KERN_CRIT,
-                         "%s: invalid vblocknr = %llu, [%llu, %llu)",
-                         __func__, (unsigned long long)vblocknr,
-                         (unsigned long long)le64_to_cpu(entry->de_start),
-                         (unsigned long long)le64_to_cpu(entry->de_end));
+               nilfs_crit(dat->i_sb,
+                          "%s: invalid vblocknr = %llu, [%llu, %llu)",
+                          __func__, (unsigned long long)vblocknr,
+                          (unsigned long long)le64_to_cpu(entry->de_start),
+                          (unsigned long long)le64_to_cpu(entry->de_end));
                kunmap_atomic(kaddr);
                brelse(entry_bh);
                return -EINVAL;
@@ -471,11 +471,11 @@ int nilfs_dat_read(struct super_block *sb, size_t entry_size,
        int err;
 
        if (entry_size > sb->s_blocksize) {
-               nilfs_msg(sb, KERN_ERR, "too large DAT entry size: %zu bytes",
+               nilfs_err(sb, "too large DAT entry size: %zu bytes",
                          entry_size);
                return -EINVAL;
        } else if (entry_size < NILFS_MIN_DAT_ENTRY_SIZE) {
-               nilfs_msg(sb, KERN_ERR, "too small DAT entry size: %zu bytes",
+               nilfs_err(sb, "too small DAT entry size: %zu bytes",
                          entry_size);
                return -EINVAL;
        }
index 533e24e..f353101 100644 (file)
@@ -328,16 +328,18 @@ static int nilfs_direct_assign(struct nilfs_bmap *bmap,
 
        key = nilfs_bmap_data_get_key(bmap, *bh);
        if (unlikely(key > NILFS_DIRECT_KEY_MAX)) {
-               nilfs_msg(bmap->b_inode->i_sb, KERN_CRIT,
-                         "%s (ino=%lu): invalid key: %llu", __func__,
-                         bmap->b_inode->i_ino, (unsigned long long)key);
+               nilfs_crit(bmap->b_inode->i_sb,
+                          "%s (ino=%lu): invalid key: %llu",
+                          __func__,
+                          bmap->b_inode->i_ino, (unsigned long long)key);
                return -EINVAL;
        }
        ptr = nilfs_direct_get_ptr(bmap, key);
        if (unlikely(ptr == NILFS_BMAP_INVALID_PTR)) {
-               nilfs_msg(bmap->b_inode->i_sb, KERN_CRIT,
-                         "%s (ino=%lu): invalid pointer: %llu", __func__,
-                         bmap->b_inode->i_ino, (unsigned long long)ptr);
+               nilfs_crit(bmap->b_inode->i_sb,
+                          "%s (ino=%lu): invalid pointer: %llu",
+                          __func__,
+                          bmap->b_inode->i_ino, (unsigned long long)ptr);
                return -EINVAL;
        }
 
index aa3c328..4483204 100644 (file)
@@ -142,7 +142,7 @@ int nilfs_gccache_wait_and_mark_dirty(struct buffer_head *bh)
        if (!buffer_uptodate(bh)) {
                struct inode *inode = bh->b_page->mapping->host;
 
-               nilfs_msg(inode->i_sb, KERN_ERR,
+               nilfs_err(inode->i_sb,
                          "I/O error reading %s block for GC (ino=%lu, vblocknr=%llu)",
                          buffer_nilfs_node(bh) ? "node" : "data",
                          inode->i_ino, (unsigned long long)bh->b_blocknr);
index 4140d23..02727ed 100644 (file)
@@ -142,8 +142,8 @@ int nilfs_ifile_get_inode_block(struct inode *ifile, ino_t ino,
 
        err = nilfs_palloc_get_entry_block(ifile, ino, 0, out_bh);
        if (unlikely(err))
-               nilfs_msg(sb, KERN_WARNING, "error %d reading inode: ino=%lu",
-                         err, (unsigned long)ino);
+               nilfs_warn(sb, "error %d reading inode: ino=%lu",
+                          err, (unsigned long)ino);
        return err;
 }
 
index 28009ec..745d371 100644 (file)
@@ -104,10 +104,10 @@ int nilfs_get_block(struct inode *inode, sector_t blkoff,
                                 * However, the page having this block must
                                 * be locked in this case.
                                 */
-                               nilfs_msg(inode->i_sb, KERN_WARNING,
-                                         "%s (ino=%lu): a race condition while inserting a data block at offset=%llu",
-                                         __func__, inode->i_ino,
-                                         (unsigned long long)blkoff);
+                               nilfs_warn(inode->i_sb,
+                                          "%s (ino=%lu): a race condition while inserting a data block at offset=%llu",
+                                          __func__, inode->i_ino,
+                                          (unsigned long long)blkoff);
                                err = 0;
                        }
                        nilfs_transaction_abort(inode->i_sb);
@@ -388,7 +388,8 @@ struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
 
  failed_after_creation:
        clear_nlink(inode);
-       unlock_new_inode(inode);
+       if (inode->i_state & I_NEW)
+               unlock_new_inode(inode);
        iput(inode);  /*
                       * raw_inode will be deleted through
                       * nilfs_evict_inode().
@@ -706,9 +707,8 @@ repeat:
                goto repeat;
 
 failed:
-       nilfs_msg(ii->vfs_inode.i_sb, KERN_WARNING,
-                 "error %d truncating bmap (ino=%lu)", ret,
-                 ii->vfs_inode.i_ino);
+       nilfs_warn(ii->vfs_inode.i_sb, "error %d truncating bmap (ino=%lu)",
+                  ret, ii->vfs_inode.i_ino);
 }
 
 void nilfs_truncate(struct inode *inode)
@@ -919,9 +919,9 @@ int nilfs_set_file_dirty(struct inode *inode, unsigned int nr_dirty)
                         * This will happen when somebody is freeing
                         * this inode.
                         */
-                       nilfs_msg(inode->i_sb, KERN_WARNING,
-                                 "cannot set file dirty (ino=%lu): the file is being freed",
-                                 inode->i_ino);
+                       nilfs_warn(inode->i_sb,
+                                  "cannot set file dirty (ino=%lu): the file is being freed",
+                                  inode->i_ino);
                        spin_unlock(&nilfs->ns_inode_lock);
                        return -EINVAL; /*
                                         * NILFS_I_DIRTY may remain for
@@ -942,9 +942,9 @@ int __nilfs_mark_inode_dirty(struct inode *inode, int flags)
 
        err = nilfs_load_inode_block(inode, &ibh);
        if (unlikely(err)) {
-               nilfs_msg(inode->i_sb, KERN_WARNING,
-                         "cannot mark inode dirty (ino=%lu): error %d loading inode block",
-                         inode->i_ino, err);
+               nilfs_warn(inode->i_sb,
+                          "cannot mark inode dirty (ino=%lu): error %d loading inode block",
+                          inode->i_ino, err);
                return err;
        }
        nilfs_update_inode(inode, ibh, flags);
@@ -970,8 +970,8 @@ void nilfs_dirty_inode(struct inode *inode, int flags)
        struct nilfs_mdt_info *mdi = NILFS_MDT(inode);
 
        if (is_bad_inode(inode)) {
-               nilfs_msg(inode->i_sb, KERN_WARNING,
-                         "tried to mark bad_inode dirty. ignored.");
+               nilfs_warn(inode->i_sb,
+                          "tried to mark bad_inode dirty. ignored.");
                dump_stack();
                return;
        }
index 4ba73db..07d26f6 100644 (file)
@@ -569,25 +569,25 @@ static int nilfs_ioctl_move_inode_block(struct inode *inode,
 
        if (unlikely(ret < 0)) {
                if (ret == -ENOENT)
-                       nilfs_msg(inode->i_sb, KERN_CRIT,
-                                 "%s: invalid virtual block address (%s): ino=%llu, cno=%llu, offset=%llu, blocknr=%llu, vblocknr=%llu",
-                                 __func__, vdesc->vd_flags ? "node" : "data",
-                                 (unsigned long long)vdesc->vd_ino,
-                                 (unsigned long long)vdesc->vd_cno,
-                                 (unsigned long long)vdesc->vd_offset,
-                                 (unsigned long long)vdesc->vd_blocknr,
-                                 (unsigned long long)vdesc->vd_vblocknr);
+                       nilfs_crit(inode->i_sb,
+                                  "%s: invalid virtual block address (%s): ino=%llu, cno=%llu, offset=%llu, blocknr=%llu, vblocknr=%llu",
+                                  __func__, vdesc->vd_flags ? "node" : "data",
+                                  (unsigned long long)vdesc->vd_ino,
+                                  (unsigned long long)vdesc->vd_cno,
+                                  (unsigned long long)vdesc->vd_offset,
+                                  (unsigned long long)vdesc->vd_blocknr,
+                                  (unsigned long long)vdesc->vd_vblocknr);
                return ret;
        }
        if (unlikely(!list_empty(&bh->b_assoc_buffers))) {
-               nilfs_msg(inode->i_sb, KERN_CRIT,
-                         "%s: conflicting %s buffer: ino=%llu, cno=%llu, offset=%llu, blocknr=%llu, vblocknr=%llu",
-                         __func__, vdesc->vd_flags ? "node" : "data",
-                         (unsigned long long)vdesc->vd_ino,
-                         (unsigned long long)vdesc->vd_cno,
-                         (unsigned long long)vdesc->vd_offset,
-                         (unsigned long long)vdesc->vd_blocknr,
-                         (unsigned long long)vdesc->vd_vblocknr);
+               nilfs_crit(inode->i_sb,
+                          "%s: conflicting %s buffer: ino=%llu, cno=%llu, offset=%llu, blocknr=%llu, vblocknr=%llu",
+                          __func__, vdesc->vd_flags ? "node" : "data",
+                          (unsigned long long)vdesc->vd_ino,
+                          (unsigned long long)vdesc->vd_cno,
+                          (unsigned long long)vdesc->vd_offset,
+                          (unsigned long long)vdesc->vd_blocknr,
+                          (unsigned long long)vdesc->vd_vblocknr);
                brelse(bh);
                return -EEXIST;
        }
@@ -837,8 +837,7 @@ int nilfs_ioctl_prepare_clean_segments(struct the_nilfs *nilfs,
        return 0;
 
  failed:
-       nilfs_msg(nilfs->ns_sb, KERN_ERR, "error %d preparing GC: %s", ret,
-                 msg);
+       nilfs_err(nilfs->ns_sb, "error %d preparing GC: %s", ret, msg);
        return ret;
 }
 
@@ -947,7 +946,7 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
 
        ret = nilfs_ioctl_move_blocks(inode->i_sb, &argv[0], kbufs[0]);
        if (ret < 0) {
-               nilfs_msg(inode->i_sb, KERN_ERR,
+               nilfs_err(inode->i_sb,
                          "error %d preparing GC: cannot read source blocks",
                          ret);
        } else {
index 700870a..c0361ce 100644 (file)
@@ -199,7 +199,7 @@ static int nilfs_mdt_read_block(struct inode *inode, unsigned long block,
  out_no_wait:
        err = -EIO;
        if (!buffer_uptodate(first_bh)) {
-               nilfs_msg(inode->i_sb, KERN_ERR,
+               nilfs_err(inode->i_sb,
                          "I/O error reading meta-data file (ino=%lu, block-offset=%lu)",
                          inode->i_ino, block);
                goto failed_bh;
index 9fe6d4a..a6ec796 100644 (file)
@@ -272,9 +272,9 @@ static int nilfs_do_unlink(struct inode *dir, struct dentry *dentry)
                goto out;
 
        if (!inode->i_nlink) {
-               nilfs_msg(inode->i_sb, KERN_WARNING,
-                         "deleting nonexistent file (ino=%lu), %d",
-                         inode->i_ino, inode->i_nlink);
+               nilfs_warn(inode->i_sb,
+                          "deleting nonexistent file (ino=%lu), %d",
+                          inode->i_ino, inode->i_nlink);
                set_nlink(inode, 1);
        }
        err = nilfs_delete_entry(de, page);
index 42395ba..f8450ee 100644 (file)
@@ -289,9 +289,8 @@ static inline int nilfs_mark_inode_dirty_sync(struct inode *inode)
 /* super.c */
 extern struct inode *nilfs_alloc_inode(struct super_block *);
 
-extern __printf(3, 4)
-void __nilfs_msg(struct super_block *sb, const char *level,
-                const char *fmt, ...);
+__printf(2, 3)
+void __nilfs_msg(struct super_block *sb, const char *fmt, ...);
 extern __printf(3, 4)
 void __nilfs_error(struct super_block *sb, const char *function,
                   const char *fmt, ...);
@@ -299,7 +298,7 @@ void __nilfs_error(struct super_block *sb, const char *function,
 #ifdef CONFIG_PRINTK
 
 #define nilfs_msg(sb, level, fmt, ...)                                 \
-       __nilfs_msg(sb, level, fmt, ##__VA_ARGS__)
+       __nilfs_msg(sb, level fmt, ##__VA_ARGS__)
 #define nilfs_error(sb, fmt, ...)                                      \
        __nilfs_error(sb, __func__, fmt, ##__VA_ARGS__)
 
@@ -307,7 +306,7 @@ void __nilfs_error(struct super_block *sb, const char *function,
 
 #define nilfs_msg(sb, level, fmt, ...)                                 \
        do {                                                            \
-               no_printk(fmt, ##__VA_ARGS__);                          \
+               no_printk(level fmt, ##__VA_ARGS__);                    \
                (void)(sb);                                             \
        } while (0)
 #define nilfs_error(sb, fmt, ...)                                      \
@@ -318,6 +317,15 @@ void __nilfs_error(struct super_block *sb, const char *function,
 
 #endif /* CONFIG_PRINTK */
 
+#define nilfs_crit(sb, fmt, ...)                                       \
+       nilfs_msg(sb, KERN_CRIT, fmt, ##__VA_ARGS__)
+#define nilfs_err(sb, fmt, ...)                                                \
+       nilfs_msg(sb, KERN_ERR, fmt, ##__VA_ARGS__)
+#define nilfs_warn(sb, fmt, ...)                                       \
+       nilfs_msg(sb, KERN_WARNING, fmt, ##__VA_ARGS__)
+#define nilfs_info(sb, fmt, ...)                                       \
+       nilfs_msg(sb, KERN_INFO, fmt, ##__VA_ARGS__)
+
 extern struct nilfs_super_block *
 nilfs_read_super_block(struct super_block *, u64, int, struct buffer_head **);
 extern int nilfs_store_magic_and_option(struct super_block *,
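
The wrappers above work because nilfs_msg() now pastes the level and the format together as adjacent string literals, so nilfs_err(sb, "...") hands __nilfs_msg() a format string whose first two bytes carry the severity; the reworked __nilfs_msg() further below recovers it with printk_get_level() and strips it with printk_skip_level(). A minimal user-space sketch of that encoding, with simplified stand-ins for the kernel helpers from <linux/kern_levels.h> and <linux/printk.h> (illustration only, not part of the patch):

#include <stdio.h>

/* user-space stand-ins for the kernel definitions (simplified) */
#define KERN_SOH        "\001"
#define KERN_SOH_ASCII  '\001'
#define KERN_ERR        KERN_SOH "3"

static int printk_get_level(const char *buffer)
{
	/* the real helper also recognizes 'c' for KERN_CONT */
	if (buffer[0] == KERN_SOH_ASCII && buffer[1] != '\0')
		return buffer[1];
	return 0;
}

static const char *printk_skip_level(const char *buffer)
{
	return printk_get_level(buffer) ? buffer + 2 : buffer;
}

int main(void)
{
	/*
	 * What nilfs_err(sb, "unable to read superblock") passes down:
	 * KERN_ERR and the format are adjacent literals, so they merge
	 * into "\001" "3" "unable to read superblock".
	 */
	const char *fmt = KERN_ERR "unable to read superblock";

	printf("level '%c', message \"%s\"\n",
	       printk_get_level(fmt), printk_skip_level(fmt));
	return 0;
}
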
index d7fc8d3..b175f13 100644 (file)
@@ -391,9 +391,8 @@ void nilfs_clear_dirty_page(struct page *page, bool silent)
        BUG_ON(!PageLocked(page));
 
        if (!silent)
-               nilfs_msg(sb, KERN_WARNING,
-                         "discard dirty page: offset=%lld, ino=%lu",
-                         page_offset(page), inode->i_ino);
+               nilfs_warn(sb, "discard dirty page: offset=%lld, ino=%lu",
+                          page_offset(page), inode->i_ino);
 
        ClearPageUptodate(page);
        ClearPageMappedToDisk(page);
@@ -409,9 +408,9 @@ void nilfs_clear_dirty_page(struct page *page, bool silent)
                do {
                        lock_buffer(bh);
                        if (!silent)
-                               nilfs_msg(sb, KERN_WARNING,
-                                         "discard dirty block: blocknr=%llu, size=%zu",
-                                         (u64)bh->b_blocknr, bh->b_size);
+                               nilfs_warn(sb,
+                                          "discard dirty block: blocknr=%llu, size=%zu",
+                                          (u64)bh->b_blocknr, bh->b_size);
 
                        set_mask_bits(&bh->b_state, clear_bits, 0);
                        unlock_buffer(bh);
index 140b663..0b453ef 100644 (file)
@@ -51,7 +51,7 @@ static int nilfs_warn_segment_error(struct super_block *sb, int err)
 
        switch (err) {
        case NILFS_SEG_FAIL_IO:
-               nilfs_msg(sb, KERN_ERR, "I/O error reading segment");
+               nilfs_err(sb, "I/O error reading segment");
                return -EIO;
        case NILFS_SEG_FAIL_MAGIC:
                msg = "Magic number mismatch";
@@ -72,10 +72,10 @@ static int nilfs_warn_segment_error(struct super_block *sb, int err)
                msg = "No super root in the last segment";
                break;
        default:
-               nilfs_msg(sb, KERN_ERR, "unrecognized segment error %d", err);
+               nilfs_err(sb, "unrecognized segment error %d", err);
                return -EINVAL;
        }
-       nilfs_msg(sb, KERN_WARNING, "invalid segment: %s", msg);
+       nilfs_warn(sb, "invalid segment: %s", msg);
        return -EINVAL;
 }
 
@@ -543,10 +543,10 @@ static int nilfs_recover_dsync_blocks(struct the_nilfs *nilfs,
                put_page(page);
 
  failed_inode:
-               nilfs_msg(sb, KERN_WARNING,
-                         "error %d recovering data block (ino=%lu, block-offset=%llu)",
-                         err, (unsigned long)rb->ino,
-                         (unsigned long long)rb->blkoff);
+               nilfs_warn(sb,
+                          "error %d recovering data block (ino=%lu, block-offset=%llu)",
+                          err, (unsigned long)rb->ino,
+                          (unsigned long long)rb->blkoff);
                if (!err2)
                        err2 = err;
  next:
@@ -669,8 +669,7 @@ static int nilfs_do_roll_forward(struct the_nilfs *nilfs,
        }
 
        if (nsalvaged_blocks) {
-               nilfs_msg(sb, KERN_INFO, "salvaged %lu blocks",
-                         nsalvaged_blocks);
+               nilfs_info(sb, "salvaged %lu blocks", nsalvaged_blocks);
                ri->ri_need_recovery = NILFS_RECOVERY_ROLLFORWARD_DONE;
        }
  out:
@@ -681,7 +680,7 @@ static int nilfs_do_roll_forward(struct the_nilfs *nilfs,
  confused:
        err = -EINVAL;
  failed:
-       nilfs_msg(sb, KERN_ERR,
+       nilfs_err(sb,
                  "error %d roll-forwarding partial segment at blocknr = %llu",
                  err, (unsigned long long)pseg_start);
        goto out;
@@ -703,8 +702,8 @@ static void nilfs_finish_roll_forward(struct the_nilfs *nilfs,
        set_buffer_dirty(bh);
        err = sync_dirty_buffer(bh);
        if (unlikely(err))
-               nilfs_msg(nilfs->ns_sb, KERN_WARNING,
-                         "buffer sync write failed during post-cleaning of recovery.");
+               nilfs_warn(nilfs->ns_sb,
+                          "buffer sync write failed during post-cleaning of recovery.");
        brelse(bh);
 }
 
@@ -739,8 +738,7 @@ int nilfs_salvage_orphan_logs(struct the_nilfs *nilfs,
 
        err = nilfs_attach_checkpoint(sb, ri->ri_cno, true, &root);
        if (unlikely(err)) {
-               nilfs_msg(sb, KERN_ERR,
-                         "error %d loading the latest checkpoint", err);
+               nilfs_err(sb, "error %d loading the latest checkpoint", err);
                return err;
        }
 
@@ -751,8 +749,7 @@ int nilfs_salvage_orphan_logs(struct the_nilfs *nilfs,
        if (ri->ri_need_recovery == NILFS_RECOVERY_ROLLFORWARD_DONE) {
                err = nilfs_prepare_segment_for_recovery(nilfs, sb, ri);
                if (unlikely(err)) {
-                       nilfs_msg(sb, KERN_ERR,
-                                 "error %d preparing segment for recovery",
+                       nilfs_err(sb, "error %d preparing segment for recovery",
                                  err);
                        goto failed;
                }
@@ -766,8 +763,7 @@ int nilfs_salvage_orphan_logs(struct the_nilfs *nilfs,
                nilfs_detach_log_writer(sb);
 
                if (unlikely(err)) {
-                       nilfs_msg(sb, KERN_ERR,
-                                 "error %d writing segment for recovery",
+                       nilfs_err(sb, "error %d writing segment for recovery",
                                  err);
                        goto failed;
                }
index 20c479b..1a8729e 100644 (file)
@@ -505,7 +505,7 @@ static int nilfs_segbuf_wait(struct nilfs_segment_buffer *segbuf)
        } while (--segbuf->sb_nbio > 0);
 
        if (unlikely(atomic_read(&segbuf->sb_err) > 0)) {
-               nilfs_msg(segbuf->sb_super, KERN_ERR,
+               nilfs_err(segbuf->sb_super,
                          "I/O error writing log (start-blocknr=%llu, block-count=%lu) in segment %llu",
                          (unsigned long long)segbuf->sb_pseg_start,
                          segbuf->sb_sum.nblocks,
index 91b58c8..a651e82 100644 (file)
@@ -158,7 +158,7 @@ static int nilfs_prepare_segment_lock(struct super_block *sb,
                 * it is saved and will be restored on
                 * nilfs_transaction_commit().
                 */
-               nilfs_msg(sb, KERN_WARNING, "journal info from a different FS");
+               nilfs_warn(sb, "journal info from a different FS");
                save = current->journal_info;
        }
        if (!ti) {
@@ -1940,9 +1940,9 @@ static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci,
                        err = nilfs_ifile_get_inode_block(
                                ifile, ii->vfs_inode.i_ino, &ibh);
                        if (unlikely(err)) {
-                               nilfs_msg(sci->sc_super, KERN_WARNING,
-                                         "log writer: error %d getting inode block (ino=%lu)",
-                                         err, ii->vfs_inode.i_ino);
+                               nilfs_warn(sci->sc_super,
+                                          "log writer: error %d getting inode block (ino=%lu)",
+                                          err, ii->vfs_inode.i_ino);
                                return err;
                        }
                        spin_lock(&nilfs->ns_inode_lock);
@@ -2449,7 +2449,7 @@ int nilfs_clean_segments(struct super_block *sb, struct nilfs_argv *argv,
                if (likely(!err))
                        break;
 
-               nilfs_msg(sb, KERN_WARNING, "error %d cleaning segments", err);
+               nilfs_warn(sb, "error %d cleaning segments", err);
                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(sci->sc_interval);
        }
@@ -2457,9 +2457,9 @@ int nilfs_clean_segments(struct super_block *sb, struct nilfs_argv *argv,
                int ret = nilfs_discard_segments(nilfs, sci->sc_freesegs,
                                                 sci->sc_nfreesegs);
                if (ret) {
-                       nilfs_msg(sb, KERN_WARNING,
-                                 "error %d on discard request, turning discards off for the device",
-                                 ret);
+                       nilfs_warn(sb,
+                                  "error %d on discard request, turning discards off for the device",
+                                  ret);
                        nilfs_clear_opt(nilfs, DISCARD);
                }
        }
@@ -2540,9 +2540,9 @@ static int nilfs_segctor_thread(void *arg)
        /* start sync. */
        sci->sc_task = current;
        wake_up(&sci->sc_wait_task); /* for nilfs_segctor_start_thread() */
-       nilfs_msg(sci->sc_super, KERN_INFO,
-                 "segctord starting. Construction interval = %lu seconds, CP frequency < %lu seconds",
-                 sci->sc_interval / HZ, sci->sc_mjcp_freq / HZ);
+       nilfs_info(sci->sc_super,
+                  "segctord starting. Construction interval = %lu seconds, CP frequency < %lu seconds",
+                  sci->sc_interval / HZ, sci->sc_mjcp_freq / HZ);
 
        spin_lock(&sci->sc_state_lock);
  loop:
@@ -2616,8 +2616,8 @@ static int nilfs_segctor_start_thread(struct nilfs_sc_info *sci)
        if (IS_ERR(t)) {
                int err = PTR_ERR(t);
 
-               nilfs_msg(sci->sc_super, KERN_ERR,
-                         "error %d creating segctord thread", err);
+               nilfs_err(sci->sc_super, "error %d creating segctord thread",
+                         err);
                return err;
        }
        wait_event(sci->sc_wait_task, sci->sc_task != NULL);
@@ -2727,14 +2727,14 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
                nilfs_segctor_write_out(sci);
 
        if (!list_empty(&sci->sc_dirty_files)) {
-               nilfs_msg(sci->sc_super, KERN_WARNING,
-                         "disposed unprocessed dirty file(s) when stopping log writer");
+               nilfs_warn(sci->sc_super,
+                          "disposed unprocessed dirty file(s) when stopping log writer");
                nilfs_dispose_list(nilfs, &sci->sc_dirty_files, 1);
        }
 
        if (!list_empty(&sci->sc_iput_queue)) {
-               nilfs_msg(sci->sc_super, KERN_WARNING,
-                         "disposed unprocessed inode(s) in iput queue when stopping log writer");
+               nilfs_warn(sci->sc_super,
+                          "disposed unprocessed inode(s) in iput queue when stopping log writer");
                nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 1);
        }
 
@@ -2812,8 +2812,8 @@ void nilfs_detach_log_writer(struct super_block *sb)
        spin_lock(&nilfs->ns_inode_lock);
        if (!list_empty(&nilfs->ns_dirty_files)) {
                list_splice_init(&nilfs->ns_dirty_files, &garbage_list);
-               nilfs_msg(sb, KERN_WARNING,
-                         "disposed unprocessed dirty file(s) when detaching log writer");
+               nilfs_warn(sb,
+                          "disposed unprocessed dirty file(s) when detaching log writer");
        }
        spin_unlock(&nilfs->ns_inode_lock);
        up_write(&nilfs->ns_segctor_sem);
index bf3f8f0..42ff67c 100644 (file)
@@ -171,9 +171,9 @@ int nilfs_sufile_updatev(struct inode *sufile, __u64 *segnumv, size_t nsegs,
        down_write(&NILFS_MDT(sufile)->mi_sem);
        for (seg = segnumv; seg < segnumv + nsegs; seg++) {
                if (unlikely(*seg >= nilfs_sufile_get_nsegments(sufile))) {
-                       nilfs_msg(sufile->i_sb, KERN_WARNING,
-                                 "%s: invalid segment number: %llu",
-                                 __func__, (unsigned long long)*seg);
+                       nilfs_warn(sufile->i_sb,
+                                  "%s: invalid segment number: %llu",
+                                  __func__, (unsigned long long)*seg);
                        nerr++;
                }
        }
@@ -230,9 +230,8 @@ int nilfs_sufile_update(struct inode *sufile, __u64 segnum, int create,
        int ret;
 
        if (unlikely(segnum >= nilfs_sufile_get_nsegments(sufile))) {
-               nilfs_msg(sufile->i_sb, KERN_WARNING,
-                         "%s: invalid segment number: %llu",
-                         __func__, (unsigned long long)segnum);
+               nilfs_warn(sufile->i_sb, "%s: invalid segment number: %llu",
+                          __func__, (unsigned long long)segnum);
                return -EINVAL;
        }
        down_write(&NILFS_MDT(sufile)->mi_sem);
@@ -410,9 +409,8 @@ void nilfs_sufile_do_cancel_free(struct inode *sufile, __u64 segnum,
        kaddr = kmap_atomic(su_bh->b_page);
        su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
        if (unlikely(!nilfs_segment_usage_clean(su))) {
-               nilfs_msg(sufile->i_sb, KERN_WARNING,
-                         "%s: segment %llu must be clean", __func__,
-                         (unsigned long long)segnum);
+               nilfs_warn(sufile->i_sb, "%s: segment %llu must be clean",
+                          __func__, (unsigned long long)segnum);
                kunmap_atomic(kaddr);
                return;
        }
@@ -468,9 +466,8 @@ void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
        kaddr = kmap_atomic(su_bh->b_page);
        su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
        if (nilfs_segment_usage_clean(su)) {
-               nilfs_msg(sufile->i_sb, KERN_WARNING,
-                         "%s: segment %llu is already clean",
-                         __func__, (unsigned long long)segnum);
+               nilfs_warn(sufile->i_sb, "%s: segment %llu is already clean",
+                          __func__, (unsigned long long)segnum);
                kunmap_atomic(kaddr);
                return;
        }
@@ -1168,12 +1165,12 @@ int nilfs_sufile_read(struct super_block *sb, size_t susize,
        int err;
 
        if (susize > sb->s_blocksize) {
-               nilfs_msg(sb, KERN_ERR,
-                         "too large segment usage size: %zu bytes", susize);
+               nilfs_err(sb, "too large segment usage size: %zu bytes",
+                         susize);
                return -EINVAL;
        } else if (susize < NILFS_MIN_SEGMENT_USAGE_SIZE) {
-               nilfs_msg(sb, KERN_ERR,
-                         "too small segment usage size: %zu bytes", susize);
+               nilfs_err(sb, "too small segment usage size: %zu bytes",
+                         susize);
                return -EINVAL;
        }
 
index 5729ee8..2eee5fb 100644 (file)
@@ -62,19 +62,25 @@ struct kmem_cache *nilfs_btree_path_cache;
 static int nilfs_setup_super(struct super_block *sb, int is_mount);
 static int nilfs_remount(struct super_block *sb, int *flags, char *data);
 
-void __nilfs_msg(struct super_block *sb, const char *level, const char *fmt,
-                ...)
+void __nilfs_msg(struct super_block *sb, const char *fmt, ...)
 {
        struct va_format vaf;
        va_list args;
+       int level;
 
        va_start(args, fmt);
-       vaf.fmt = fmt;
+
+       level = printk_get_level(fmt);
+       vaf.fmt = printk_skip_level(fmt);
        vaf.va = &args;
+
        if (sb)
-               printk("%sNILFS (%s): %pV\n", level, sb->s_id, &vaf);
+               printk("%c%cNILFS (%s): %pV\n",
+                      KERN_SOH_ASCII, level, sb->s_id, &vaf);
        else
-               printk("%sNILFS: %pV\n", level, &vaf);
+               printk("%c%cNILFS: %pV\n",
+                      KERN_SOH_ASCII, level, &vaf);
+
        va_end(args);
 }
 
@@ -106,7 +112,7 @@ static void nilfs_set_error(struct super_block *sb)
  *
  * This implements the body of nilfs_error() macro.  Normally,
  * nilfs_error() should be used.  As for sustainable errors such as a
- * single-shot I/O error, nilfs_msg() should be used instead.
+ * single-shot I/O error, nilfs_err() should be used instead.
  *
  * Callers should not add a trailing newline since this will do it.
  */
@@ -178,8 +184,7 @@ static int nilfs_sync_super(struct super_block *sb, int flag)
        }
 
        if (unlikely(err)) {
-               nilfs_msg(sb, KERN_ERR, "unable to write superblock: err=%d",
-                         err);
+               nilfs_err(sb, "unable to write superblock: err=%d", err);
                if (err == -EIO && nilfs->ns_sbh[1]) {
                        /*
                         * sbp[0] points to newer log than sbp[1],
@@ -249,7 +254,7 @@ struct nilfs_super_block **nilfs_prepare_super(struct super_block *sb,
                    sbp[1]->s_magic == cpu_to_le16(NILFS_SUPER_MAGIC)) {
                        memcpy(sbp[0], sbp[1], nilfs->ns_sbsize);
                } else {
-                       nilfs_msg(sb, KERN_CRIT, "superblock broke");
+                       nilfs_crit(sb, "superblock broke");
                        return NULL;
                }
        } else if (sbp[1] &&
@@ -359,9 +364,9 @@ static int nilfs_move_2nd_super(struct super_block *sb, loff_t sb2off)
        offset = sb2off & (nilfs->ns_blocksize - 1);
        nsbh = sb_getblk(sb, newblocknr);
        if (!nsbh) {
-               nilfs_msg(sb, KERN_WARNING,
-                         "unable to move secondary superblock to block %llu",
-                         (unsigned long long)newblocknr);
+               nilfs_warn(sb,
+                          "unable to move secondary superblock to block %llu",
+                          (unsigned long long)newblocknr);
                ret = -EIO;
                goto out;
        }
@@ -524,7 +529,7 @@ int nilfs_attach_checkpoint(struct super_block *sb, __u64 cno, int curr_mnt,
        up_read(&nilfs->ns_segctor_sem);
        if (unlikely(err)) {
                if (err == -ENOENT || err == -EINVAL) {
-                       nilfs_msg(sb, KERN_ERR,
+                       nilfs_err(sb,
                                  "Invalid checkpoint (checkpoint number=%llu)",
                                  (unsigned long long)cno);
                        err = -EINVAL;
@@ -622,8 +627,7 @@ static int nilfs_statfs(struct dentry *dentry, struct kstatfs *buf)
        err = nilfs_ifile_count_free_inodes(root->ifile,
                                            &nmaxinodes, &nfreeinodes);
        if (unlikely(err)) {
-               nilfs_msg(sb, KERN_WARNING,
-                         "failed to count free inodes: err=%d", err);
+               nilfs_warn(sb, "failed to count free inodes: err=%d", err);
                if (err == -ERANGE) {
                        /*
                         * If nilfs_palloc_count_max_entries() returns
@@ -755,7 +759,7 @@ static int parse_options(char *options, struct super_block *sb, int is_remount)
                        break;
                case Opt_snapshot:
                        if (is_remount) {
-                               nilfs_msg(sb, KERN_ERR,
+                               nilfs_err(sb,
                                          "\"%s\" option is invalid for remount",
                                          p);
                                return 0;
@@ -771,8 +775,7 @@ static int parse_options(char *options, struct super_block *sb, int is_remount)
                        nilfs_clear_opt(nilfs, DISCARD);
                        break;
                default:
-                       nilfs_msg(sb, KERN_ERR,
-                                 "unrecognized mount option \"%s\"", p);
+                       nilfs_err(sb, "unrecognized mount option \"%s\"", p);
                        return 0;
                }
        }
@@ -808,10 +811,10 @@ static int nilfs_setup_super(struct super_block *sb, int is_mount)
        mnt_count = le16_to_cpu(sbp[0]->s_mnt_count);
 
        if (nilfs->ns_mount_state & NILFS_ERROR_FS) {
-               nilfs_msg(sb, KERN_WARNING, "mounting fs with errors");
+               nilfs_warn(sb, "mounting fs with errors");
 #if 0
        } else if (max_mnt_count >= 0 && mnt_count >= max_mnt_count) {
-               nilfs_msg(sb, KERN_WARNING, "maximal mount count reached");
+               nilfs_warn(sb, "maximal mount count reached");
 #endif
        }
        if (!max_mnt_count)
@@ -874,7 +877,7 @@ int nilfs_check_feature_compatibility(struct super_block *sb,
        features = le64_to_cpu(sbp->s_feature_incompat) &
                ~NILFS_FEATURE_INCOMPAT_SUPP;
        if (features) {
-               nilfs_msg(sb, KERN_ERR,
+               nilfs_err(sb,
                          "couldn't mount because of unsupported optional features (%llx)",
                          (unsigned long long)features);
                return -EINVAL;
@@ -882,7 +885,7 @@ int nilfs_check_feature_compatibility(struct super_block *sb,
        features = le64_to_cpu(sbp->s_feature_compat_ro) &
                ~NILFS_FEATURE_COMPAT_RO_SUPP;
        if (!sb_rdonly(sb) && features) {
-               nilfs_msg(sb, KERN_ERR,
+               nilfs_err(sb,
                          "couldn't mount RDWR because of unsupported optional features (%llx)",
                          (unsigned long long)features);
                return -EINVAL;
@@ -901,12 +904,12 @@ static int nilfs_get_root_dentry(struct super_block *sb,
        inode = nilfs_iget(sb, root, NILFS_ROOT_INO);
        if (IS_ERR(inode)) {
                ret = PTR_ERR(inode);
-               nilfs_msg(sb, KERN_ERR, "error %d getting root inode", ret);
+               nilfs_err(sb, "error %d getting root inode", ret);
                goto out;
        }
        if (!S_ISDIR(inode->i_mode) || !inode->i_blocks || !inode->i_size) {
                iput(inode);
-               nilfs_msg(sb, KERN_ERR, "corrupt root inode");
+               nilfs_err(sb, "corrupt root inode");
                ret = -EINVAL;
                goto out;
        }
@@ -934,7 +937,7 @@ static int nilfs_get_root_dentry(struct super_block *sb,
        return ret;
 
  failed_dentry:
-       nilfs_msg(sb, KERN_ERR, "error %d getting root dentry", ret);
+       nilfs_err(sb, "error %d getting root dentry", ret);
        goto out;
 }
 
@@ -954,7 +957,7 @@ static int nilfs_attach_snapshot(struct super_block *s, __u64 cno,
                ret = (ret == -ENOENT) ? -EINVAL : ret;
                goto out;
        } else if (!ret) {
-               nilfs_msg(s, KERN_ERR,
+               nilfs_err(s,
                          "The specified checkpoint is not a snapshot (checkpoint number=%llu)",
                          (unsigned long long)cno);
                ret = -EINVAL;
@@ -963,7 +966,7 @@ static int nilfs_attach_snapshot(struct super_block *s, __u64 cno,
 
        ret = nilfs_attach_checkpoint(s, cno, false, &root);
        if (ret) {
-               nilfs_msg(s, KERN_ERR,
+               nilfs_err(s,
                          "error %d while loading snapshot (checkpoint number=%llu)",
                          ret, (unsigned long long)cno);
                goto out;
@@ -1060,7 +1063,7 @@ nilfs_fill_super(struct super_block *sb, void *data, int silent)
        cno = nilfs_last_cno(nilfs);
        err = nilfs_attach_checkpoint(sb, cno, true, &fsroot);
        if (err) {
-               nilfs_msg(sb, KERN_ERR,
+               nilfs_err(sb,
                          "error %d while loading last checkpoint (checkpoint number=%llu)",
                          err, (unsigned long long)cno);
                goto failed_unload;
@@ -1122,8 +1125,8 @@ static int nilfs_remount(struct super_block *sb, int *flags, char *data)
        err = -EINVAL;
 
        if (!nilfs_valid_fs(nilfs)) {
-               nilfs_msg(sb, KERN_WARNING,
-                         "couldn't remount because the filesystem is in an incomplete recovery state");
+               nilfs_warn(sb,
+                          "couldn't remount because the filesystem is in an incomplete recovery state");
                goto restore_opts;
        }
 
@@ -1155,9 +1158,9 @@ static int nilfs_remount(struct super_block *sb, int *flags, char *data)
                        ~NILFS_FEATURE_COMPAT_RO_SUPP;
                up_read(&nilfs->ns_sem);
                if (features) {
-                       nilfs_msg(sb, KERN_WARNING,
-                                 "couldn't remount RDWR because of unsupported optional features (%llx)",
-                                 (unsigned long long)features);
+                       nilfs_warn(sb,
+                                  "couldn't remount RDWR because of unsupported optional features (%llx)",
+                                  (unsigned long long)features);
                        err = -EROFS;
                        goto restore_opts;
                }
@@ -1216,7 +1219,7 @@ static int nilfs_parse_snapshot_option(const char *option,
        return 0;
 
 parse_error:
-       nilfs_msg(NULL, KERN_ERR, "invalid option \"%s\": %s", option, msg);
+       nilfs_err(NULL, "invalid option \"%s\": %s", option, msg);
        return 1;
 }
 
@@ -1319,7 +1322,7 @@ nilfs_mount(struct file_system_type *fs_type, int flags,
        } else if (!sd.cno) {
                if (nilfs_tree_is_busy(s->s_root)) {
                        if ((flags ^ s->s_flags) & SB_RDONLY) {
-                               nilfs_msg(s, KERN_ERR,
+                               nilfs_err(s,
                                          "the device already has a %s mount.",
                                          sb_rdonly(s) ? "read-only" : "read/write");
                                err = -EBUSY;
index e60be7b..303d714 100644 (file)
@@ -263,8 +263,8 @@ nilfs_checkpoints_checkpoints_number_show(struct nilfs_checkpoints_attr *attr,
        err = nilfs_cpfile_get_stat(nilfs->ns_cpfile, &cpstat);
        up_read(&nilfs->ns_segctor_sem);
        if (err < 0) {
-               nilfs_msg(nilfs->ns_sb, KERN_ERR,
-                         "unable to get checkpoint stat: err=%d", err);
+               nilfs_err(nilfs->ns_sb, "unable to get checkpoint stat: err=%d",
+                         err);
                return err;
        }
 
@@ -286,8 +286,8 @@ nilfs_checkpoints_snapshots_number_show(struct nilfs_checkpoints_attr *attr,
        err = nilfs_cpfile_get_stat(nilfs->ns_cpfile, &cpstat);
        up_read(&nilfs->ns_segctor_sem);
        if (err < 0) {
-               nilfs_msg(nilfs->ns_sb, KERN_ERR,
-                         "unable to get checkpoint stat: err=%d", err);
+               nilfs_err(nilfs->ns_sb, "unable to get checkpoint stat: err=%d",
+                         err);
                return err;
        }
 
@@ -405,8 +405,8 @@ nilfs_segments_dirty_segments_show(struct nilfs_segments_attr *attr,
        err = nilfs_sufile_get_stat(nilfs->ns_sufile, &sustat);
        up_read(&nilfs->ns_segctor_sem);
        if (err < 0) {
-               nilfs_msg(nilfs->ns_sb, KERN_ERR,
-                         "unable to get segment stat: err=%d", err);
+               nilfs_err(nilfs->ns_sb, "unable to get segment stat: err=%d",
+                         err);
                return err;
        }
 
@@ -779,15 +779,15 @@ nilfs_superblock_sb_update_frequency_store(struct nilfs_superblock_attr *attr,
 
        err = kstrtouint(skip_spaces(buf), 0, &val);
        if (err) {
-               nilfs_msg(nilfs->ns_sb, KERN_ERR,
-                         "unable to convert string: err=%d", err);
+               nilfs_err(nilfs->ns_sb, "unable to convert string: err=%d",
+                         err);
                return err;
        }
 
        if (val < NILFS_SB_FREQ) {
                val = NILFS_SB_FREQ;
-               nilfs_msg(nilfs->ns_sb, KERN_WARNING,
-                         "superblock update frequency cannot be lesser than 10 seconds");
+               nilfs_warn(nilfs->ns_sb,
+                          "superblock update frequency cannot be lesser than 10 seconds");
        }
 
        down_write(&nilfs->ns_sem);
@@ -990,8 +990,7 @@ int nilfs_sysfs_create_device_group(struct super_block *sb)
        nilfs->ns_dev_subgroups = kzalloc(devgrp_size, GFP_KERNEL);
        if (unlikely(!nilfs->ns_dev_subgroups)) {
                err = -ENOMEM;
-               nilfs_msg(sb, KERN_ERR,
-                         "unable to allocate memory for device group");
+               nilfs_err(sb, "unable to allocate memory for device group");
                goto failed_create_device_group;
        }
 
@@ -1101,15 +1100,13 @@ int __init nilfs_sysfs_init(void)
        nilfs_kset = kset_create_and_add(NILFS_ROOT_GROUP_NAME, NULL, fs_kobj);
        if (!nilfs_kset) {
                err = -ENOMEM;
-               nilfs_msg(NULL, KERN_ERR,
-                         "unable to create sysfs entry: err=%d", err);
+               nilfs_err(NULL, "unable to create sysfs entry: err=%d", err);
                goto failed_sysfs_init;
        }
 
        err = sysfs_create_group(&nilfs_kset->kobj, &nilfs_feature_attr_group);
        if (unlikely(err)) {
-               nilfs_msg(NULL, KERN_ERR,
-                         "unable to create feature group: err=%d", err);
+               nilfs_err(NULL, "unable to create feature group: err=%d", err);
                goto cleanup_sysfs_init;
        }
 
index 484785c..221a1cc 100644 (file)
@@ -183,7 +183,7 @@ static int nilfs_store_log_cursor(struct the_nilfs *nilfs,
                nilfs_get_segnum_of_block(nilfs, nilfs->ns_last_pseg);
        nilfs->ns_cno = nilfs->ns_last_cno + 1;
        if (nilfs->ns_segnum >= nilfs->ns_nsegments) {
-               nilfs_msg(nilfs->ns_sb, KERN_ERR,
+               nilfs_err(nilfs->ns_sb,
                          "pointed segment number is out of range: segnum=%llu, nsegments=%lu",
                          (unsigned long long)nilfs->ns_segnum,
                          nilfs->ns_nsegments);
@@ -210,12 +210,12 @@ int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb)
        int err;
 
        if (!valid_fs) {
-               nilfs_msg(sb, KERN_WARNING, "mounting unchecked fs");
+               nilfs_warn(sb, "mounting unchecked fs");
                if (s_flags & SB_RDONLY) {
-                       nilfs_msg(sb, KERN_INFO,
-                                 "recovery required for readonly filesystem");
-                       nilfs_msg(sb, KERN_INFO,
-                                 "write access will be enabled during recovery");
+                       nilfs_info(sb,
+                                  "recovery required for readonly filesystem");
+                       nilfs_info(sb,
+                                  "write access will be enabled during recovery");
                }
        }
 
@@ -230,12 +230,11 @@ int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb)
                        goto scan_error;
 
                if (!nilfs_valid_sb(sbp[1])) {
-                       nilfs_msg(sb, KERN_WARNING,
-                                 "unable to fall back to spare super block");
+                       nilfs_warn(sb,
+                                  "unable to fall back to spare super block");
                        goto scan_error;
                }
-               nilfs_msg(sb, KERN_INFO,
-                         "trying rollback from an earlier position");
+               nilfs_info(sb, "trying rollback from an earlier position");
 
                /*
                 * restore super block with its spare and reconfigure
@@ -248,9 +247,9 @@ int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb)
                /* verify consistency between two super blocks */
                blocksize = BLOCK_SIZE << le32_to_cpu(sbp[0]->s_log_block_size);
                if (blocksize != nilfs->ns_blocksize) {
-                       nilfs_msg(sb, KERN_WARNING,
-                                 "blocksize differs between two super blocks (%d != %d)",
-                                 blocksize, nilfs->ns_blocksize);
+                       nilfs_warn(sb,
+                                  "blocksize differs between two super blocks (%d != %d)",
+                                  blocksize, nilfs->ns_blocksize);
                        goto scan_error;
                }
 
@@ -269,8 +268,7 @@ int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb)
 
        err = nilfs_load_super_root(nilfs, sb, ri.ri_super_root);
        if (unlikely(err)) {
-               nilfs_msg(sb, KERN_ERR, "error %d while loading super root",
-                         err);
+               nilfs_err(sb, "error %d while loading super root", err);
                goto failed;
        }
 
@@ -281,28 +279,28 @@ int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb)
                __u64 features;
 
                if (nilfs_test_opt(nilfs, NORECOVERY)) {
-                       nilfs_msg(sb, KERN_INFO,
-                                 "norecovery option specified, skipping roll-forward recovery");
+                       nilfs_info(sb,
+                                  "norecovery option specified, skipping roll-forward recovery");
                        goto skip_recovery;
                }
                features = le64_to_cpu(nilfs->ns_sbp[0]->s_feature_compat_ro) &
                        ~NILFS_FEATURE_COMPAT_RO_SUPP;
                if (features) {
-                       nilfs_msg(sb, KERN_ERR,
+                       nilfs_err(sb,
                                  "couldn't proceed with recovery because of unsupported optional features (%llx)",
                                  (unsigned long long)features);
                        err = -EROFS;
                        goto failed_unload;
                }
                if (really_read_only) {
-                       nilfs_msg(sb, KERN_ERR,
+                       nilfs_err(sb,
                                  "write access unavailable, cannot proceed");
                        err = -EROFS;
                        goto failed_unload;
                }
                sb->s_flags &= ~SB_RDONLY;
        } else if (nilfs_test_opt(nilfs, NORECOVERY)) {
-               nilfs_msg(sb, KERN_ERR,
+               nilfs_err(sb,
                          "recovery cancelled because norecovery option was specified for a read/write mount");
                err = -EINVAL;
                goto failed_unload;
@@ -318,12 +316,12 @@ int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb)
        up_write(&nilfs->ns_sem);
 
        if (err) {
-               nilfs_msg(sb, KERN_ERR,
+               nilfs_err(sb,
                          "error %d updating super block. recovery unfinished.",
                          err);
                goto failed_unload;
        }
-       nilfs_msg(sb, KERN_INFO, "recovery complete");
+       nilfs_info(sb, "recovery complete");
 
  skip_recovery:
        nilfs_clear_recovery_info(&ri);
@@ -331,7 +329,7 @@ int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb)
        return 0;
 
  scan_error:
-       nilfs_msg(sb, KERN_ERR, "error %d while searching super root", err);
+       nilfs_err(sb, "error %d while searching super root", err);
        goto failed;
 
  failed_unload:
@@ -378,7 +376,7 @@ static int nilfs_store_disk_layout(struct the_nilfs *nilfs,
                                   struct nilfs_super_block *sbp)
 {
        if (le32_to_cpu(sbp->s_rev_level) < NILFS_MIN_SUPP_REV) {
-               nilfs_msg(nilfs->ns_sb, KERN_ERR,
+               nilfs_err(nilfs->ns_sb,
                          "unsupported revision (superblock rev.=%d.%d, current rev.=%d.%d). Please check the version of mkfs.nilfs(2).",
                          le32_to_cpu(sbp->s_rev_level),
                          le16_to_cpu(sbp->s_minor_rev_level),
@@ -391,13 +389,11 @@ static int nilfs_store_disk_layout(struct the_nilfs *nilfs,
 
        nilfs->ns_inode_size = le16_to_cpu(sbp->s_inode_size);
        if (nilfs->ns_inode_size > nilfs->ns_blocksize) {
-               nilfs_msg(nilfs->ns_sb, KERN_ERR,
-                         "too large inode size: %d bytes",
+               nilfs_err(nilfs->ns_sb, "too large inode size: %d bytes",
                          nilfs->ns_inode_size);
                return -EINVAL;
        } else if (nilfs->ns_inode_size < NILFS_MIN_INODE_SIZE) {
-               nilfs_msg(nilfs->ns_sb, KERN_ERR,
-                         "too small inode size: %d bytes",
+               nilfs_err(nilfs->ns_sb, "too small inode size: %d bytes",
                          nilfs->ns_inode_size);
                return -EINVAL;
        }
@@ -406,8 +402,7 @@ static int nilfs_store_disk_layout(struct the_nilfs *nilfs,
 
        nilfs->ns_blocks_per_segment = le32_to_cpu(sbp->s_blocks_per_segment);
        if (nilfs->ns_blocks_per_segment < NILFS_SEG_MIN_BLOCKS) {
-               nilfs_msg(nilfs->ns_sb, KERN_ERR,
-                         "too short segment: %lu blocks",
+               nilfs_err(nilfs->ns_sb, "too short segment: %lu blocks",
                          nilfs->ns_blocks_per_segment);
                return -EINVAL;
        }
@@ -417,7 +412,7 @@ static int nilfs_store_disk_layout(struct the_nilfs *nilfs,
                le32_to_cpu(sbp->s_r_segments_percentage);
        if (nilfs->ns_r_segments_percentage < 1 ||
            nilfs->ns_r_segments_percentage > 99) {
-               nilfs_msg(nilfs->ns_sb, KERN_ERR,
+               nilfs_err(nilfs->ns_sb,
                          "invalid reserved segments percentage: %lu",
                          nilfs->ns_r_segments_percentage);
                return -EINVAL;
@@ -503,16 +498,16 @@ static int nilfs_load_super_block(struct the_nilfs *nilfs,
 
        if (!sbp[0]) {
                if (!sbp[1]) {
-                       nilfs_msg(sb, KERN_ERR, "unable to read superblock");
+                       nilfs_err(sb, "unable to read superblock");
                        return -EIO;
                }
-               nilfs_msg(sb, KERN_WARNING,
-                         "unable to read primary superblock (blocksize = %d)",
-                         blocksize);
+               nilfs_warn(sb,
+                          "unable to read primary superblock (blocksize = %d)",
+                          blocksize);
        } else if (!sbp[1]) {
-               nilfs_msg(sb, KERN_WARNING,
-                         "unable to read secondary superblock (blocksize = %d)",
-                         blocksize);
+               nilfs_warn(sb,
+                          "unable to read secondary superblock (blocksize = %d)",
+                          blocksize);
        }
 
        /*
@@ -534,14 +529,14 @@ static int nilfs_load_super_block(struct the_nilfs *nilfs,
        }
        if (!valid[swp]) {
                nilfs_release_super_block(nilfs);
-               nilfs_msg(sb, KERN_ERR, "couldn't find nilfs on the device");
+               nilfs_err(sb, "couldn't find nilfs on the device");
                return -EINVAL;
        }
 
        if (!valid[!swp])
-               nilfs_msg(sb, KERN_WARNING,
-                         "broken superblock, retrying with spare superblock (blocksize = %d)",
-                         blocksize);
+               nilfs_warn(sb,
+                          "broken superblock, retrying with spare superblock (blocksize = %d)",
+                          blocksize);
        if (swp)
                nilfs_swap_super_block(nilfs);
 
@@ -575,7 +570,7 @@ int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb, char *data)
 
        blocksize = sb_min_blocksize(sb, NILFS_MIN_BLOCK_SIZE);
        if (!blocksize) {
-               nilfs_msg(sb, KERN_ERR, "unable to set blocksize");
+               nilfs_err(sb, "unable to set blocksize");
                err = -EINVAL;
                goto out;
        }
@@ -594,7 +589,7 @@ int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb, char *data)
        blocksize = BLOCK_SIZE << le32_to_cpu(sbp->s_log_block_size);
        if (blocksize < NILFS_MIN_BLOCK_SIZE ||
            blocksize > NILFS_MAX_BLOCK_SIZE) {
-               nilfs_msg(sb, KERN_ERR,
+               nilfs_err(sb,
                          "couldn't mount because of unsupported filesystem blocksize %d",
                          blocksize);
                err = -EINVAL;
@@ -604,7 +599,7 @@ int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb, char *data)
                int hw_blocksize = bdev_logical_block_size(sb->s_bdev);
 
                if (blocksize < hw_blocksize) {
-                       nilfs_msg(sb, KERN_ERR,
+                       nilfs_err(sb,
                                  "blocksize %d too small for device (sector-size = %d)",
                                  blocksize, hw_blocksize);
                        err = -EINVAL;
index c80e9f4..9af548f 100644 (file)
--- a/fs/open.c
+++ b/fs/open.c
@@ -779,12 +779,6 @@ static int do_dentry_open(struct file *f,
                return 0;
        }
 
-       /* Any file opened for execve()/uselib() has to be a regular file. */
-       if (unlikely(f->f_flags & FMODE_EXEC && !S_ISREG(inode->i_mode))) {
-               error = -EACCES;
-               goto cleanup_file;
-       }
-
        if (f->f_mode & FMODE_WRITE && !special_file(inode->i_mode)) {
                error = get_write_access(inode);
                if (unlikely(error))
index eced272..a25e6c8 100644 (file)
@@ -122,6 +122,8 @@ int orangefs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
        struct iattr iattr;
        int rc;
 
+       memset(&iattr, 0, sizeof iattr);
+
        if (type == ACL_TYPE_ACCESS && acl) {
                /*
                 * posix_acl_update_mode checks to see if the permissions
@@ -138,18 +140,17 @@ int orangefs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
                        return error;
                }
 
-               if (acl) {
-                       rc = __orangefs_set_acl(inode, acl, type);
-               } else {
+               if (inode->i_mode != iattr.ia_mode)
                        iattr.ia_valid = ATTR_MODE;
-                       rc = __orangefs_setattr(inode, &iattr);
-               }
 
-               return rc;
-
-       } else {
-               return -EINVAL;
        }
+
+       rc = __orangefs_set_acl(inode, acl, type);
+
+       if (!rc && (iattr.ia_valid == ATTR_MODE))
+               rc = __orangefs_setattr(inode, &iattr);
+
+       return rc;
 }
 
 int orangefs_init_acl(struct inode *inode, struct inode *dir)
index 289b648..74a3d63 100644 (file)
@@ -149,7 +149,6 @@ static int __init orangefs_init(void)
                pr_info("%s: module version %s loaded\n",
                        __func__,
                        ORANGEFS_VERSION);
-               ret = 0;
                goto out;
        }
 
index a333cae..617db4e 100644 (file)
@@ -551,8 +551,17 @@ static int proc_oom_score(struct seq_file *m, struct pid_namespace *ns,
 {
        unsigned long totalpages = totalram_pages() + total_swap_pages;
        unsigned long points = 0;
+       long badness;
+
+       badness = oom_badness(task, totalpages);
+       /*
+        * Special case OOM_SCORE_ADJ_MIN; for all others, scale the
+        * badness value into the [0, 2000] range which we have been
+        * exporting for a long time, so userspace might depend on it.
+        */
+       if (badness != LONG_MIN)
+               points = (1000 + badness * 1000 / (long)totalpages) * 2 / 3;
 
-       points = oom_badness(task, totalpages) * 1000 / totalpages;
        seq_printf(m, "%lu\n", points);
 
        return 0;
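
As a quick check of the arithmetic in the hunk above: with badness roughly in the range -totalpages to 2 * totalpages, (1000 + badness * 1000 / totalpages) * 2 / 3 lands in the 0..2000 range the comment refers to. A small stand-alone sketch of the scaling (the totalpages and badness values below are made up for illustration; only the formula comes from the patch):

#include <stdio.h>
#include <limits.h>

/* illustrative copy of the scaling done in the patched proc_oom_score() */
static unsigned long scale_oom_score(long badness, unsigned long totalpages)
{
	unsigned long points = 0;

	if (badness != LONG_MIN)
		points = (1000 + badness * 1000 / (long)totalpages) * 2 / 3;
	return points;
}

int main(void)
{
	unsigned long totalpages = 1000000;	/* hypothetical page count */

	/* badness of totalpages / 2 scales to (1000 + 500) * 2 / 3 = 1000 */
	printf("%lu\n", scale_oom_score(500000, totalpages));
	/* OOM_SCORE_ADJ_MIN tasks (badness == LONG_MIN) stay at 0 */
	printf("%lu\n", scale_oom_score(LONG_MIN, totalpages));
	return 0;
}
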
index dbda449..5066b02 100644 (file)
@@ -786,7 +786,7 @@ static void __show_smap(struct seq_file *m, const struct mem_size_stats *mss,
        SEQ_PUT_DEC(" kB\nLazyFree:       ", mss->lazyfree);
        SEQ_PUT_DEC(" kB\nAnonHugePages:  ", mss->anonymous_thp);
        SEQ_PUT_DEC(" kB\nShmemPmdMapped: ", mss->shmem_thp);
-       SEQ_PUT_DEC(" kB\nFilePmdMapped: ", mss->file_thp);
+       SEQ_PUT_DEC(" kB\nFilePmdMapped:  ", mss->file_thp);
        SEQ_PUT_DEC(" kB\nShared_Hugetlb: ", mss->shared_hugetlb);
        seq_put_decimal_ull_width(m, " kB\nPrivate_Hugetlb: ",
                                  mss->private_hugetlb >> 10, 7);
@@ -816,7 +816,7 @@ static int show_smap(struct seq_file *m, void *v)
 
        __show_smap(m, &mss, false);
 
-       seq_printf(m, "THPeligible:             %d\n",
+       seq_printf(m, "THPeligible:    %d\n",
                   transparent_hugepage_enabled(vma));
 
        if (arch_pkeys_enabled())
index 6b2b436..b57b3ff 100644 (file)
@@ -217,10 +217,8 @@ int romfs_dev_read(struct super_block *sb, unsigned long pos,
        size_t limit;
 
        limit = romfs_maxsize(sb);
-       if (pos >= limit)
+       if (pos >= limit || buflen > limit - pos)
                return -EIO;
-       if (buflen > limit - pos)
-               buflen = limit - pos;
 
 #ifdef CONFIG_ROMFS_ON_MTD
        if (sb->s_mtd)
index 44b6845..5b78719 100644 (file)
@@ -314,9 +314,10 @@ SYSCALL_DEFINE4(signalfd4, int, ufd, sigset_t __user *, user_mask,
 {
        sigset_t mask;
 
-       if (sizemask != sizeof(sigset_t) ||
-           copy_from_user(&mask, user_mask, sizeof(mask)))
+       if (sizemask != sizeof(sigset_t))
                return -EINVAL;
+       if (copy_from_user(&mask, user_mask, sizeof(mask)))
+               return -EFAULT;
        return do_signalfd4(ufd, &mask, flags);
 }
 
@@ -325,9 +326,10 @@ SYSCALL_DEFINE3(signalfd, int, ufd, sigset_t __user *, user_mask,
 {
        sigset_t mask;
 
-       if (sizemask != sizeof(sigset_t) ||
-           copy_from_user(&mask, user_mask, sizeof(mask)))
+       if (sizemask != sizeof(sigset_t))
                return -EINVAL;
+       if (copy_from_user(&mask, user_mask, sizeof(mask)))
+               return -EFAULT;
        return do_signalfd4(ufd, &mask, 0);
 }
 
index 76bb1c8..8a19773 100644 (file)
@@ -87,7 +87,11 @@ static int squashfs_bio_read(struct super_block *sb, u64 index, int length,
        int error, i;
        struct bio *bio;
 
-       bio = bio_alloc(GFP_NOIO, page_count);
+       if (page_count <= BIO_MAX_PAGES)
+               bio = bio_alloc(GFP_NOIO, page_count);
+       else
+               bio = bio_kmalloc(GFP_NOIO, page_count);
+
        if (!bio)
                return -ENOMEM;
 
index 2e62643..4a5b06f 100644 (file)
@@ -539,7 +539,7 @@ int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir,
                     const struct fscrypt_name *nm, const struct inode *inode,
                     int deletion, int xent)
 {
-       int err, dlen, ilen, len, lnum, ino_offs, dent_offs;
+       int err, dlen, ilen, len, lnum, ino_offs, dent_offs, orphan_added = 0;
        int aligned_dlen, aligned_ilen, sync = IS_DIRSYNC(dir);
        int last_reference = !!(deletion && inode->i_nlink == 0);
        struct ubifs_inode *ui = ubifs_inode(inode);
@@ -630,6 +630,7 @@ int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir,
                        goto out_finish;
                }
                ui->del_cmtno = c->cmt_no;
+               orphan_added = 1;
        }
 
        err = write_head(c, BASEHD, dent, len, &lnum, &dent_offs, sync);
@@ -702,7 +703,7 @@ out_release:
        kfree(dent);
 out_ro:
        ubifs_ro_mode(c, err);
-       if (last_reference)
+       if (orphan_added)
                ubifs_delete_orphan(c, inode->i_ino);
        finish_reservation(c);
        return err;
@@ -1218,7 +1219,7 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
        void *p;
        union ubifs_key key;
        struct ubifs_dent_node *dent, *dent2;
-       int err, dlen1, dlen2, ilen, lnum, offs, len;
+       int err, dlen1, dlen2, ilen, lnum, offs, len, orphan_added = 0;
        int aligned_dlen1, aligned_dlen2, plen = UBIFS_INO_NODE_SZ;
        int last_reference = !!(new_inode && new_inode->i_nlink == 0);
        int move = (old_dir != new_dir);
@@ -1334,6 +1335,7 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
                        goto out_finish;
                }
                new_ui->del_cmtno = c->cmt_no;
+               orphan_added = 1;
        }
 
        err = write_head(c, BASEHD, dent, len, &lnum, &offs, sync);
@@ -1415,7 +1417,7 @@ out_release:
        release_head(c, BASEHD);
 out_ro:
        ubifs_ro_mode(c, err);
-       if (last_reference)
+       if (orphan_added)
                ubifs_delete_orphan(c, new_inode->i_ino);
 out_finish:
        finish_reservation(c);
index c97a4d5..615878e 100644 (file)
@@ -121,7 +121,7 @@ static inline const char *ubifs_compr_name(struct ubifs_info *c, int compr_type)
  * ubifs_wbuf_sync - synchronize write-buffer.
  * @wbuf: write-buffer to synchronize
  *
- * This is the same as as 'ubifs_wbuf_sync_nolock()' but it does not assume
+ * This is the same as 'ubifs_wbuf_sync_nolock()' but it does not assume
  * that the write-buffer is already locked.
  */
 static inline int ubifs_wbuf_sync(struct ubifs_wbuf *wbuf)
index 4b4b65b..c0d3e40 100644 (file)
@@ -174,7 +174,8 @@ static int create_default_filesystem(struct ubifs_info *c)
        tmp64 = (long long)max_buds * c->leb_size;
        if (big_lpt)
                sup_flags |= UBIFS_FLG_BIGLPT;
-       sup_flags |= UBIFS_FLG_DOUBLE_HASH;
+       if (ubifs_default_version > 4)
+               sup_flags |= UBIFS_FLG_DOUBLE_HASH;
 
        if (ubifs_authenticated(c)) {
                sup_flags |= UBIFS_FLG_AUTHENTICATION;
@@ -200,7 +201,7 @@ static int create_default_filesystem(struct ubifs_info *c)
        sup->jhead_cnt     = cpu_to_le32(DEFAULT_JHEADS_CNT);
        sup->fanout        = cpu_to_le32(DEFAULT_FANOUT);
        sup->lsave_cnt     = cpu_to_le32(c->lsave_cnt);
-       sup->fmt_version   = cpu_to_le32(UBIFS_FORMAT_VERSION);
+       sup->fmt_version   = cpu_to_le32(ubifs_default_version);
        sup->time_gran     = cpu_to_le32(DEFAULT_TIME_GRAN);
        if (c->mount_opts.override_compr)
                sup->default_compr = cpu_to_le16(c->mount_opts.compr_type);
index 7fc2f3f..a2420c9 100644 (file)
 #include <linux/writeback.h>
 #include "ubifs.h"
 
+static int ubifs_default_version_set(const char *val, const struct kernel_param *kp)
+{
+       int n = 0, ret;
+
+       ret = kstrtoint(val, 10, &n);
+       if (ret != 0 || n < 4 || n > UBIFS_FORMAT_VERSION)
+               return -EINVAL;
+       return param_set_int(val, kp);
+}
+
+static const struct kernel_param_ops ubifs_default_version_ops = {
+       .set = ubifs_default_version_set,
+       .get = param_get_int,
+};
+
+int ubifs_default_version = UBIFS_FORMAT_VERSION;
+module_param_cb(default_version, &ubifs_default_version_ops, &ubifs_default_version, 0600);
+
 /*
  * Maximum amount of memory we may 'kmalloc()' without worrying that we are
  * allocating too much.
index bff6823..4ffd832 100644 (file)
@@ -1504,6 +1504,7 @@ extern const struct file_operations ubifs_dir_operations;
 extern const struct inode_operations ubifs_dir_inode_operations;
 extern const struct inode_operations ubifs_symlink_inode_operations;
 extern struct ubifs_compressor *ubifs_compressors[UBIFS_COMPR_TYPES_CNT];
+extern int ubifs_default_version;
 
 /* auth.c */
 static inline int ubifs_authenticated(const struct ubifs_info *c)
index 1da0be6..e3b69fb 100644 (file)
@@ -101,7 +101,7 @@ static struct inode *ufs_nfs_get_inode(struct super_block *sb, u64 ino, u32 gene
        struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
        struct inode *inode;
 
-       if (ino < UFS_ROOTINO || ino > uspi->s_ncg * uspi->s_ipg)
+       if (ino < UFS_ROOTINO || ino > (u64)uspi->s_ncg * uspi->s_ipg)
                return ERR_PTR(-ESTALE);
 
        inode = ufs_iget(sb, ino);
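
The added (u64) cast matters because, assuming s_ncg and s_ipg are 32-bit superblock fields, the product s_ncg * s_ipg is evaluated in 32-bit arithmetic and can wrap before the comparison, so the upper bound used for the -ESTALE check is wrong on very large filesystems. A stand-alone sketch of the wraparound (the values are made up for illustration):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* hypothetical values: many cylinder groups times inodes per group */
	uint32_t s_ncg = 100000, s_ipg = 65536;

	/* the 32-bit product wraps around 2^32 ... */
	uint32_t wrapped = s_ncg * s_ipg;
	/* ... while widening one operand first keeps the full value */
	uint64_t widened = (uint64_t)s_ncg * s_ipg;

	printf("wrapped=%u widened=%llu\n",
	       wrapped, (unsigned long long)widened);
	return 0;
}
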
index 6e264dd..0e4a383 100644 (file)
@@ -61,7 +61,7 @@ struct userfaultfd_ctx {
        /* waitqueue head for events */
        wait_queue_head_t event_wqh;
        /* a refile sequence protected by fault_pending_wqh lock */
-       struct seqcount refile_seq;
+       seqcount_spinlock_t refile_seq;
        /* pseudo fd refcounting */
        refcount_t refcount;
        /* userfaultfd syscall flags */
@@ -1961,7 +1961,7 @@ static void init_once_userfaultfd_ctx(void *mem)
        init_waitqueue_head(&ctx->fault_wqh);
        init_waitqueue_head(&ctx->event_wqh);
        init_waitqueue_head(&ctx->fd_wqh);
-       seqcount_init(&ctx->refile_seq);
+       seqcount_spinlock_init(&ctx->refile_seq, &ctx->fault_pending_wqh.lock);
 }
 
 SYSCALL_DEFINE1(userfaultfd, int, flags)
index 91608d9..386b456 100644 (file)
@@ -134,6 +134,33 @@ xattr_permission(struct inode *inode, const char *name, int mask)
        return inode_permission(inode, mask);
 }
 
+/*
+ * Look for any handler that deals with the specified namespace.
+ */
+int
+xattr_supported_namespace(struct inode *inode, const char *prefix)
+{
+       const struct xattr_handler **handlers = inode->i_sb->s_xattr;
+       const struct xattr_handler *handler;
+       size_t preflen;
+
+       if (!(inode->i_opflags & IOP_XATTR)) {
+               if (unlikely(is_bad_inode(inode)))
+                       return -EIO;
+               return -EOPNOTSUPP;
+       }
+
+       preflen = strlen(prefix);
+
+       for_each_xattr_handler(handlers, handler) {
+               if (!strncmp(xattr_prefix(handler), prefix, preflen))
+                       return 0;
+       }
+
+       return -EOPNOTSUPP;
+}
+EXPORT_SYMBOL(xattr_supported_namespace);
+
 int
 __vfs_setxattr(struct dentry *dentry, struct inode *inode, const char *name,
               const void *value, size_t size, int flags)
@@ -204,10 +231,22 @@ int __vfs_setxattr_noperm(struct dentry *dentry, const char *name,
        return error;
 }
 
-
+/**
+ * __vfs_setxattr_locked: set an extended attribute while holding the inode
+ * lock
+ *
+ *  @dentry - object to perform setxattr on
+ *  @name - xattr name to set
+ *  @value - value to set @name to
+ *  @size - size of @value
+ *  @flags - flags to pass into filesystem operations
+ *  @delegated_inode - on return, will contain an inode pointer that
+ *  a delegation was broken on, NULL if none.
+ */
 int
-vfs_setxattr(struct dentry *dentry, const char *name, const void *value,
-               size_t size, int flags)
+__vfs_setxattr_locked(struct dentry *dentry, const char *name,
+               const void *value, size_t size, int flags,
+               struct inode **delegated_inode)
 {
        struct inode *inode = dentry->d_inode;
        int error;
@@ -216,15 +255,40 @@ vfs_setxattr(struct dentry *dentry, const char *name, const void *value,
        if (error)
                return error;
 
-       inode_lock(inode);
        error = security_inode_setxattr(dentry, name, value, size, flags);
        if (error)
                goto out;
 
+       error = try_break_deleg(inode, delegated_inode);
+       if (error)
+               goto out;
+
        error = __vfs_setxattr_noperm(dentry, name, value, size, flags);
 
 out:
+       return error;
+}
+EXPORT_SYMBOL_GPL(__vfs_setxattr_locked);
+
+int
+vfs_setxattr(struct dentry *dentry, const char *name, const void *value,
+               size_t size, int flags)
+{
+       struct inode *inode = dentry->d_inode;
+       struct inode *delegated_inode = NULL;
+       int error;
+
+retry_deleg:
+       inode_lock(inode);
+       error = __vfs_setxattr_locked(dentry, name, value, size, flags,
+           &delegated_inode);
        inode_unlock(inode);
+
+       if (delegated_inode) {
+               error = break_deleg_wait(&delegated_inode);
+               if (!error)
+                       goto retry_deleg;
+       }
        return error;
 }
 EXPORT_SYMBOL_GPL(vfs_setxattr);
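vfs_setxattr() now follows the same retry-on-delegation dance used by vfs_unlink() and vfs_rename(): do the work with the inode lock held, and if __vfs_setxattr_locked() had to break an NFS delegation, drop the lock, wait for the break to complete, and try again. A hedged, stripped-down sketch of that shape with illustrative names:

/* Generic retry-on-delegation shape, illustrative only. */
static int locked_op_with_deleg_retry(struct inode *inode, void *arg,
				      int (*op)(struct inode *, void *,
						struct inode **))
{
	struct inode *delegated_inode = NULL;
	int error;

retry:
	inode_lock(inode);
	error = op(inode, arg, &delegated_inode);  /* may call try_break_deleg() */
	inode_unlock(inode);

	if (delegated_inode) {
		error = break_deleg_wait(&delegated_inode);
		if (!error)
			goto retry;
	}
	return error;
}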
@@ -378,8 +442,18 @@ __vfs_removexattr(struct dentry *dentry, const char *name)
 }
 EXPORT_SYMBOL(__vfs_removexattr);
 
+/**
+ * __vfs_removexattr_locked: remove an extended attribute while holding the
+ * inode lock
+ *
+ *  @dentry - object to perform removexattr on
+ *  @name - name of xattr to remove
+ *  @delegated_inode - on return, will contain an inode pointer that
+ *  a delegation was broken on, NULL if none.
+ */
 int
-vfs_removexattr(struct dentry *dentry, const char *name)
+__vfs_removexattr_locked(struct dentry *dentry, const char *name,
+               struct inode **delegated_inode)
 {
        struct inode *inode = dentry->d_inode;
        int error;
@@ -388,11 +462,14 @@ vfs_removexattr(struct dentry *dentry, const char *name)
        if (error)
                return error;
 
-       inode_lock(inode);
        error = security_inode_removexattr(dentry, name);
        if (error)
                goto out;
 
+       error = try_break_deleg(inode, delegated_inode);
+       if (error)
+               goto out;
+
        error = __vfs_removexattr(dentry, name);
 
        if (!error) {
@@ -401,12 +478,32 @@ vfs_removexattr(struct dentry *dentry, const char *name)
        }
 
 out:
+       return error;
+}
+EXPORT_SYMBOL_GPL(__vfs_removexattr_locked);
+
+int
+vfs_removexattr(struct dentry *dentry, const char *name)
+{
+       struct inode *inode = dentry->d_inode;
+       struct inode *delegated_inode = NULL;
+       int error;
+
+retry_deleg:
+       inode_lock(inode);
+       error = __vfs_removexattr_locked(dentry, name, &delegated_inode);
        inode_unlock(inode);
+
+       if (delegated_inode) {
+               error = break_deleg_wait(&delegated_inode);
+               if (!error)
+                       goto retry_deleg;
+       }
+
        return error;
 }
 EXPORT_SYMBOL_GPL(vfs_removexattr);
 
-
 /*
  * Extended attribute SET operations
  */
index 4df8754..ae9aaf1 100644 (file)
@@ -600,7 +600,7 @@ xfs_sb_quota_to_disk(
         * disk. If neither are active, we should NULL the inode.
         *
         * In all cases, the separate pquotino must remain 0 because it
-        * it beyond the "end" of the valid non-pquotino superblock.
+        * is beyond the "end" of the valid non-pquotino superblock.
         */
        if (from->sb_qflags & XFS_GQUOTA_ACCT)
                to->sb_gquotino = cpu_to_be64(from->sb_gquotino);
index e380bd1..50f922c 100644 (file)
@@ -44,7 +44,7 @@ xfs_attr_shortform_compare(const void *a, const void *b)
 /*
  * Copy out entries of shortform attribute lists for attr_list().
  * Shortform attribute lists are not stored in hashval sorted order.
- * If the output buffer is not large enough to hold them all, then we
+ * If the output buffer is not large enough to hold them all, then
  * we have to calculate each entries' hashvalue and sort them before
  * we can begin returning them to the user.
  */
index 5bb6f22..408d1b5 100644 (file)
@@ -127,7 +127,7 @@ xfs_buf_item_size_segment(
  * stretch of non-contiguous chunks to be logged.  Contiguous chunks are logged
  * in a single iovec.
  *
- * Discontiguous buffers need a format structure per region that that is being
+ * Discontiguous buffers need a format structure per region that is being
  * logged. This makes the changes in the buffer appear to log recovery as though
  * they came from separate buffers, just like would occur if multiple buffers
  * were used instead of a single discontiguous buffer. This enables
index d480f11..8f0457d 100644 (file)
@@ -948,7 +948,7 @@ xlog_recover_buf_commit_pass2(
         * or inode_cluster_size bytes, whichever is bigger.  The inode
         * buffers in the log can be a different size if the log was generated
         * by an older kernel using unclustered inode buffers or a newer kernel
-        * running with a different inode cluster size.  Regardless, if the
+        * running with a different inode cluster size.  Regardless, if
         * the inode buffer size isn't max(blocksize, inode_cluster_size)
         * for *our* value of inode_cluster_size, then we need to keep
         * the buffer out of the buffer cache so that the buffer won't
index 04dc2be..bcd73b9 100644 (file)
@@ -807,7 +807,7 @@ xfs_qm_dqget_checks(
 }
 
 /*
- * Given the file system, id, and type (UDQUOT/GDQUOT), return a locked
+ * Given the file system, id, and type (UDQUOT/GDQUOT), return a locked
  * dquot, doing an allocation (if requested) as needed.
  */
 int
index 5a4b011..465fd9e 100644 (file)
@@ -56,7 +56,7 @@ xfs_fs_encode_fh(
                fileid_type = FILEID_INO32_GEN_PARENT;
 
        /*
-        * If the the filesystem may contain 64bit inode numbers, we need
+        * If the filesystem may contain 64bit inode numbers, we need
         * to use larger file handles that can represent them.
         *
         * While we only allocate inodes that do not fit into 32 bits any
index 407d629..c06129c 100644 (file)
@@ -451,7 +451,7 @@ xfs_lock_inodes(
        /*
         * Currently supports between 2 and 5 inodes with exclusive locking.  We
         * support an arbitrary depth of locking here, but absolute limits on
-        * inodes depend on the the type of locking and the limits placed by
+        * inodes depend on the type of locking and the limits placed by
         * lockdep annotations in xfs_lock_inumorder.  These are all checked by
         * the asserts.
         */
@@ -3105,7 +3105,7 @@ out_trans_abort:
 /*
  * xfs_rename_alloc_whiteout()
  *
- * Return a referenced, unlinked, unlocked inode that that can be used as a
+ * Return a referenced, unlinked, unlocked inode that can be used as a
  * whiteout in a rename transaction. We use a tmpfile inode here so that if we
  * crash between allocating the inode and linking it into the rename transaction
  * recovery will free the inode and we won't leak it.
index 895f61b..6c65938 100644 (file)
@@ -191,7 +191,7 @@ xfs_inode_item_format_data_fork(
                    ip->i_df.if_bytes > 0) {
                        /*
                         * Round i_bytes up to a word boundary.
-                        * The underlying memory is guaranteed to
+                        * The underlying memory is guaranteed
                         * to be there by xfs_idata_realloc().
                         */
                        data_bytes = roundup(ip->i_df.if_bytes, 4);
@@ -275,7 +275,7 @@ xfs_inode_item_format_attr_fork(
                    ip->i_afp->if_bytes > 0) {
                        /*
                         * Round i_bytes up to a word boundary.
-                        * The underlying memory is guaranteed to
+                        * The underlying memory is guaranteed
                         * to be there by xfs_idata_realloc().
                         */
                        data_bytes = roundup(ip->i_afp->if_bytes, 4);
index 0e3f62c..3abb8b9 100644 (file)
@@ -865,7 +865,7 @@ xfs_buffered_write_iomap_begin(
        }
 
        /*
-        * Search the data fork fork first to look up our source mapping.  We
+        * Search the data fork first to look up our source mapping.  We
         * always need the data fork map, as we have to return it to the
         * iomap code so that the higher level write code can read data in to
         * perform read-modify-write cycles for unaligned writes.
index 56c32ee..b0ef071 100644 (file)
@@ -239,7 +239,7 @@ xfs_cil_prepare_item(
         * this CIL context and so we need to pin it. If we are replacing the
         * old_lv, then remove the space it accounts for and make it the shadow
         * buffer for later freeing. In both cases we are now switching to the
-        * shadow buffer, so update the the pointer to it appropriately.
+        * shadow buffer, so update the pointer to it appropriately.
         */
        if (!old_lv) {
                if (lv->lv_item->li_ops->iop_pin)
index 52a65a7..e2ec91b 100644 (file)
@@ -1100,7 +1100,7 @@ xlog_verify_head(
                 *
                 * Note that xlog_find_tail() clears the blocks at the new head
                 * (i.e., the records with invalid CRC) if the cycle number
-                * matches the the current cycle.
+                * matches the current cycle.
                 */
                found = xlog_rseek_logrec_hdr(log, first_bad, *tail_blk, 1,
                                buffer, rhead_blk, rhead, wrapped);
index 7b2c72b..ca93b64 100644 (file)
@@ -485,7 +485,7 @@ xfs_cui_item_recover(
         * transaction.  Normally, any work that needs to be deferred
         * gets attached to the same defer_ops that scheduled the
         * refcount update.  However, we're in log recovery here, so we
-        * we use the passed in defer_ops and to finish up any work that
+        * use the passed in defer_ops and to finish up any work that
         * doesn't fit.  We need to reserve enough blocks to handle a
         * full btree split on either end of the refcount range.
         */
index aac83f9..16098dc 100644 (file)
@@ -721,7 +721,7 @@ xfs_reflink_end_cow(
         * repeatedly cycles the ILOCK to allocate one transaction per remapped
         * extent.
         *
-        * If we're being called by writeback then the the pages will still
+        * If we're being called by writeback then the pages will still
         * have PageWriteback set, which prevents races with reflink remapping
         * and truncate.  Reflink remapping prevents races with writeback by
         * taking the iolock and mmaplock before flushing the pages and
index e9f810f..4358585 100644 (file)
@@ -32,9 +32,11 @@ xfs_sysfs_init(
        struct xfs_kobj         *parent_kobj,
        const char              *name)
 {
+       struct kobject          *parent;
+
+       parent = parent_kobj ? &parent_kobj->kobject : NULL;
        init_completion(&kobj->complete);
-       return kobject_init_and_add(&kobj->kobject, ktype,
-                                   &parent_kobj->kobject, "%s", name);
+       return kobject_init_and_add(&kobj->kobject, ktype, parent, "%s", name);
 }
 
 static inline void
index 0c783d3..dbb69b4 100644 (file)
@@ -480,7 +480,7 @@ xfsaild_push(
                         * inode buffer is locked because we already pushed the
                         * updates to it as part of inode clustering.
                         *
-                        * We do not want to to stop flushing just because lots
+                        * We do not want to stop flushing just because lots
                         * of items are already being flushed, but we need to
                         * re-try the flushing relatively soon if most of the
                         * AIL is being flushed.
@@ -515,7 +515,7 @@ xfsaild_push(
                /*
                 * Are there too many items we can't do anything with?
                 *
-                * If we we are skipping too many items because we can't flush
+                * If we are skipping too many items because we can't flush
                 * them or they are already being flushed, we back off and
                 * given them time to complete whatever operation is being
                 * done. i.e. remove pressure from the AIL while we can't make
index 4a0bff6..8ec7c8f 100644 (file)
@@ -335,7 +335,7 @@ static void zonefs_io_error(struct inode *inode, bool write)
        struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
        unsigned int noio_flag;
        unsigned int nr_zones =
-               zi->i_max_size >> (sbi->s_zone_sectors_shift + SECTOR_SHIFT);
+               zi->i_zone_size >> (sbi->s_zone_sectors_shift + SECTOR_SHIFT);
        struct zonefs_ioerr_data err = {
                .inode = inode,
                .write = write,
@@ -398,7 +398,7 @@ static int zonefs_file_truncate(struct inode *inode, loff_t isize)
                goto unlock;
 
        ret = blkdev_zone_mgmt(inode->i_sb->s_bdev, op, zi->i_zsector,
-                              zi->i_max_size >> SECTOR_SHIFT, GFP_NOFS);
+                              zi->i_zone_size >> SECTOR_SHIFT, GFP_NOFS);
        if (ret) {
                zonefs_err(inode->i_sb,
                           "Zone management operation at %llu failed %d",
@@ -1053,14 +1053,16 @@ static void zonefs_init_file_inode(struct inode *inode, struct blk_zone *zone,
 
        zi->i_ztype = type;
        zi->i_zsector = zone->start;
+       zi->i_zone_size = zone->len << SECTOR_SHIFT;
+
        zi->i_max_size = min_t(loff_t, MAX_LFS_FILESIZE,
-                              zone->len << SECTOR_SHIFT);
+                              zone->capacity << SECTOR_SHIFT);
        zi->i_wpoffset = zonefs_check_zone_condition(inode, zone, true, true);
 
        inode->i_uid = sbi->s_uid;
        inode->i_gid = sbi->s_gid;
        inode->i_size = zi->i_wpoffset;
-       inode->i_blocks = zone->len;
+       inode->i_blocks = zi->i_max_size >> SECTOR_SHIFT;
 
        inode->i_op = &zonefs_file_inode_operations;
        inode->i_fop = &zonefs_file_operations;
@@ -1167,12 +1169,18 @@ static int zonefs_create_zgroup(struct zonefs_zone_data *zd,
                                if (zonefs_zone_type(next) != type)
                                        break;
                                zone->len += next->len;
+                               zone->capacity += next->capacity;
                                if (next->cond == BLK_ZONE_COND_READONLY &&
                                    zone->cond != BLK_ZONE_COND_OFFLINE)
                                        zone->cond = BLK_ZONE_COND_READONLY;
                                else if (next->cond == BLK_ZONE_COND_OFFLINE)
                                        zone->cond = BLK_ZONE_COND_OFFLINE;
                        }
+                       if (zone->capacity != zone->len) {
+                               zonefs_err(sb, "Invalid conventional zone capacity\n");
+                               ret = -EINVAL;
+                               goto free;
+                       }
                }
 
                /*
index ad17fef..55b3997 100644 (file)
@@ -56,6 +56,9 @@ struct zonefs_inode_info {
        /* File maximum size */
        loff_t                  i_max_size;
 
+       /* File zone size */
+       loff_t                  i_zone_size;
+
        /*
         * To serialise fully against both syscall and mmap based IO and
         * sequential file truncation, two locks are used. For serializing
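Zoned block devices can report a zone capacity smaller than the zone size (an NVMe ZNS feature), so zonefs now keeps the full zone size in i_zone_size for zone arithmetic and zone-management calls, while i_max_size and i_blocks are derived from the writable capacity. A hedged kernel-style sketch with assumed numbers:

/* Illustrative values only -- e.g. a 256 MiB zone exposing 192 MiB of capacity. */
static void zone_capacity_demo(struct zonefs_inode_info *zi, struct inode *inode)
{
	zi->i_zone_size = 256ULL << 20;				/* zone->len         */
	zi->i_max_size  = min_t(loff_t, MAX_LFS_FILESIZE,
				192ULL << 20);			/* zone->capacity    */
	inode->i_blocks = zi->i_max_size >> SECTOR_SHIFT;	/* capacity, not len */

	/*
	 * Anything counting zones or issuing zone-management commands keeps
	 * using i_zone_size; only file-size limits come from the capacity.
	 */
}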
index 30a3aab..dabf8cb 100644 (file)
@@ -163,7 +163,7 @@ static inline u16 readw(const volatile void __iomem *addr)
        u16 val;
 
        __io_br();
-       val = __le16_to_cpu(__raw_readw(addr));
+       val = __le16_to_cpu((__le16 __force)__raw_readw(addr));
        __io_ar(val);
        return val;
 }
@@ -176,7 +176,7 @@ static inline u32 readl(const volatile void __iomem *addr)
        u32 val;
 
        __io_br();
-       val = __le32_to_cpu(__raw_readl(addr));
+       val = __le32_to_cpu((__le32 __force)__raw_readl(addr));
        __io_ar(val);
        return val;
 }
@@ -212,7 +212,7 @@ static inline void writeb(u8 value, volatile void __iomem *addr)
 static inline void writew(u16 value, volatile void __iomem *addr)
 {
        __io_bw();
-       __raw_writew(cpu_to_le16(value), addr);
+       __raw_writew((u16 __force)cpu_to_le16(value), addr);
        __io_aw();
 }
 #endif
@@ -222,7 +222,7 @@ static inline void writew(u16 value, volatile void __iomem *addr)
 static inline void writel(u32 value, volatile void __iomem *addr)
 {
        __io_bw();
-       __raw_writel(__cpu_to_le32(value), addr);
+       __raw_writel((u32 __force)__cpu_to_le32(value), addr);
        __io_aw();
 }
 #endif
@@ -474,7 +474,7 @@ static inline u16 _inw(unsigned long addr)
        u16 val;
 
        __io_pbr();
-       val = __le16_to_cpu(__raw_readw(PCI_IOBASE + addr));
+       val = __le16_to_cpu((__le16 __force)__raw_readw(PCI_IOBASE + addr));
        __io_par(val);
        return val;
 }
@@ -487,7 +487,7 @@ static inline u32 _inl(unsigned long addr)
        u32 val;
 
        __io_pbr();
-       val = __le32_to_cpu(__raw_readl(PCI_IOBASE + addr));
+       val = __le32_to_cpu((__le32 __force)__raw_readl(PCI_IOBASE + addr));
        __io_par(val);
        return val;
 }
@@ -508,7 +508,7 @@ static inline void _outb(u8 value, unsigned long addr)
 static inline void _outw(u16 value, unsigned long addr)
 {
        __io_pbw();
-       __raw_writew(cpu_to_le16(value), PCI_IOBASE + addr);
+       __raw_writew((u16 __force)cpu_to_le16(value), PCI_IOBASE + addr);
        __io_paw();
 }
 #endif
@@ -518,7 +518,7 @@ static inline void _outw(u16 value, unsigned long addr)
 static inline void _outl(u32 value, unsigned long addr)
 {
        __io_pbw();
-       __raw_writel(cpu_to_le32(value), PCI_IOBASE + addr);
+       __raw_writel((u32 __force)cpu_to_le32(value), PCI_IOBASE + addr);
        __io_paw();
 }
 #endif
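__raw_readl() hands back a plain u32, but for these generic accessors the bus data is defined to be little-endian, so the value is really a __le32. The __force casts only silence sparse's bitwise-type checking; the generated code is unchanged. A hedged conceptual rendering of what readl() now expresses (barriers omitted):

/* Conceptual sketch, illustrative only. */
static inline u32 readl_sketch(const volatile void __iomem *addr)
{
	/* The raw bus word is little-endian, hence the bitwise type. */
	__le32 raw = (__le32 __force)__raw_readl(addr);

	return le32_to_cpu(raw);	/* returned in CPU byte order */
}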
index 9d28a5e..6492246 100644 (file)
  * in the low address range. Architectures for which this is not
  * true can't use this generic implementation.
  */
-extern unsigned int ioread8(void __iomem *);
-extern unsigned int ioread16(void __iomem *);
-extern unsigned int ioread16be(void __iomem *);
-extern unsigned int ioread32(void __iomem *);
-extern unsigned int ioread32be(void __iomem *);
+extern unsigned int ioread8(const void __iomem *);
+extern unsigned int ioread16(const void __iomem *);
+extern unsigned int ioread16be(const void __iomem *);
+extern unsigned int ioread32(const void __iomem *);
+extern unsigned int ioread32be(const void __iomem *);
 #ifdef CONFIG_64BIT
-extern u64 ioread64(void __iomem *);
-extern u64 ioread64be(void __iomem *);
+extern u64 ioread64(const void __iomem *);
+extern u64 ioread64be(const void __iomem *);
 #endif
 
 #ifdef readq
@@ -41,10 +41,10 @@ extern u64 ioread64be(void __iomem *);
 #define ioread64_hi_lo ioread64_hi_lo
 #define ioread64be_lo_hi ioread64be_lo_hi
 #define ioread64be_hi_lo ioread64be_hi_lo
-extern u64 ioread64_lo_hi(void __iomem *addr);
-extern u64 ioread64_hi_lo(void __iomem *addr);
-extern u64 ioread64be_lo_hi(void __iomem *addr);
-extern u64 ioread64be_hi_lo(void __iomem *addr);
+extern u64 ioread64_lo_hi(const void __iomem *addr);
+extern u64 ioread64_hi_lo(const void __iomem *addr);
+extern u64 ioread64be_lo_hi(const void __iomem *addr);
+extern u64 ioread64be_hi_lo(const void __iomem *addr);
 #endif
 
 extern void iowrite8(u8, void __iomem *);
@@ -79,9 +79,9 @@ extern void iowrite64be_hi_lo(u64 val, void __iomem *addr);
  * memory across multiple ports, use "memcpy_toio()"
  * and friends.
  */
-extern void ioread8_rep(void __iomem *port, void *buf, unsigned long count);
-extern void ioread16_rep(void __iomem *port, void *buf, unsigned long count);
-extern void ioread32_rep(void __iomem *port, void *buf, unsigned long count);
+extern void ioread8_rep(const void __iomem *port, void *buf, unsigned long count);
+extern void ioread16_rep(const void __iomem *port, void *buf, unsigned long count);
+extern void ioread32_rep(const void __iomem *port, void *buf, unsigned long count);
 
 extern void iowrite8_rep(void __iomem *port, const void *buf, unsigned long count);
 extern void iowrite16_rep(void __iomem *port, const void *buf, unsigned long count);
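Constifying the source pointer lets drivers keep read-only MMIO cookies as const; the repeated-read helpers still fill a caller-supplied buffer, with the count given in element units. A hedged usage sketch (the driver function and names are illustrative):

#include <linux/io.h>

/* Illustrative FIFO drain, not part of this patch. */
static void drain_rx_fifo(const void __iomem *fifo, u32 *buf,
			  unsigned long words)
{
	ioread32_rep(fifo, buf, words);	/* performs 'words' 32-bit reads */
}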
index 1c4fd95..c5edc5e 100644 (file)
@@ -168,7 +168,6 @@ void hyperv_report_panic_msg(phys_addr_t pa, size_t size);
 bool hv_is_hyperv_initialized(void);
 bool hv_is_hibernation_supported(void);
 void hyperv_cleanup(void);
-void hv_setup_sched_clock(void *sched_clock);
 #else /* CONFIG_HYPERV */
 static inline bool hv_is_hyperv_initialized(void) { return false; }
 static inline bool hv_is_hibernation_supported(void) { return false; }
index 6f44810..02932ef 100644 (file)
@@ -147,7 +147,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
 
 #if CONFIG_PGTABLE_LEVELS > 3
 
-#ifndef __HAVE_ARCH_PUD_FREE
+#ifndef __HAVE_ARCH_PUD_ALLOC_ONE
 /**
  * pud_alloc_one - allocate a page for PUD-level page table
  * @mm: the mm_struct of the current context
index 66397ed..d16302d 100644 (file)
@@ -60,8 +60,8 @@ extern __visible const void __nosave_begin, __nosave_end;
 
 /* Function descriptor handling (if any).  Override in asm/sections.h */
 #ifndef dereference_function_descriptor
-#define dereference_function_descriptor(p) (p)
-#define dereference_kernel_function_descriptor(p) (p)
+#define dereference_function_descriptor(p) ((void *)(p))
+#define dereference_kernel_function_descriptor(p) ((void *)(p))
 #endif
 
 /* random extra sections (if any).  Override
index e935318..ba68ee4 100644 (file)
@@ -86,8 +86,8 @@ static inline void set_fs(mm_segment_t fs)
 }
 #endif
 
-#ifndef segment_eq
-#define segment_eq(a, b) ((a).seg == (b).seg)
+#ifndef uaccess_kernel
+#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg)
 #endif
 
 #define access_ok(addr, size) __access_ok((unsigned long)(addr),(size))
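segment_eq() had no remaining users beyond this comparison, so the generic header now defines uaccess_kernel() directly in terms of get_fs(). A hedged sketch of a call site using the helper instead of the open-coded check:

/* Illustrative only. */
static bool demo_in_kernel_uaccess(void)
{
	/* was: segment_eq(get_fs(), KERNEL_DS) */
	return uaccess_kernel();
}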
index 7616ff0..5430feb 100644 (file)
  */
 #ifndef RO_AFTER_INIT_DATA
 #define RO_AFTER_INIT_DATA                                             \
+       . = ALIGN(8);                                                   \
        __start_ro_after_init = .;                                      \
        *(.data..ro_after_init)                                         \
        JUMP_TABLE_DATA                                                 \
index 531ca87..4c61dad 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * OMAP Dual-Mode Timers
  *
- * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2010 Texas Instruments Incorporated - https://www.ti.com/
  * Tarun Kanti DebBarma <tarun.kanti@ti.com>
  * Thara Gopinath <thara@ti.com>
  *
index 030981c..a250a52 100644 (file)
 #define CLK_NAND               52
 #define CLK_ECC                        53
 #define CLK_RMII_REF           54
+#define CLK_GPIO               55
 
-#define CLK_NR_CLKS           (CLK_RMII_REF + 1)
+/* system clock (part 2) */
+#define CLK_APB                        56
+#define CLK_DMAC               57
+
+#define CLK_NR_CLKS            (CLK_DMAC + 1)
 
 #endif /* __DT_BINDINGS_CLOCK_S500_CMU_H */
index 1859ce5..85cf8eb 100644 (file)
 #ifndef __DT_BINDINGS_CLOCK_JZ4780_CGU_H__
 #define __DT_BINDINGS_CLOCK_JZ4780_CGU_H__
 
-#define JZ4780_CLK_EXCLK       0
-#define JZ4780_CLK_RTCLK       1
-#define JZ4780_CLK_APLL                2
-#define JZ4780_CLK_MPLL                3
-#define JZ4780_CLK_EPLL                4
-#define JZ4780_CLK_VPLL                5
-#define JZ4780_CLK_OTGPHY      6
-#define JZ4780_CLK_SCLKA       7
-#define JZ4780_CLK_CPUMUX      8
-#define JZ4780_CLK_CPU         9
-#define JZ4780_CLK_L2CACHE     10
-#define JZ4780_CLK_AHB0                11
-#define JZ4780_CLK_AHB2PMUX    12
-#define JZ4780_CLK_AHB2                13
-#define JZ4780_CLK_PCLK                14
-#define JZ4780_CLK_DDR         15
-#define JZ4780_CLK_VPU         16
-#define JZ4780_CLK_I2SPLL      17
-#define JZ4780_CLK_I2S         18
+#define JZ4780_CLK_EXCLK               0
+#define JZ4780_CLK_RTCLK               1
+#define JZ4780_CLK_APLL                        2
+#define JZ4780_CLK_MPLL                        3
+#define JZ4780_CLK_EPLL                        4
+#define JZ4780_CLK_VPLL                        5
+#define JZ4780_CLK_OTGPHY              6
+#define JZ4780_CLK_SCLKA               7
+#define JZ4780_CLK_CPUMUX              8
+#define JZ4780_CLK_CPU                 9
+#define JZ4780_CLK_L2CACHE             10
+#define JZ4780_CLK_AHB0                        11
+#define JZ4780_CLK_AHB2PMUX            12
+#define JZ4780_CLK_AHB2                        13
+#define JZ4780_CLK_PCLK                        14
+#define JZ4780_CLK_DDR                 15
+#define JZ4780_CLK_VPU                 16
+#define JZ4780_CLK_I2SPLL              17
+#define JZ4780_CLK_I2S                 18
 #define JZ4780_CLK_LCD0PIXCLK  19
 #define JZ4780_CLK_LCD1PIXCLK  20
-#define JZ4780_CLK_MSCMUX      21
-#define JZ4780_CLK_MSC0                22
-#define JZ4780_CLK_MSC1                23
-#define JZ4780_CLK_MSC2                24
-#define JZ4780_CLK_UHC         25
-#define JZ4780_CLK_SSIPLL      26
-#define JZ4780_CLK_SSI         27
-#define JZ4780_CLK_CIMMCLK     28
-#define JZ4780_CLK_PCMPLL      29
-#define JZ4780_CLK_PCM         30
-#define JZ4780_CLK_GPU         31
-#define JZ4780_CLK_HDMI                32
-#define JZ4780_CLK_BCH         33
-#define JZ4780_CLK_NEMC                34
-#define JZ4780_CLK_OTG0                35
-#define JZ4780_CLK_SSI0                36
-#define JZ4780_CLK_SMB0                37
-#define JZ4780_CLK_SMB1                38
-#define JZ4780_CLK_SCC         39
-#define JZ4780_CLK_AIC         40
-#define JZ4780_CLK_TSSI0       41
-#define JZ4780_CLK_OWI         42
-#define JZ4780_CLK_KBC         43
-#define JZ4780_CLK_SADC                44
-#define JZ4780_CLK_UART0       45
-#define JZ4780_CLK_UART1       46
-#define JZ4780_CLK_UART2       47
-#define JZ4780_CLK_UART3       48
-#define JZ4780_CLK_SSI1                49
-#define JZ4780_CLK_SSI2                50
-#define JZ4780_CLK_PDMA                51
-#define JZ4780_CLK_GPS         52
-#define JZ4780_CLK_MAC         53
-#define JZ4780_CLK_SMB2                54
-#define JZ4780_CLK_CIM         55
-#define JZ4780_CLK_LCD         56
-#define JZ4780_CLK_TVE         57
-#define JZ4780_CLK_IPU         58
-#define JZ4780_CLK_DDR0                59
-#define JZ4780_CLK_DDR1                60
-#define JZ4780_CLK_SMB3                61
-#define JZ4780_CLK_TSSI1       62
-#define JZ4780_CLK_COMPRESS    63
-#define JZ4780_CLK_AIC1                64
-#define JZ4780_CLK_GPVLC       65
-#define JZ4780_CLK_OTG1                66
-#define JZ4780_CLK_UART4       67
-#define JZ4780_CLK_AHBMON      68
-#define JZ4780_CLK_SMB4                69
-#define JZ4780_CLK_DES         70
-#define JZ4780_CLK_X2D         71
-#define JZ4780_CLK_CORE1       72
+#define JZ4780_CLK_MSCMUX              21
+#define JZ4780_CLK_MSC0                        22
+#define JZ4780_CLK_MSC1                        23
+#define JZ4780_CLK_MSC2                        24
+#define JZ4780_CLK_UHC                 25
+#define JZ4780_CLK_SSIPLL              26
+#define JZ4780_CLK_SSI                 27
+#define JZ4780_CLK_CIMMCLK             28
+#define JZ4780_CLK_PCMPLL              29
+#define JZ4780_CLK_PCM                 30
+#define JZ4780_CLK_GPU                 31
+#define JZ4780_CLK_HDMI                        32
+#define JZ4780_CLK_BCH                 33
+#define JZ4780_CLK_NEMC                        34
+#define JZ4780_CLK_OTG0                        35
+#define JZ4780_CLK_SSI0                        36
+#define JZ4780_CLK_SMB0                        37
+#define JZ4780_CLK_SMB1                        38
+#define JZ4780_CLK_SCC                 39
+#define JZ4780_CLK_AIC                 40
+#define JZ4780_CLK_TSSI0               41
+#define JZ4780_CLK_OWI                 42
+#define JZ4780_CLK_KBC                 43
+#define JZ4780_CLK_SADC                        44
+#define JZ4780_CLK_UART0               45
+#define JZ4780_CLK_UART1               46
+#define JZ4780_CLK_UART2               47
+#define JZ4780_CLK_UART3               48
+#define JZ4780_CLK_SSI1                        49
+#define JZ4780_CLK_SSI2                        50
+#define JZ4780_CLK_PDMA                        51
+#define JZ4780_CLK_GPS                 52
+#define JZ4780_CLK_MAC                 53
+#define JZ4780_CLK_SMB2                        54
+#define JZ4780_CLK_CIM                 55
+#define JZ4780_CLK_LCD                 56
+#define JZ4780_CLK_TVE                 57
+#define JZ4780_CLK_IPU                 58
+#define JZ4780_CLK_DDR0                        59
+#define JZ4780_CLK_DDR1                        60
+#define JZ4780_CLK_SMB3                        61
+#define JZ4780_CLK_TSSI1               62
+#define JZ4780_CLK_COMPRESS            63
+#define JZ4780_CLK_AIC1                        64
+#define JZ4780_CLK_GPVLC               65
+#define JZ4780_CLK_OTG1                        66
+#define JZ4780_CLK_UART4               67
+#define JZ4780_CLK_AHBMON              68
+#define JZ4780_CLK_SMB4                        69
+#define JZ4780_CLK_DES                 70
+#define JZ4780_CLK_X2D                 71
+#define JZ4780_CLK_CORE1               72
+#define JZ4780_CLK_EXCLK_DIV512        73
+#define JZ4780_CLK_RTC                 74
 
 #endif /* __DT_BINDINGS_CLOCK_JZ4780_CGU_H__ */
index 992b67b..bdf43ad 100644 (file)
 #define GCC_MSS_Q6_MEMNOC_AXI_CLK                              128
 #define GCC_MSS_SNOC_AXI_CLK                                   129
 #define GCC_SEC_CTRL_CLK_SRC                                   130
+#define GCC_LPASS_CFG_NOC_SWAY_CLK                             131
 
 /* GCC resets */
 #define GCC_QUSB2PHY_PRIM_BCR                                  0
index 4683022..df8a6f3 100644 (file)
 #define GCC_USB_20_BCR                  6
 #define GCC_USB_30_BCR                 7
 #define GCC_USB_PHY_CFG_AHB2PHY_BCR    8
+#define GCC_MSS_RESTART                        9
 
 #endif
diff --git a/include/dt-bindings/clock/qcom,gpucc-sm8150.h b/include/dt-bindings/clock/qcom,gpucc-sm8150.h
new file mode 100644 (file)
index 0000000..c5b70aa
--- /dev/null
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_SM8150_H
+#define _DT_BINDINGS_CLK_QCOM_GPU_CC_SM8150_H
+
+/* GPU_CC clock registers */
+#define GPU_CC_AHB_CLK                         0
+#define GPU_CC_CRC_AHB_CLK                     1
+#define GPU_CC_CX_APB_CLK                      2
+#define GPU_CC_CX_GMU_CLK                      3
+#define GPU_CC_CX_SNOC_DVM_CLK                 4
+#define GPU_CC_CXO_AON_CLK                     5
+#define GPU_CC_CXO_CLK                         6
+#define GPU_CC_GMU_CLK_SRC                     7
+#define GPU_CC_GX_GMU_CLK                      8
+#define GPU_CC_PLL1                            9
+
+/* GPU_CC Resets */
+#define GPUCC_GPU_CC_CX_BCR                    0
+#define GPUCC_GPU_CC_GFX3D_AON_BCR             1
+#define GPUCC_GPU_CC_GMU_BCR                   2
+#define GPUCC_GPU_CC_GX_BCR                    3
+#define GPUCC_GPU_CC_SPDM_BCR                  4
+#define GPUCC_GPU_CC_XO_BCR                    5
+
+/* GPU_CC GDSCRs */
+#define GPU_CX_GDSC                            0
+#define GPU_GX_GDSC                            1
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gpucc-sm8250.h b/include/dt-bindings/clock/qcom,gpucc-sm8250.h
new file mode 100644 (file)
index 0000000..dc8e387
--- /dev/null
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_SM8250_H
+#define _DT_BINDINGS_CLK_QCOM_GPU_CC_SM8250_H
+
+/* GPU_CC clock registers */
+#define GPU_CC_AHB_CLK                         0
+#define GPU_CC_CRC_AHB_CLK                     1
+#define GPU_CC_CX_APB_CLK                      2
+#define GPU_CC_CX_GMU_CLK                      3
+#define GPU_CC_CX_SNOC_DVM_CLK                 4
+#define GPU_CC_CXO_AON_CLK                     5
+#define GPU_CC_CXO_CLK                         6
+#define GPU_CC_GMU_CLK_SRC                     7
+#define GPU_CC_GX_GMU_CLK                      8
+#define GPU_CC_PLL1                            9
+#define GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK         10
+
+/* GPU_CC Resets */
+#define GPUCC_GPU_CC_ACD_BCR                   0
+#define GPUCC_GPU_CC_CX_BCR                    1
+#define GPUCC_GPU_CC_GFX3D_AON_BCR             2
+#define GPUCC_GPU_CC_GMU_BCR                   3
+#define GPUCC_GPU_CC_GX_BCR                    4
+#define GPUCC_GPU_CC_XO_BCR                    5
+
+/* GPU_CC GDSCRs */
+#define GPU_CX_GDSC                            0
+#define GPU_GX_GDSC                            1
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,lpasscorecc-sc7180.h b/include/dt-bindings/clock/qcom,lpasscorecc-sc7180.h
new file mode 100644 (file)
index 0000000..a55d01d
--- /dev/null
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_LPASS_CORE_CC_SC7180_H
+#define _DT_BINDINGS_CLK_QCOM_LPASS_CORE_CC_SC7180_H
+
+/* LPASS_CORE_CC clocks */
+#define LPASS_LPAAUDIO_DIG_PLL                         0
+#define LPASS_LPAAUDIO_DIG_PLL_OUT_ODD                 1
+#define CORE_CLK_SRC                                   2
+#define EXT_MCLK0_CLK_SRC                              3
+#define LPAIF_PRI_CLK_SRC                              4
+#define LPAIF_SEC_CLK_SRC                              5
+#define LPASS_AUDIO_CORE_CORE_CLK                      6
+#define LPASS_AUDIO_CORE_EXT_MCLK0_CLK                 7
+#define LPASS_AUDIO_CORE_LPAIF_PRI_IBIT_CLK            8
+#define LPASS_AUDIO_CORE_LPAIF_SEC_IBIT_CLK            9
+#define LPASS_AUDIO_CORE_SYSNOC_MPORT_CORE_CLK         10
+
+/* LPASS Core power domains */
+#define LPASS_CORE_HM_GDSCR                            0
+
+/* LPASS Audio power domains */
+#define LPASS_AUDIO_HM_GDSCR                           0
+#define LPASS_PDC_HM_GDSCR                             1
+
+#endif
index 0367c8c..f187e07 100644 (file)
@@ -48,5 +48,7 @@
 #define X1000_CLK_SSI                  33
 #define X1000_CLK_OST                  34
 #define X1000_CLK_PDMA                 35
+#define X1000_CLK_EXCLK_DIV512 36
+#define X1000_CLK_RTC                  37
 
 #endif /* __DT_BINDINGS_CLOCK_X1000_CGU_H__ */
index 801e1d0..8845537 100644 (file)
@@ -51,5 +51,7 @@
 #define X1830_CLK_TCU                  36
 #define X1830_CLK_DTRNG                        37
 #define X1830_CLK_OST                  38
+#define X1830_CLK_EXCLK_DIV512 39
+#define X1830_CLK_RTC                  40
 
 #endif /* __DT_BINDINGS_CLOCK_X1830_CGU_H__ */
diff --git a/include/dt-bindings/memory/mt6779-larb-port.h b/include/dt-bindings/memory/mt6779-larb-port.h
new file mode 100644 (file)
index 0000000..2ad0899
--- /dev/null
@@ -0,0 +1,206 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ * Author: Chao Hao <chao.hao@mediatek.com>
+ */
+
+#ifndef _DTS_IOMMU_PORT_MT6779_H_
+#define _DTS_IOMMU_PORT_MT6779_H_
+
+#define MTK_M4U_ID(larb, port)          (((larb) << 5) | (port))
+
+#define M4U_LARB0_ID                    0
+#define M4U_LARB1_ID                    1
+#define M4U_LARB2_ID                    2
+#define M4U_LARB3_ID                    3
+#define M4U_LARB4_ID                    4
+#define M4U_LARB5_ID                    5
+#define M4U_LARB6_ID                    6
+#define M4U_LARB7_ID                    7
+#define M4U_LARB8_ID                    8
+#define M4U_LARB9_ID                    9
+#define M4U_LARB10_ID                   10
+#define M4U_LARB11_ID                   11
+
+/* larb0 */
+#define M4U_PORT_DISP_POSTMASK0                 MTK_M4U_ID(M4U_LARB0_ID, 0)
+#define M4U_PORT_DISP_OVL0_HDR          MTK_M4U_ID(M4U_LARB0_ID, 1)
+#define M4U_PORT_DISP_OVL1_HDR          MTK_M4U_ID(M4U_LARB0_ID, 2)
+#define M4U_PORT_DISP_OVL0              MTK_M4U_ID(M4U_LARB0_ID, 3)
+#define M4U_PORT_DISP_OVL1              MTK_M4U_ID(M4U_LARB0_ID, 4)
+#define M4U_PORT_DISP_PVRIC0            MTK_M4U_ID(M4U_LARB0_ID, 5)
+#define M4U_PORT_DISP_RDMA0             MTK_M4U_ID(M4U_LARB0_ID, 6)
+#define M4U_PORT_DISP_WDMA0             MTK_M4U_ID(M4U_LARB0_ID, 7)
+#define M4U_PORT_DISP_FAKE0             MTK_M4U_ID(M4U_LARB0_ID, 8)
+
+/* larb1 */
+#define M4U_PORT_DISP_OVL0_2L_HDR       MTK_M4U_ID(M4U_LARB1_ID, 0)
+#define M4U_PORT_DISP_OVL1_2L_HDR       MTK_M4U_ID(M4U_LARB1_ID, 1)
+#define M4U_PORT_DISP_OVL0_2L           MTK_M4U_ID(M4U_LARB1_ID, 2)
+#define M4U_PORT_DISP_OVL1_2L           MTK_M4U_ID(M4U_LARB1_ID, 3)
+#define M4U_PORT_DISP_RDMA1             MTK_M4U_ID(M4U_LARB1_ID, 4)
+#define M4U_PORT_MDP_PVRIC0             MTK_M4U_ID(M4U_LARB1_ID, 5)
+#define M4U_PORT_MDP_PVRIC1             MTK_M4U_ID(M4U_LARB1_ID, 6)
+#define M4U_PORT_MDP_RDMA0              MTK_M4U_ID(M4U_LARB1_ID, 7)
+#define M4U_PORT_MDP_RDMA1              MTK_M4U_ID(M4U_LARB1_ID, 8)
+#define M4U_PORT_MDP_WROT0_R            MTK_M4U_ID(M4U_LARB1_ID, 9)
+#define M4U_PORT_MDP_WROT0_W            MTK_M4U_ID(M4U_LARB1_ID, 10)
+#define M4U_PORT_MDP_WROT1_R            MTK_M4U_ID(M4U_LARB1_ID, 11)
+#define M4U_PORT_MDP_WROT1_W            MTK_M4U_ID(M4U_LARB1_ID, 12)
+#define M4U_PORT_DISP_FAKE1             MTK_M4U_ID(M4U_LARB1_ID, 13)
+
+/* larb2-VDEC */
+#define M4U_PORT_HW_VDEC_MC_EXT          MTK_M4U_ID(M4U_LARB2_ID, 0)
+#define M4U_PORT_HW_VDEC_UFO_EXT         MTK_M4U_ID(M4U_LARB2_ID, 1)
+#define M4U_PORT_HW_VDEC_PP_EXT          MTK_M4U_ID(M4U_LARB2_ID, 2)
+#define M4U_PORT_HW_VDEC_PRED_RD_EXT     MTK_M4U_ID(M4U_LARB2_ID, 3)
+#define M4U_PORT_HW_VDEC_PRED_WR_EXT     MTK_M4U_ID(M4U_LARB2_ID, 4)
+#define M4U_PORT_HW_VDEC_PPWRAP_EXT      MTK_M4U_ID(M4U_LARB2_ID, 5)
+#define M4U_PORT_HW_VDEC_TILE_EXT        MTK_M4U_ID(M4U_LARB2_ID, 6)
+#define M4U_PORT_HW_VDEC_VLD_EXT         MTK_M4U_ID(M4U_LARB2_ID, 7)
+#define M4U_PORT_HW_VDEC_VLD2_EXT        MTK_M4U_ID(M4U_LARB2_ID, 8)
+#define M4U_PORT_HW_VDEC_AVC_MV_EXT      MTK_M4U_ID(M4U_LARB2_ID, 9)
+#define M4U_PORT_HW_VDEC_UFO_ENC_EXT     MTK_M4U_ID(M4U_LARB2_ID, 10)
+#define M4U_PORT_HW_VDEC_RG_CTRL_DMA_EXT MTK_M4U_ID(M4U_LARB2_ID, 11)
+
+/* larb3-VENC */
+#define M4U_PORT_VENC_RCPU              MTK_M4U_ID(M4U_LARB3_ID, 0)
+#define M4U_PORT_VENC_REC               MTK_M4U_ID(M4U_LARB3_ID, 1)
+#define M4U_PORT_VENC_BSDMA             MTK_M4U_ID(M4U_LARB3_ID, 2)
+#define M4U_PORT_VENC_SV_COMV           MTK_M4U_ID(M4U_LARB3_ID, 3)
+#define M4U_PORT_VENC_RD_COMV           MTK_M4U_ID(M4U_LARB3_ID, 4)
+#define M4U_PORT_VENC_NBM_RDMA          MTK_M4U_ID(M4U_LARB3_ID, 5)
+#define M4U_PORT_VENC_NBM_RDMA_LITE     MTK_M4U_ID(M4U_LARB3_ID, 6)
+#define M4U_PORT_JPGENC_Y_RDMA          MTK_M4U_ID(M4U_LARB3_ID, 7)
+#define M4U_PORT_JPGENC_C_RDMA          MTK_M4U_ID(M4U_LARB3_ID, 8)
+#define M4U_PORT_JPGENC_Q_TABLE                 MTK_M4U_ID(M4U_LARB3_ID, 9)
+#define M4U_PORT_JPGENC_BSDMA           MTK_M4U_ID(M4U_LARB3_ID, 10)
+#define M4U_PORT_JPGDEC_WDMA            MTK_M4U_ID(M4U_LARB3_ID, 11)
+#define M4U_PORT_JPGDEC_BSDMA           MTK_M4U_ID(M4U_LARB3_ID, 12)
+#define M4U_PORT_VENC_NBM_WDMA          MTK_M4U_ID(M4U_LARB3_ID, 13)
+#define M4U_PORT_VENC_NBM_WDMA_LITE     MTK_M4U_ID(M4U_LARB3_ID, 14)
+#define M4U_PORT_VENC_CUR_LUMA          MTK_M4U_ID(M4U_LARB3_ID, 15)
+#define M4U_PORT_VENC_CUR_CHROMA        MTK_M4U_ID(M4U_LARB3_ID, 16)
+#define M4U_PORT_VENC_REF_LUMA          MTK_M4U_ID(M4U_LARB3_ID, 17)
+#define M4U_PORT_VENC_REF_CHROMA        MTK_M4U_ID(M4U_LARB3_ID, 18)
+
+/* larb4-dummy */
+
+/* larb5-IMG */
+#define M4U_PORT_IMGI_D1                MTK_M4U_ID(M4U_LARB5_ID, 0)
+#define M4U_PORT_IMGBI_D1               MTK_M4U_ID(M4U_LARB5_ID, 1)
+#define M4U_PORT_DMGI_D1                MTK_M4U_ID(M4U_LARB5_ID, 2)
+#define M4U_PORT_DEPI_D1                MTK_M4U_ID(M4U_LARB5_ID, 3)
+#define M4U_PORT_LCEI_D1                MTK_M4U_ID(M4U_LARB5_ID, 4)
+#define M4U_PORT_SMTI_D1                MTK_M4U_ID(M4U_LARB5_ID, 5)
+#define M4U_PORT_SMTO_D2                MTK_M4U_ID(M4U_LARB5_ID, 6)
+#define M4U_PORT_SMTO_D1                MTK_M4U_ID(M4U_LARB5_ID, 7)
+#define M4U_PORT_CRZO_D1                MTK_M4U_ID(M4U_LARB5_ID, 8)
+#define M4U_PORT_IMG3O_D1               MTK_M4U_ID(M4U_LARB5_ID, 9)
+#define M4U_PORT_VIPI_D1                MTK_M4U_ID(M4U_LARB5_ID, 10)
+#define M4U_PORT_WPE_RDMA1              MTK_M4U_ID(M4U_LARB5_ID, 11)
+#define M4U_PORT_WPE_RDMA0              MTK_M4U_ID(M4U_LARB5_ID, 12)
+#define M4U_PORT_WPE_WDMA               MTK_M4U_ID(M4U_LARB5_ID, 13)
+#define M4U_PORT_TIMGO_D1               MTK_M4U_ID(M4U_LARB5_ID, 14)
+#define M4U_PORT_MFB_RDMA0              MTK_M4U_ID(M4U_LARB5_ID, 15)
+#define M4U_PORT_MFB_RDMA1              MTK_M4U_ID(M4U_LARB5_ID, 16)
+#define M4U_PORT_MFB_RDMA2              MTK_M4U_ID(M4U_LARB5_ID, 17)
+#define M4U_PORT_MFB_RDMA3              MTK_M4U_ID(M4U_LARB5_ID, 18)
+#define M4U_PORT_MFB_WDMA               MTK_M4U_ID(M4U_LARB5_ID, 19)
+#define M4U_PORT_RESERVE1               MTK_M4U_ID(M4U_LARB5_ID, 20)
+#define M4U_PORT_RESERVE2               MTK_M4U_ID(M4U_LARB5_ID, 21)
+#define M4U_PORT_RESERVE3               MTK_M4U_ID(M4U_LARB5_ID, 22)
+#define M4U_PORT_RESERVE4               MTK_M4U_ID(M4U_LARB5_ID, 23)
+#define M4U_PORT_RESERVE5               MTK_M4U_ID(M4U_LARB5_ID, 24)
+#define M4U_PORT_RESERVE6               MTK_M4U_ID(M4U_LARB5_ID, 25)
+
+/* larb6-IMG-VPU */
+#define M4U_PORT_IMG_IPUO               MTK_M4U_ID(M4U_LARB6_ID, 0)
+#define M4U_PORT_IMG_IPU3O              MTK_M4U_ID(M4U_LARB6_ID, 1)
+#define M4U_PORT_IMG_IPUI               MTK_M4U_ID(M4U_LARB6_ID, 2)
+
+/* larb7-DVS */
+#define M4U_PORT_DVS_RDMA               MTK_M4U_ID(M4U_LARB7_ID, 0)
+#define M4U_PORT_DVS_WDMA               MTK_M4U_ID(M4U_LARB7_ID, 1)
+#define M4U_PORT_DVP_RDMA               MTK_M4U_ID(M4U_LARB7_ID, 2)
+#define M4U_PORT_DVP_WDMA               MTK_M4U_ID(M4U_LARB7_ID, 3)
+
+/* larb8-IPESYS */
+#define M4U_PORT_FDVT_RDA               MTK_M4U_ID(M4U_LARB8_ID, 0)
+#define M4U_PORT_FDVT_RDB               MTK_M4U_ID(M4U_LARB8_ID, 1)
+#define M4U_PORT_FDVT_WRA               MTK_M4U_ID(M4U_LARB8_ID, 2)
+#define M4U_PORT_FDVT_WRB               MTK_M4U_ID(M4U_LARB8_ID, 3)
+#define M4U_PORT_FE_RD0                         MTK_M4U_ID(M4U_LARB8_ID, 4)
+#define M4U_PORT_FE_RD1                         MTK_M4U_ID(M4U_LARB8_ID, 5)
+#define M4U_PORT_FE_WR0                         MTK_M4U_ID(M4U_LARB8_ID, 6)
+#define M4U_PORT_FE_WR1                         MTK_M4U_ID(M4U_LARB8_ID, 7)
+#define M4U_PORT_RSC_RDMA0              MTK_M4U_ID(M4U_LARB8_ID, 8)
+#define M4U_PORT_RSC_WDMA               MTK_M4U_ID(M4U_LARB8_ID, 9)
+
+/* larb9-CAM */
+#define M4U_PORT_CAM_IMGO_R1_C          MTK_M4U_ID(M4U_LARB9_ID, 0)
+#define M4U_PORT_CAM_RRZO_R1_C          MTK_M4U_ID(M4U_LARB9_ID, 1)
+#define M4U_PORT_CAM_LSCI_R1_C          MTK_M4U_ID(M4U_LARB9_ID, 2)
+#define M4U_PORT_CAM_BPCI_R1_C          MTK_M4U_ID(M4U_LARB9_ID, 3)
+#define M4U_PORT_CAM_YUVO_R1_C          MTK_M4U_ID(M4U_LARB9_ID, 4)
+#define M4U_PORT_CAM_UFDI_R2_C          MTK_M4U_ID(M4U_LARB9_ID, 5)
+#define M4U_PORT_CAM_RAWI_R2_C          MTK_M4U_ID(M4U_LARB9_ID, 6)
+#define M4U_PORT_CAM_RAWI_R5_C          MTK_M4U_ID(M4U_LARB9_ID, 7)
+#define M4U_PORT_CAM_CAMSV_1            MTK_M4U_ID(M4U_LARB9_ID, 8)
+#define M4U_PORT_CAM_CAMSV_2            MTK_M4U_ID(M4U_LARB9_ID, 9)
+#define M4U_PORT_CAM_CAMSV_3            MTK_M4U_ID(M4U_LARB9_ID, 10)
+#define M4U_PORT_CAM_CAMSV_4            MTK_M4U_ID(M4U_LARB9_ID, 11)
+#define M4U_PORT_CAM_CAMSV_5            MTK_M4U_ID(M4U_LARB9_ID, 12)
+#define M4U_PORT_CAM_CAMSV_6            MTK_M4U_ID(M4U_LARB9_ID, 13)
+#define M4U_PORT_CAM_AAO_R1_C           MTK_M4U_ID(M4U_LARB9_ID, 14)
+#define M4U_PORT_CAM_AFO_R1_C           MTK_M4U_ID(M4U_LARB9_ID, 15)
+#define M4U_PORT_CAM_FLKO_R1_C          MTK_M4U_ID(M4U_LARB9_ID, 16)
+#define M4U_PORT_CAM_LCESO_R1_C                 MTK_M4U_ID(M4U_LARB9_ID, 17)
+#define M4U_PORT_CAM_CRZO_R1_C          MTK_M4U_ID(M4U_LARB9_ID, 18)
+#define M4U_PORT_CAM_LTMSO_R1_C                 MTK_M4U_ID(M4U_LARB9_ID, 19)
+#define M4U_PORT_CAM_RSSO_R1_C          MTK_M4U_ID(M4U_LARB9_ID, 20)
+#define M4U_PORT_CAM_CCUI               MTK_M4U_ID(M4U_LARB9_ID, 21)
+#define M4U_PORT_CAM_CCUO               MTK_M4U_ID(M4U_LARB9_ID, 22)
+#define M4U_PORT_CAM_FAKE               MTK_M4U_ID(M4U_LARB9_ID, 23)
+
+/* larb10-CAM_A */
+#define M4U_PORT_CAM_IMGO_R1_A          MTK_M4U_ID(M4U_LARB10_ID, 0)
+#define M4U_PORT_CAM_RRZO_R1_A          MTK_M4U_ID(M4U_LARB10_ID, 1)
+#define M4U_PORT_CAM_LSCI_R1_A          MTK_M4U_ID(M4U_LARB10_ID, 2)
+#define M4U_PORT_CAM_BPCI_R1_A          MTK_M4U_ID(M4U_LARB10_ID, 3)
+#define M4U_PORT_CAM_YUVO_R1_A          MTK_M4U_ID(M4U_LARB10_ID, 4)
+#define M4U_PORT_CAM_UFDI_R2_A          MTK_M4U_ID(M4U_LARB10_ID, 5)
+#define M4U_PORT_CAM_RAWI_R2_A          MTK_M4U_ID(M4U_LARB10_ID, 6)
+#define M4U_PORT_CAM_RAWI_R5_A          MTK_M4U_ID(M4U_LARB10_ID, 7)
+#define M4U_PORT_CAM_IMGO_R1_B          MTK_M4U_ID(M4U_LARB10_ID, 8)
+#define M4U_PORT_CAM_RRZO_R1_B          MTK_M4U_ID(M4U_LARB10_ID, 9)
+#define M4U_PORT_CAM_LSCI_R1_B          MTK_M4U_ID(M4U_LARB10_ID, 10)
+#define M4U_PORT_CAM_BPCI_R1_B          MTK_M4U_ID(M4U_LARB10_ID, 11)
+#define M4U_PORT_CAM_YUVO_R1_B          MTK_M4U_ID(M4U_LARB10_ID, 12)
+#define M4U_PORT_CAM_UFDI_R2_B          MTK_M4U_ID(M4U_LARB10_ID, 13)
+#define M4U_PORT_CAM_RAWI_R2_B          MTK_M4U_ID(M4U_LARB10_ID, 14)
+#define M4U_PORT_CAM_RAWI_R5_B          MTK_M4U_ID(M4U_LARB10_ID, 15)
+#define M4U_PORT_CAM_CAMSV_0            MTK_M4U_ID(M4U_LARB10_ID, 16)
+#define M4U_PORT_CAM_AAO_R1_A           MTK_M4U_ID(M4U_LARB10_ID, 17)
+#define M4U_PORT_CAM_AFO_R1_A           MTK_M4U_ID(M4U_LARB10_ID, 18)
+#define M4U_PORT_CAM_FLKO_R1_A          MTK_M4U_ID(M4U_LARB10_ID, 19)
+#define M4U_PORT_CAM_LCESO_R1_A                 MTK_M4U_ID(M4U_LARB10_ID, 20)
+#define M4U_PORT_CAM_CRZO_R1_A          MTK_M4U_ID(M4U_LARB10_ID, 21)
+#define M4U_PORT_CAM_AAO_R1_B           MTK_M4U_ID(M4U_LARB10_ID, 22)
+#define M4U_PORT_CAM_AFO_R1_B           MTK_M4U_ID(M4U_LARB10_ID, 23)
+#define M4U_PORT_CAM_FLKO_R1_B          MTK_M4U_ID(M4U_LARB10_ID, 24)
+#define M4U_PORT_CAM_LCESO_R1_B                 MTK_M4U_ID(M4U_LARB10_ID, 25)
+#define M4U_PORT_CAM_CRZO_R1_B          MTK_M4U_ID(M4U_LARB10_ID, 26)
+#define M4U_PORT_CAM_LTMSO_R1_A                 MTK_M4U_ID(M4U_LARB10_ID, 27)
+#define M4U_PORT_CAM_RSSO_R1_A          MTK_M4U_ID(M4U_LARB10_ID, 28)
+#define M4U_PORT_CAM_LTMSO_R1_B                 MTK_M4U_ID(M4U_LARB10_ID, 29)
+#define M4U_PORT_CAM_RSSO_R1_B          MTK_M4U_ID(M4U_LARB10_ID, 30)
+
+/* larb11-CAM-VPU */
+#define M4U_PORT_CAM_IPUO               MTK_M4U_ID(M4U_LARB11_ID, 0)
+#define M4U_PORT_CAM_IPU2O              MTK_M4U_ID(M4U_LARB11_ID, 1)
+#define M4U_PORT_CAM_IPU3O              MTK_M4U_ID(M4U_LARB11_ID, 2)
+#define M4U_PORT_CAM_IPUI               MTK_M4U_ID(M4U_LARB11_ID, 3)
+#define M4U_PORT_CAM_IPU2I              MTK_M4U_ID(M4U_LARB11_ID, 4)
+
+#endif
diff --git a/include/dt-bindings/pinctrl/mt6779-pinfunc.h b/include/dt-bindings/pinctrl/mt6779-pinfunc.h
new file mode 100644 (file)
index 0000000..87fdc43
--- /dev/null
@@ -0,0 +1,1242 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 MediaTek Inc.
+ * Author: Andy Teng <andy.teng@mediatek.com>
+ *
+ */
+
+#ifndef __MT6779_PINFUNC_H
+#define __MT6779_PINFUNC_H
+
+#include <dt-bindings/pinctrl/mt65xx.h>
+
+#define PINMUX_GPIO0__FUNC_GPIO0 (MTK_PIN_NO(0) | 0)
+#define PINMUX_GPIO0__FUNC_SPI6_MI (MTK_PIN_NO(0) | 1)
+#define PINMUX_GPIO0__FUNC_I2S5_LRCK (MTK_PIN_NO(0) | 2)
+#define PINMUX_GPIO0__FUNC_TDM_LRCK_2ND (MTK_PIN_NO(0) | 3)
+#define PINMUX_GPIO0__FUNC_PCM1_SYNC (MTK_PIN_NO(0) | 4)
+#define PINMUX_GPIO0__FUNC_SCL_6306 (MTK_PIN_NO(0) | 5)
+#define PINMUX_GPIO0__FUNC_TP_GPIO0_AO (MTK_PIN_NO(0) | 6)
+#define PINMUX_GPIO0__FUNC_PTA_RXD (MTK_PIN_NO(0) | 7)
+
+#define PINMUX_GPIO1__FUNC_GPIO1 (MTK_PIN_NO(1) | 0)
+#define PINMUX_GPIO1__FUNC_SPI6_CSB (MTK_PIN_NO(1) | 1)
+#define PINMUX_GPIO1__FUNC_I2S5_DO (MTK_PIN_NO(1) | 2)
+#define PINMUX_GPIO1__FUNC_TDM_DATA0_2ND (MTK_PIN_NO(1) | 3)
+#define PINMUX_GPIO1__FUNC_PCM1_DO0 (MTK_PIN_NO(1) | 4)
+#define PINMUX_GPIO1__FUNC_SDA_6306 (MTK_PIN_NO(1) | 5)
+#define PINMUX_GPIO1__FUNC_TP_GPIO1_AO (MTK_PIN_NO(1) | 6)
+#define PINMUX_GPIO1__FUNC_PTA_TXD (MTK_PIN_NO(1) | 7)
+
+#define PINMUX_GPIO2__FUNC_GPIO2 (MTK_PIN_NO(2) | 0)
+#define PINMUX_GPIO2__FUNC_SPI6_MO (MTK_PIN_NO(2) | 1)
+#define PINMUX_GPIO2__FUNC_I2S5_BCK (MTK_PIN_NO(2) | 2)
+#define PINMUX_GPIO2__FUNC_TDM_BCK_2ND (MTK_PIN_NO(2) | 3)
+#define PINMUX_GPIO2__FUNC_PCM1_CLK (MTK_PIN_NO(2) | 4)
+#define PINMUX_GPIO2__FUNC_MD_INT1_C2K_UIM0_HOT_PLUG (MTK_PIN_NO(2) | 5)
+#define PINMUX_GPIO2__FUNC_TP_GPIO2_AO (MTK_PIN_NO(2) | 6)
+
+#define PINMUX_GPIO3__FUNC_GPIO3 (MTK_PIN_NO(3) | 0)
+#define PINMUX_GPIO3__FUNC_SPI6_CLK (MTK_PIN_NO(3) | 1)
+#define PINMUX_GPIO3__FUNC_I2S5_MCK (MTK_PIN_NO(3) | 2)
+#define PINMUX_GPIO3__FUNC_TDM_MCK_2ND (MTK_PIN_NO(3) | 3)
+#define PINMUX_GPIO3__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(3) | 4)
+#define PINMUX_GPIO3__FUNC_MD_INT2_C2K_UIM1_HOT_PLUG (MTK_PIN_NO(3) | 5)
+#define PINMUX_GPIO3__FUNC_TP_GPIO3_AO (MTK_PIN_NO(3) | 6)
+
+#define PINMUX_GPIO4__FUNC_GPIO4 (MTK_PIN_NO(4) | 0)
+#define PINMUX_GPIO4__FUNC_SPI7_MI (MTK_PIN_NO(4) | 1)
+#define PINMUX_GPIO4__FUNC_I2S0_MCK (MTK_PIN_NO(4) | 2)
+#define PINMUX_GPIO4__FUNC_TDM_DATA1_2ND (MTK_PIN_NO(4) | 3)
+#define PINMUX_GPIO4__FUNC_PCM1_DO1 (MTK_PIN_NO(4) | 4)
+#define PINMUX_GPIO4__FUNC_DMIC1_CLK (MTK_PIN_NO(4) | 5)
+#define PINMUX_GPIO4__FUNC_TP_GPIO4_AO (MTK_PIN_NO(4) | 6)
+#define PINMUX_GPIO4__FUNC_SCL8 (MTK_PIN_NO(4) | 7)
+
+#define PINMUX_GPIO5__FUNC_GPIO5 (MTK_PIN_NO(5) | 0)
+#define PINMUX_GPIO5__FUNC_SPI7_CSB (MTK_PIN_NO(5) | 1)
+#define PINMUX_GPIO5__FUNC_I2S0_BCK (MTK_PIN_NO(5) | 2)
+#define PINMUX_GPIO5__FUNC_TDM_DATA2_2ND (MTK_PIN_NO(5) | 3)
+#define PINMUX_GPIO5__FUNC_PCM1_DO2 (MTK_PIN_NO(5) | 4)
+#define PINMUX_GPIO5__FUNC_DMIC1_DAT (MTK_PIN_NO(5) | 5)
+#define PINMUX_GPIO5__FUNC_TP_GPIO5_AO (MTK_PIN_NO(5) | 6)
+#define PINMUX_GPIO5__FUNC_SDA8 (MTK_PIN_NO(5) | 7)
+
+#define PINMUX_GPIO6__FUNC_GPIO6 (MTK_PIN_NO(6) | 0)
+#define PINMUX_GPIO6__FUNC_SPI7_MO (MTK_PIN_NO(6) | 1)
+#define PINMUX_GPIO6__FUNC_I2S0_LRCK (MTK_PIN_NO(6) | 2)
+#define PINMUX_GPIO6__FUNC_TDM_DATA3_2ND (MTK_PIN_NO(6) | 3)
+#define PINMUX_GPIO6__FUNC_PCM1_DI (MTK_PIN_NO(6) | 4)
+#define PINMUX_GPIO6__FUNC_DMIC_CLK (MTK_PIN_NO(6) | 5)
+#define PINMUX_GPIO6__FUNC_TP_GPIO6_AO (MTK_PIN_NO(6) | 6)
+#define PINMUX_GPIO6__FUNC_SCL9 (MTK_PIN_NO(6) | 7)
+
+#define PINMUX_GPIO7__FUNC_GPIO7 (MTK_PIN_NO(7) | 0)
+#define PINMUX_GPIO7__FUNC_SPI7_CLK (MTK_PIN_NO(7) | 1)
+#define PINMUX_GPIO7__FUNC_I2S0_DI (MTK_PIN_NO(7) | 2)
+#define PINMUX_GPIO7__FUNC_SRCLKENAI1 (MTK_PIN_NO(7) | 3)
+#define PINMUX_GPIO7__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(7) | 4)
+#define PINMUX_GPIO7__FUNC_DMIC_DAT (MTK_PIN_NO(7) | 5)
+#define PINMUX_GPIO7__FUNC_TP_GPIO7_AO (MTK_PIN_NO(7) | 6)
+#define PINMUX_GPIO7__FUNC_SDA9 (MTK_PIN_NO(7) | 7)
+
+#define PINMUX_GPIO8__FUNC_GPIO8 (MTK_PIN_NO(8) | 0)
+#define PINMUX_GPIO8__FUNC_PWM_0 (MTK_PIN_NO(8) | 1)
+#define PINMUX_GPIO8__FUNC_I2S2_DI2 (MTK_PIN_NO(8) | 2)
+#define PINMUX_GPIO8__FUNC_SRCLKENAI0 (MTK_PIN_NO(8) | 3)
+#define PINMUX_GPIO8__FUNC_URXD1 (MTK_PIN_NO(8) | 4)
+#define PINMUX_GPIO8__FUNC_I2S0_MCK (MTK_PIN_NO(8) | 5)
+#define PINMUX_GPIO8__FUNC_CONN_MCU_DBGACK_N (MTK_PIN_NO(8) | 6)
+#define PINMUX_GPIO8__FUNC_IDDIG (MTK_PIN_NO(8) | 7)
+
+#define PINMUX_GPIO9__FUNC_GPIO9 (MTK_PIN_NO(9) | 0)
+#define PINMUX_GPIO9__FUNC_PWM_3 (MTK_PIN_NO(9) | 1)
+#define PINMUX_GPIO9__FUNC_MD_INT0 (MTK_PIN_NO(9) | 2)
+#define PINMUX_GPIO9__FUNC_SRCLKENAI1 (MTK_PIN_NO(9) | 3)
+#define PINMUX_GPIO9__FUNC_UTXD1 (MTK_PIN_NO(9) | 4)
+#define PINMUX_GPIO9__FUNC_I2S0_BCK (MTK_PIN_NO(9) | 5)
+#define PINMUX_GPIO9__FUNC_CONN_MCU_TRST_B (MTK_PIN_NO(9) | 6)
+#define PINMUX_GPIO9__FUNC_USB_DRVVBUS (MTK_PIN_NO(9) | 7)
+
+#define PINMUX_GPIO10__FUNC_GPIO10 (MTK_PIN_NO(10) | 0)
+#define PINMUX_GPIO10__FUNC_MSDC1_CLK_A (MTK_PIN_NO(10) | 1)
+#define PINMUX_GPIO10__FUNC_TP_URXD1_AO (MTK_PIN_NO(10) | 2)
+#define PINMUX_GPIO10__FUNC_I2S1_LRCK (MTK_PIN_NO(10) | 3)
+#define PINMUX_GPIO10__FUNC_UCTS0 (MTK_PIN_NO(10) | 4)
+#define PINMUX_GPIO10__FUNC_DMIC1_CLK (MTK_PIN_NO(10) | 5)
+#define PINMUX_GPIO10__FUNC_KPCOL2 (MTK_PIN_NO(10) | 6)
+#define PINMUX_GPIO10__FUNC_SCL8 (MTK_PIN_NO(10) | 7)
+
+#define PINMUX_GPIO11__FUNC_GPIO11 (MTK_PIN_NO(11) | 0)
+#define PINMUX_GPIO11__FUNC_MSDC1_CMD_A (MTK_PIN_NO(11) | 1)
+#define PINMUX_GPIO11__FUNC_TP_UTXD1_AO (MTK_PIN_NO(11) | 2)
+#define PINMUX_GPIO11__FUNC_I2S1_DO (MTK_PIN_NO(11) | 3)
+#define PINMUX_GPIO11__FUNC_URTS0 (MTK_PIN_NO(11) | 4)
+#define PINMUX_GPIO11__FUNC_DMIC1_DAT (MTK_PIN_NO(11) | 5)
+#define PINMUX_GPIO11__FUNC_KPROW2 (MTK_PIN_NO(11) | 6)
+#define PINMUX_GPIO11__FUNC_SDA8 (MTK_PIN_NO(11) | 7)
+
+#define PINMUX_GPIO12__FUNC_GPIO12 (MTK_PIN_NO(12) | 0)
+#define PINMUX_GPIO12__FUNC_MSDC1_DAT3_A (MTK_PIN_NO(12) | 1)
+#define PINMUX_GPIO12__FUNC_TP_URXD2_AO (MTK_PIN_NO(12) | 2)
+#define PINMUX_GPIO12__FUNC_I2S1_MCK (MTK_PIN_NO(12) | 3)
+#define PINMUX_GPIO12__FUNC_UCTS1 (MTK_PIN_NO(12) | 4)
+#define PINMUX_GPIO12__FUNC_DMIC_CLK (MTK_PIN_NO(12) | 5)
+#define PINMUX_GPIO12__FUNC_ANT_SEL9 (MTK_PIN_NO(12) | 6)
+#define PINMUX_GPIO12__FUNC_SCL9 (MTK_PIN_NO(12) | 7)
+
+#define PINMUX_GPIO13__FUNC_GPIO13 (MTK_PIN_NO(13) | 0)
+#define PINMUX_GPIO13__FUNC_MSDC1_DAT0_A (MTK_PIN_NO(13) | 1)
+#define PINMUX_GPIO13__FUNC_TP_UTXD2_AO (MTK_PIN_NO(13) | 2)
+#define PINMUX_GPIO13__FUNC_I2S1_BCK (MTK_PIN_NO(13) | 3)
+#define PINMUX_GPIO13__FUNC_URTS1 (MTK_PIN_NO(13) | 4)
+#define PINMUX_GPIO13__FUNC_DMIC_DAT (MTK_PIN_NO(13) | 5)
+#define PINMUX_GPIO13__FUNC_ANT_SEL10 (MTK_PIN_NO(13) | 6)
+#define PINMUX_GPIO13__FUNC_SDA9 (MTK_PIN_NO(13) | 7)
+
+#define PINMUX_GPIO14__FUNC_GPIO14 (MTK_PIN_NO(14) | 0)
+#define PINMUX_GPIO14__FUNC_MSDC1_DAT2_A (MTK_PIN_NO(14) | 1)
+#define PINMUX_GPIO14__FUNC_PWM_3 (MTK_PIN_NO(14) | 2)
+#define PINMUX_GPIO14__FUNC_IDDIG (MTK_PIN_NO(14) | 3)
+#define PINMUX_GPIO14__FUNC_MD_INT0 (MTK_PIN_NO(14) | 4)
+#define PINMUX_GPIO14__FUNC_PTA_RXD (MTK_PIN_NO(14) | 5)
+#define PINMUX_GPIO14__FUNC_ANT_SEL11 (MTK_PIN_NO(14) | 6)
+
+#define PINMUX_GPIO15__FUNC_GPIO15 (MTK_PIN_NO(15) | 0)
+#define PINMUX_GPIO15__FUNC_MSDC1_DAT1_A (MTK_PIN_NO(15) | 1)
+#define PINMUX_GPIO15__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(15) | 2)
+#define PINMUX_GPIO15__FUNC_USB_DRVVBUS (MTK_PIN_NO(15) | 3)
+#define PINMUX_GPIO15__FUNC_MD_INT1_C2K_UIM0_HOT_PLUG (MTK_PIN_NO(15) | 4)
+#define PINMUX_GPIO15__FUNC_PTA_TXD (MTK_PIN_NO(15) | 5)
+#define PINMUX_GPIO15__FUNC_ANT_SEL12 (MTK_PIN_NO(15) | 6)
+
+#define PINMUX_GPIO16__FUNC_GPIO16 (MTK_PIN_NO(16) | 0)
+#define PINMUX_GPIO16__FUNC_SRCLKENAI0 (MTK_PIN_NO(16) | 1)
+#define PINMUX_GPIO16__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(16) | 2)
+#define PINMUX_GPIO16__FUNC_MFG_EJTAG_TRSTN (MTK_PIN_NO(16) | 3)
+#define PINMUX_GPIO16__FUNC_MD_INT2_C2K_UIM1_HOT_PLUG (MTK_PIN_NO(16) | 4)
+#define PINMUX_GPIO16__FUNC_CONN_TCXOENA_REQ (MTK_PIN_NO(16) | 5)
+#define PINMUX_GPIO16__FUNC_PWM_2 (MTK_PIN_NO(16) | 6)
+#define PINMUX_GPIO16__FUNC_JTRSTN_SEL1 (MTK_PIN_NO(16) | 7)
+
+#define PINMUX_GPIO17__FUNC_GPIO17 (MTK_PIN_NO(17) | 0)
+#define PINMUX_GPIO17__FUNC_SPI0_A_MI (MTK_PIN_NO(17) | 1)
+#define PINMUX_GPIO17__FUNC_SCP_SPI0_MI (MTK_PIN_NO(17) | 2)
+#define PINMUX_GPIO17__FUNC_MFG_EJTAG_TDO (MTK_PIN_NO(17) | 3)
+#define PINMUX_GPIO17__FUNC_DPI_HSYNC (MTK_PIN_NO(17) | 4)
+#define PINMUX_GPIO17__FUNC_MFG_DFD_JTAG_TDO (MTK_PIN_NO(17) | 5)
+#define PINMUX_GPIO17__FUNC_DFD_TDO (MTK_PIN_NO(17) | 6)
+#define PINMUX_GPIO17__FUNC_JTDO_SEL1 (MTK_PIN_NO(17) | 7)
+
+#define PINMUX_GPIO18__FUNC_GPIO18 (MTK_PIN_NO(18) | 0)
+#define PINMUX_GPIO18__FUNC_SPI0_A_MO (MTK_PIN_NO(18) | 1)
+#define PINMUX_GPIO18__FUNC_SCP_SPI0_MO (MTK_PIN_NO(18) | 2)
+#define PINMUX_GPIO18__FUNC_MFG_EJTAG_TDI (MTK_PIN_NO(18) | 3)
+#define PINMUX_GPIO18__FUNC_DPI_VSYNC (MTK_PIN_NO(18) | 4)
+#define PINMUX_GPIO18__FUNC_MFG_DFD_JTAG_TDI (MTK_PIN_NO(18) | 5)
+#define PINMUX_GPIO18__FUNC_DFD_TDI (MTK_PIN_NO(18) | 6)
+#define PINMUX_GPIO18__FUNC_JTDI_SEL1 (MTK_PIN_NO(18) | 7)
+
+#define PINMUX_GPIO19__FUNC_GPIO19 (MTK_PIN_NO(19) | 0)
+#define PINMUX_GPIO19__FUNC_SPI0_A_CSB (MTK_PIN_NO(19) | 1)
+#define PINMUX_GPIO19__FUNC_SCP_SPI0_CS (MTK_PIN_NO(19) | 2)
+#define PINMUX_GPIO19__FUNC_MFG_EJTAG_TMS (MTK_PIN_NO(19) | 3)
+#define PINMUX_GPIO19__FUNC_DPI_DE (MTK_PIN_NO(19) | 4)
+#define PINMUX_GPIO19__FUNC_MFG_DFD_JTAG_TMS (MTK_PIN_NO(19) | 5)
+#define PINMUX_GPIO19__FUNC_DFD_TMS (MTK_PIN_NO(19) | 6)
+#define PINMUX_GPIO19__FUNC_JTMS_SEL1 (MTK_PIN_NO(19) | 7)
+
+#define PINMUX_GPIO20__FUNC_GPIO20 (MTK_PIN_NO(20) | 0)
+#define PINMUX_GPIO20__FUNC_SPI0_A_CLK (MTK_PIN_NO(20) | 1)
+#define PINMUX_GPIO20__FUNC_SCP_SPI0_CK (MTK_PIN_NO(20) | 2)
+#define PINMUX_GPIO20__FUNC_MFG_EJTAG_TCK (MTK_PIN_NO(20) | 3)
+#define PINMUX_GPIO20__FUNC_DPI_CK (MTK_PIN_NO(20) | 4)
+#define PINMUX_GPIO20__FUNC_MFG_DFD_JTAG_TCK (MTK_PIN_NO(20) | 5)
+#define PINMUX_GPIO20__FUNC_DFD_TCK_XI (MTK_PIN_NO(20) | 6)
+#define PINMUX_GPIO20__FUNC_JTCK_SEL1 (MTK_PIN_NO(20) | 7)
+
+#define PINMUX_GPIO21__FUNC_GPIO21 (MTK_PIN_NO(21) | 0)
+#define PINMUX_GPIO21__FUNC_PWM_0 (MTK_PIN_NO(21) | 1)
+#define PINMUX_GPIO21__FUNC_CMFLASH0 (MTK_PIN_NO(21) | 2)
+#define PINMUX_GPIO21__FUNC_CMVREF2 (MTK_PIN_NO(21) | 3)
+#define PINMUX_GPIO21__FUNC_CLKM0 (MTK_PIN_NO(21) | 4)
+#define PINMUX_GPIO21__FUNC_ANT_SEL9 (MTK_PIN_NO(21) | 5)
+#define PINMUX_GPIO21__FUNC_CONN_TCXOENA_REQ (MTK_PIN_NO(21) | 6)
+#define PINMUX_GPIO21__FUNC_DBG_MON_A27 (MTK_PIN_NO(21) | 7)
+
+#define PINMUX_GPIO22__FUNC_GPIO22 (MTK_PIN_NO(22) | 0)
+#define PINMUX_GPIO22__FUNC_PWM_1 (MTK_PIN_NO(22) | 1)
+#define PINMUX_GPIO22__FUNC_CMFLASH1 (MTK_PIN_NO(22) | 2)
+#define PINMUX_GPIO22__FUNC_CMVREF3 (MTK_PIN_NO(22) | 3)
+#define PINMUX_GPIO22__FUNC_CLKM1 (MTK_PIN_NO(22) | 4)
+#define PINMUX_GPIO22__FUNC_ANT_SEL10 (MTK_PIN_NO(22) | 5)
+#define PINMUX_GPIO22__FUNC_DBG_MON_A28 (MTK_PIN_NO(22) | 7)
+
+#define PINMUX_GPIO23__FUNC_GPIO23 (MTK_PIN_NO(23) | 0)
+#define PINMUX_GPIO23__FUNC_PWM_2 (MTK_PIN_NO(23) | 1)
+#define PINMUX_GPIO23__FUNC_CMFLASH2 (MTK_PIN_NO(23) | 2)
+#define PINMUX_GPIO23__FUNC_CMVREF0 (MTK_PIN_NO(23) | 3)
+#define PINMUX_GPIO23__FUNC_CLKM2 (MTK_PIN_NO(23) | 4)
+#define PINMUX_GPIO23__FUNC_ANT_SEL11 (MTK_PIN_NO(23) | 5)
+#define PINMUX_GPIO23__FUNC_DBG_MON_A29 (MTK_PIN_NO(23) | 7)
+
+#define PINMUX_GPIO24__FUNC_GPIO24 (MTK_PIN_NO(24) | 0)
+#define PINMUX_GPIO24__FUNC_PWM_0 (MTK_PIN_NO(24) | 1)
+#define PINMUX_GPIO24__FUNC_CMFLASH3 (MTK_PIN_NO(24) | 2)
+#define PINMUX_GPIO24__FUNC_CMVREF1 (MTK_PIN_NO(24) | 3)
+#define PINMUX_GPIO24__FUNC_CLKM3 (MTK_PIN_NO(24) | 4)
+#define PINMUX_GPIO24__FUNC_ANT_SEL12 (MTK_PIN_NO(24) | 5)
+#define PINMUX_GPIO24__FUNC_DBG_MON_A30 (MTK_PIN_NO(24) | 7)
+
+#define PINMUX_GPIO25__FUNC_GPIO25 (MTK_PIN_NO(25) | 0)
+#define PINMUX_GPIO25__FUNC_SRCLKENAI0 (MTK_PIN_NO(25) | 1)
+#define PINMUX_GPIO25__FUNC_UCTS0 (MTK_PIN_NO(25) | 2)
+#define PINMUX_GPIO25__FUNC_SCL8 (MTK_PIN_NO(25) | 3)
+#define PINMUX_GPIO25__FUNC_CMVREF4 (MTK_PIN_NO(25) | 4)
+#define PINMUX_GPIO25__FUNC_I2S0_LRCK (MTK_PIN_NO(25) | 5)
+#define PINMUX_GPIO25__FUNC_CONN_TCXOENA_REQ (MTK_PIN_NO(25) | 6)
+#define PINMUX_GPIO25__FUNC_DBG_MON_A31 (MTK_PIN_NO(25) | 7)
+
+#define PINMUX_GPIO26__FUNC_GPIO26 (MTK_PIN_NO(26) | 0)
+#define PINMUX_GPIO26__FUNC_PWM_0 (MTK_PIN_NO(26) | 1)
+#define PINMUX_GPIO26__FUNC_URTS0 (MTK_PIN_NO(26) | 2)
+#define PINMUX_GPIO26__FUNC_SDA8 (MTK_PIN_NO(26) | 3)
+#define PINMUX_GPIO26__FUNC_CLKM0 (MTK_PIN_NO(26) | 4)
+#define PINMUX_GPIO26__FUNC_I2S0_DI (MTK_PIN_NO(26) | 5)
+#define PINMUX_GPIO26__FUNC_AGPS_SYNC (MTK_PIN_NO(26) | 6)
+#define PINMUX_GPIO26__FUNC_DBG_MON_A32 (MTK_PIN_NO(26) | 7)
+
+#define PINMUX_GPIO27__FUNC_GPIO27 (MTK_PIN_NO(27) | 0)
+#define PINMUX_GPIO27__FUNC_AP_GOOD (MTK_PIN_NO(27) | 1)
+
+#define PINMUX_GPIO28__FUNC_GPIO28 (MTK_PIN_NO(28) | 0)
+#define PINMUX_GPIO28__FUNC_SCL5 (MTK_PIN_NO(28) | 1)
+
+#define PINMUX_GPIO29__FUNC_GPIO29 (MTK_PIN_NO(29) | 0)
+#define PINMUX_GPIO29__FUNC_SDA5 (MTK_PIN_NO(29) | 1)
+
+#define PINMUX_GPIO30__FUNC_GPIO30 (MTK_PIN_NO(30) | 0)
+#define PINMUX_GPIO30__FUNC_I2S1_MCK (MTK_PIN_NO(30) | 1)
+#define PINMUX_GPIO30__FUNC_I2S3_MCK (MTK_PIN_NO(30) | 2)
+#define PINMUX_GPIO30__FUNC_I2S2_MCK (MTK_PIN_NO(30) | 3)
+#define PINMUX_GPIO30__FUNC_DPI_D0 (MTK_PIN_NO(30) | 4)
+#define PINMUX_GPIO30__FUNC_SPI4_MI (MTK_PIN_NO(30) | 5)
+#define PINMUX_GPIO30__FUNC_CONN_MCU_DBGI_N (MTK_PIN_NO(30) | 6)
+
+#define PINMUX_GPIO31__FUNC_GPIO31 (MTK_PIN_NO(31) | 0)
+#define PINMUX_GPIO31__FUNC_I2S1_BCK (MTK_PIN_NO(31) | 1)
+#define PINMUX_GPIO31__FUNC_I2S3_BCK (MTK_PIN_NO(31) | 2)
+#define PINMUX_GPIO31__FUNC_I2S2_BCK (MTK_PIN_NO(31) | 3)
+#define PINMUX_GPIO31__FUNC_DPI_D1 (MTK_PIN_NO(31) | 4)
+#define PINMUX_GPIO31__FUNC_SPI4_CSB (MTK_PIN_NO(31) | 5)
+#define PINMUX_GPIO31__FUNC_CONN_MCU_TDO (MTK_PIN_NO(31) | 6)
+
+#define PINMUX_GPIO32__FUNC_GPIO32 (MTK_PIN_NO(32) | 0)
+#define PINMUX_GPIO32__FUNC_I2S1_LRCK (MTK_PIN_NO(32) | 1)
+#define PINMUX_GPIO32__FUNC_I2S3_LRCK (MTK_PIN_NO(32) | 2)
+#define PINMUX_GPIO32__FUNC_I2S2_LRCK (MTK_PIN_NO(32) | 3)
+#define PINMUX_GPIO32__FUNC_DPI_D2 (MTK_PIN_NO(32) | 4)
+#define PINMUX_GPIO32__FUNC_SPI4_MO (MTK_PIN_NO(32) | 5)
+#define PINMUX_GPIO32__FUNC_CONN_MCU_TDI (MTK_PIN_NO(32) | 6)
+
+#define PINMUX_GPIO33__FUNC_GPIO33 (MTK_PIN_NO(33) | 0)
+#define PINMUX_GPIO33__FUNC_I2S2_DI (MTK_PIN_NO(33) | 1)
+#define PINMUX_GPIO33__FUNC_I2S0_DI (MTK_PIN_NO(33) | 2)
+#define PINMUX_GPIO33__FUNC_I2S5_DO (MTK_PIN_NO(33) | 3)
+#define PINMUX_GPIO33__FUNC_DPI_D3 (MTK_PIN_NO(33) | 4)
+#define PINMUX_GPIO33__FUNC_SPI4_CLK (MTK_PIN_NO(33) | 5)
+#define PINMUX_GPIO33__FUNC_CONN_MCU_TMS (MTK_PIN_NO(33) | 6)
+
+#define PINMUX_GPIO34__FUNC_GPIO34 (MTK_PIN_NO(34) | 0)
+#define PINMUX_GPIO34__FUNC_I2S1_DO (MTK_PIN_NO(34) | 1)
+#define PINMUX_GPIO34__FUNC_I2S3_DO (MTK_PIN_NO(34) | 2)
+#define PINMUX_GPIO34__FUNC_I2S2_DI2 (MTK_PIN_NO(34) | 3)
+#define PINMUX_GPIO34__FUNC_DPI_D4 (MTK_PIN_NO(34) | 4)
+#define PINMUX_GPIO34__FUNC_AGPS_SYNC (MTK_PIN_NO(34) | 5)
+#define PINMUX_GPIO34__FUNC_CONN_MCU_TCK (MTK_PIN_NO(34) | 6)
+
+#define PINMUX_GPIO35__FUNC_GPIO35 (MTK_PIN_NO(35) | 0)
+#define PINMUX_GPIO35__FUNC_TDM_LRCK (MTK_PIN_NO(35) | 1)
+#define PINMUX_GPIO35__FUNC_I2S1_LRCK (MTK_PIN_NO(35) | 2)
+#define PINMUX_GPIO35__FUNC_I2S5_LRCK (MTK_PIN_NO(35) | 3)
+#define PINMUX_GPIO35__FUNC_DPI_D5 (MTK_PIN_NO(35) | 4)
+#define PINMUX_GPIO35__FUNC_SPI5_A_MO (MTK_PIN_NO(35) | 5)
+#define PINMUX_GPIO35__FUNC_IO_JTAG_TDI (MTK_PIN_NO(35) | 6)
+#define PINMUX_GPIO35__FUNC_PWM_2 (MTK_PIN_NO(35) | 7)
+
+#define PINMUX_GPIO36__FUNC_GPIO36 (MTK_PIN_NO(36) | 0)
+#define PINMUX_GPIO36__FUNC_TDM_BCK (MTK_PIN_NO(36) | 1)
+#define PINMUX_GPIO36__FUNC_I2S1_BCK (MTK_PIN_NO(36) | 2)
+#define PINMUX_GPIO36__FUNC_I2S5_BCK (MTK_PIN_NO(36) | 3)
+#define PINMUX_GPIO36__FUNC_DPI_D6 (MTK_PIN_NO(36) | 4)
+#define PINMUX_GPIO36__FUNC_SPI5_A_CSB (MTK_PIN_NO(36) | 5)
+#define PINMUX_GPIO36__FUNC_IO_JTAG_TRSTN (MTK_PIN_NO(36) | 6)
+#define PINMUX_GPIO36__FUNC_SRCLKENAI1 (MTK_PIN_NO(36) | 7)
+
+#define PINMUX_GPIO37__FUNC_GPIO37 (MTK_PIN_NO(37) | 0)
+#define PINMUX_GPIO37__FUNC_TDM_MCK (MTK_PIN_NO(37) | 1)
+#define PINMUX_GPIO37__FUNC_I2S1_MCK (MTK_PIN_NO(37) | 2)
+#define PINMUX_GPIO37__FUNC_I2S5_MCK (MTK_PIN_NO(37) | 3)
+#define PINMUX_GPIO37__FUNC_DPI_D7 (MTK_PIN_NO(37) | 4)
+#define PINMUX_GPIO37__FUNC_SPI5_A_MI (MTK_PIN_NO(37) | 5)
+#define PINMUX_GPIO37__FUNC_IO_JTAG_TCK (MTK_PIN_NO(37) | 6)
+#define PINMUX_GPIO37__FUNC_SRCLKENAI0 (MTK_PIN_NO(37) | 7)
+
+#define PINMUX_GPIO38__FUNC_GPIO38 (MTK_PIN_NO(38) | 0)
+#define PINMUX_GPIO38__FUNC_TDM_DATA0 (MTK_PIN_NO(38) | 1)
+#define PINMUX_GPIO38__FUNC_I2S2_DI (MTK_PIN_NO(38) | 2)
+#define PINMUX_GPIO38__FUNC_I2S5_DO (MTK_PIN_NO(38) | 3)
+#define PINMUX_GPIO38__FUNC_DPI_D8 (MTK_PIN_NO(38) | 4)
+#define PINMUX_GPIO38__FUNC_SPI5_A_CLK (MTK_PIN_NO(38) | 5)
+#define PINMUX_GPIO38__FUNC_IO_JTAG_TDO (MTK_PIN_NO(38) | 6)
+#define PINMUX_GPIO38__FUNC_CONN_TCXOENA_REQ (MTK_PIN_NO(38) | 7)
+
+#define PINMUX_GPIO39__FUNC_GPIO39 (MTK_PIN_NO(39) | 0)
+#define PINMUX_GPIO39__FUNC_TDM_DATA1 (MTK_PIN_NO(39) | 1)
+#define PINMUX_GPIO39__FUNC_I2S1_DO (MTK_PIN_NO(39) | 2)
+#define PINMUX_GPIO39__FUNC_I2S2_DI2 (MTK_PIN_NO(39) | 3)
+#define PINMUX_GPIO39__FUNC_DPI_D9 (MTK_PIN_NO(39) | 4)
+#define PINMUX_GPIO39__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(39) | 5)
+#define PINMUX_GPIO39__FUNC_IO_JTAG_TMS (MTK_PIN_NO(39) | 6)
+#define PINMUX_GPIO39__FUNC_IDDIG (MTK_PIN_NO(39) | 7)
+
+#define PINMUX_GPIO40__FUNC_GPIO40 (MTK_PIN_NO(40) | 0)
+#define PINMUX_GPIO40__FUNC_TDM_DATA2 (MTK_PIN_NO(40) | 1)
+#define PINMUX_GPIO40__FUNC_SCL9 (MTK_PIN_NO(40) | 2)
+#define PINMUX_GPIO40__FUNC_PWM_3 (MTK_PIN_NO(40) | 3)
+#define PINMUX_GPIO40__FUNC_DPI_D10 (MTK_PIN_NO(40) | 4)
+#define PINMUX_GPIO40__FUNC_SRCLKENAI0 (MTK_PIN_NO(40) | 5)
+#define PINMUX_GPIO40__FUNC_DAP_MD32_SWD (MTK_PIN_NO(40) | 6)
+#define PINMUX_GPIO40__FUNC_USB_DRVVBUS (MTK_PIN_NO(40) | 7)
+
+#define PINMUX_GPIO41__FUNC_GPIO41 (MTK_PIN_NO(41) | 0)
+#define PINMUX_GPIO41__FUNC_TDM_DATA3 (MTK_PIN_NO(41) | 1)
+#define PINMUX_GPIO41__FUNC_SDA9 (MTK_PIN_NO(41) | 2)
+#define PINMUX_GPIO41__FUNC_PWM_1 (MTK_PIN_NO(41) | 3)
+#define PINMUX_GPIO41__FUNC_DPI_D11 (MTK_PIN_NO(41) | 4)
+#define PINMUX_GPIO41__FUNC_CLKM1 (MTK_PIN_NO(41) | 5)
+#define PINMUX_GPIO41__FUNC_DAP_MD32_SWCK (MTK_PIN_NO(41) | 6)
+
+#define PINMUX_GPIO42__FUNC_GPIO42 (MTK_PIN_NO(42) | 0)
+#define PINMUX_GPIO42__FUNC_DISP_PWM (MTK_PIN_NO(42) | 1)
+
+#define PINMUX_GPIO43__FUNC_GPIO43 (MTK_PIN_NO(43) | 0)
+#define PINMUX_GPIO43__FUNC_DSI_TE (MTK_PIN_NO(43) | 1)
+
+#define PINMUX_GPIO44__FUNC_GPIO44 (MTK_PIN_NO(44) | 0)
+#define PINMUX_GPIO44__FUNC_LCM_RST (MTK_PIN_NO(44) | 1)
+
+#define PINMUX_GPIO45__FUNC_GPIO45 (MTK_PIN_NO(45) | 0)
+#define PINMUX_GPIO45__FUNC_SCL6 (MTK_PIN_NO(45) | 1)
+#define PINMUX_GPIO45__FUNC_SCP_SCL0 (MTK_PIN_NO(45) | 2)
+#define PINMUX_GPIO45__FUNC_SCP_SCL1 (MTK_PIN_NO(45) | 3)
+#define PINMUX_GPIO45__FUNC_SCL_6306 (MTK_PIN_NO(45) | 4)
+
+#define PINMUX_GPIO46__FUNC_GPIO46 (MTK_PIN_NO(46) | 0)
+#define PINMUX_GPIO46__FUNC_SDA6 (MTK_PIN_NO(46) | 1)
+#define PINMUX_GPIO46__FUNC_SCP_SDA0 (MTK_PIN_NO(46) | 2)
+#define PINMUX_GPIO46__FUNC_SCP_SDA1 (MTK_PIN_NO(46) | 3)
+#define PINMUX_GPIO46__FUNC_SDA_6306 (MTK_PIN_NO(46) | 4)
+
+#define PINMUX_GPIO47__FUNC_GPIO47 (MTK_PIN_NO(47) | 0)
+#define PINMUX_GPIO47__FUNC_SPI1_A_MI (MTK_PIN_NO(47) | 1)
+#define PINMUX_GPIO47__FUNC_SCP_SPI1_A_MI (MTK_PIN_NO(47) | 2)
+#define PINMUX_GPIO47__FUNC_KPCOL2 (MTK_PIN_NO(47) | 3)
+#define PINMUX_GPIO47__FUNC_MD_URXD0 (MTK_PIN_NO(47) | 4)
+#define PINMUX_GPIO47__FUNC_CONN_UART0_RXD (MTK_PIN_NO(47) | 5)
+#define PINMUX_GPIO47__FUNC_SSPM_URXD_AO (MTK_PIN_NO(47) | 6)
+#define PINMUX_GPIO47__FUNC_DBG_MON_B32 (MTK_PIN_NO(47) | 7)
+
+#define PINMUX_GPIO48__FUNC_GPIO48 (MTK_PIN_NO(48) | 0)
+#define PINMUX_GPIO48__FUNC_SPI1_A_CSB (MTK_PIN_NO(48) | 1)
+#define PINMUX_GPIO48__FUNC_SCP_SPI1_A_CS (MTK_PIN_NO(48) | 2)
+#define PINMUX_GPIO48__FUNC_KPROW2 (MTK_PIN_NO(48) | 3)
+#define PINMUX_GPIO48__FUNC_MD_UTXD0 (MTK_PIN_NO(48) | 4)
+#define PINMUX_GPIO48__FUNC_CONN_UART0_TXD (MTK_PIN_NO(48) | 5)
+#define PINMUX_GPIO48__FUNC_SSPM_UTXD_AO (MTK_PIN_NO(48) | 6)
+#define PINMUX_GPIO48__FUNC_DBG_MON_B31 (MTK_PIN_NO(48) | 7)
+
+#define PINMUX_GPIO49__FUNC_GPIO49 (MTK_PIN_NO(49) | 0)
+#define PINMUX_GPIO49__FUNC_SPI1_A_MO (MTK_PIN_NO(49) | 1)
+#define PINMUX_GPIO49__FUNC_SCP_SPI1_A_MO (MTK_PIN_NO(49) | 2)
+#define PINMUX_GPIO49__FUNC_UCTS0 (MTK_PIN_NO(49) | 3)
+#define PINMUX_GPIO49__FUNC_MD_URXD1 (MTK_PIN_NO(49) | 4)
+#define PINMUX_GPIO49__FUNC_PWM_1 (MTK_PIN_NO(49) | 5)
+#define PINMUX_GPIO49__FUNC_TP_URXD2_AO (MTK_PIN_NO(49) | 6)
+#define PINMUX_GPIO49__FUNC_DBG_MON_B30 (MTK_PIN_NO(49) | 7)
+
+#define PINMUX_GPIO50__FUNC_GPIO50 (MTK_PIN_NO(50) | 0)
+#define PINMUX_GPIO50__FUNC_SPI1_A_CLK (MTK_PIN_NO(50) | 1)
+#define PINMUX_GPIO50__FUNC_SCP_SPI1_A_CK (MTK_PIN_NO(50) | 2)
+#define PINMUX_GPIO50__FUNC_URTS0 (MTK_PIN_NO(50) | 3)
+#define PINMUX_GPIO50__FUNC_MD_UTXD1 (MTK_PIN_NO(50) | 4)
+#define PINMUX_GPIO50__FUNC_WIFI_TXD (MTK_PIN_NO(50) | 5)
+#define PINMUX_GPIO50__FUNC_TP_UTXD2_AO (MTK_PIN_NO(50) | 6)
+#define PINMUX_GPIO50__FUNC_DBG_MON_B29 (MTK_PIN_NO(50) | 7)
+
+#define PINMUX_GPIO51__FUNC_GPIO51 (MTK_PIN_NO(51) | 0)
+#define PINMUX_GPIO51__FUNC_SCL0 (MTK_PIN_NO(51) | 1)
+
+#define PINMUX_GPIO52__FUNC_GPIO52 (MTK_PIN_NO(52) | 0)
+#define PINMUX_GPIO52__FUNC_SDA0 (MTK_PIN_NO(52) | 1)
+
+#define PINMUX_GPIO53__FUNC_GPIO53 (MTK_PIN_NO(53) | 0)
+#define PINMUX_GPIO53__FUNC_URXD0 (MTK_PIN_NO(53) | 1)
+#define PINMUX_GPIO53__FUNC_UTXD0 (MTK_PIN_NO(53) | 2)
+#define PINMUX_GPIO53__FUNC_MD_URXD0 (MTK_PIN_NO(53) | 3)
+#define PINMUX_GPIO53__FUNC_MD_URXD1 (MTK_PIN_NO(53) | 4)
+#define PINMUX_GPIO53__FUNC_SSPM_URXD_AO (MTK_PIN_NO(53) | 5)
+#define PINMUX_GPIO53__FUNC_CONN_UART0_RXD (MTK_PIN_NO(53) | 7)
+
+#define PINMUX_GPIO54__FUNC_GPIO54 (MTK_PIN_NO(54) | 0)
+#define PINMUX_GPIO54__FUNC_UTXD0 (MTK_PIN_NO(54) | 1)
+#define PINMUX_GPIO54__FUNC_URXD0 (MTK_PIN_NO(54) | 2)
+#define PINMUX_GPIO54__FUNC_MD_UTXD0 (MTK_PIN_NO(54) | 3)
+#define PINMUX_GPIO54__FUNC_MD_UTXD1 (MTK_PIN_NO(54) | 4)
+#define PINMUX_GPIO54__FUNC_SSPM_UTXD_AO (MTK_PIN_NO(54) | 5)
+#define PINMUX_GPIO54__FUNC_WIFI_TXD (MTK_PIN_NO(54) | 6)
+#define PINMUX_GPIO54__FUNC_CONN_UART0_TXD (MTK_PIN_NO(54) | 7)
+
+#define PINMUX_GPIO55__FUNC_GPIO55 (MTK_PIN_NO(55) | 0)
+#define PINMUX_GPIO55__FUNC_SCL3 (MTK_PIN_NO(55) | 1)
+#define PINMUX_GPIO55__FUNC_SCP_SCL0 (MTK_PIN_NO(55) | 2)
+#define PINMUX_GPIO55__FUNC_SCP_SCL1 (MTK_PIN_NO(55) | 3)
+#define PINMUX_GPIO55__FUNC_SCL_6306 (MTK_PIN_NO(55) | 4)
+
+#define PINMUX_GPIO56__FUNC_GPIO56 (MTK_PIN_NO(56) | 0)
+#define PINMUX_GPIO56__FUNC_SDA3 (MTK_PIN_NO(56) | 1)
+#define PINMUX_GPIO56__FUNC_SCP_SDA0 (MTK_PIN_NO(56) | 2)
+#define PINMUX_GPIO56__FUNC_SCP_SDA1 (MTK_PIN_NO(56) | 3)
+#define PINMUX_GPIO56__FUNC_SDA_6306 (MTK_PIN_NO(56) | 4)
+
+#define PINMUX_GPIO57__FUNC_GPIO57 (MTK_PIN_NO(57) | 0)
+#define PINMUX_GPIO57__FUNC_KPROW1 (MTK_PIN_NO(57) | 1)
+#define PINMUX_GPIO57__FUNC_PWM_1 (MTK_PIN_NO(57) | 2)
+#define PINMUX_GPIO57__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(57) | 3)
+#define PINMUX_GPIO57__FUNC_CLKM1 (MTK_PIN_NO(57) | 4)
+#define PINMUX_GPIO57__FUNC_IDDIG (MTK_PIN_NO(57) | 5)
+#define PINMUX_GPIO57__FUNC_CONN_TCXOENA_REQ (MTK_PIN_NO(57) | 6)
+#define PINMUX_GPIO57__FUNC_MBISTREADEN_TRIGGER (MTK_PIN_NO(57) | 7)
+
+#define PINMUX_GPIO58__FUNC_GPIO58 (MTK_PIN_NO(58) | 0)
+#define PINMUX_GPIO58__FUNC_KPROW0 (MTK_PIN_NO(58) | 1)
+#define PINMUX_GPIO58__FUNC_DBG_MON_B28 (MTK_PIN_NO(58) | 7)
+
+#define PINMUX_GPIO59__FUNC_GPIO59 (MTK_PIN_NO(59) | 0)
+#define PINMUX_GPIO59__FUNC_KPCOL0 (MTK_PIN_NO(59) | 1)
+#define PINMUX_GPIO59__FUNC_DBG_MON_B27 (MTK_PIN_NO(59) | 7)
+
+#define PINMUX_GPIO60__FUNC_GPIO60 (MTK_PIN_NO(60) | 0)
+#define PINMUX_GPIO60__FUNC_KPCOL1 (MTK_PIN_NO(60) | 1)
+#define PINMUX_GPIO60__FUNC_PWM_2 (MTK_PIN_NO(60) | 2)
+#define PINMUX_GPIO60__FUNC_UCTS1 (MTK_PIN_NO(60) | 3)
+#define PINMUX_GPIO60__FUNC_CLKM2 (MTK_PIN_NO(60) | 4)
+#define PINMUX_GPIO60__FUNC_USB_DRVVBUS (MTK_PIN_NO(60) | 5)
+#define PINMUX_GPIO60__FUNC_MBISTWRITEEN_TRIGGER (MTK_PIN_NO(60) | 7)
+
+#define PINMUX_GPIO61__FUNC_GPIO61 (MTK_PIN_NO(61) | 0)
+#define PINMUX_GPIO61__FUNC_SCL1 (MTK_PIN_NO(61) | 1)
+#define PINMUX_GPIO61__FUNC_SCP_SCL0 (MTK_PIN_NO(61) | 2)
+#define PINMUX_GPIO61__FUNC_SCP_SCL1 (MTK_PIN_NO(61) | 3)
+
+#define PINMUX_GPIO62__FUNC_GPIO62 (MTK_PIN_NO(62) | 0)
+#define PINMUX_GPIO62__FUNC_SDA1 (MTK_PIN_NO(62) | 1)
+#define PINMUX_GPIO62__FUNC_SCP_SDA0 (MTK_PIN_NO(62) | 2)
+#define PINMUX_GPIO62__FUNC_SCP_SDA1 (MTK_PIN_NO(62) | 3)
+
+#define PINMUX_GPIO63__FUNC_GPIO63 (MTK_PIN_NO(63) | 0)
+#define PINMUX_GPIO63__FUNC_SPI2_MI (MTK_PIN_NO(63) | 1)
+#define PINMUX_GPIO63__FUNC_SCP_SPI2_MI (MTK_PIN_NO(63) | 2)
+#define PINMUX_GPIO63__FUNC_KPCOL2 (MTK_PIN_NO(63) | 3)
+#define PINMUX_GPIO63__FUNC_MRG_DI (MTK_PIN_NO(63) | 4)
+#define PINMUX_GPIO63__FUNC_MD_URXD0 (MTK_PIN_NO(63) | 5)
+#define PINMUX_GPIO63__FUNC_CONN_UART0_RXD (MTK_PIN_NO(63) | 6)
+#define PINMUX_GPIO63__FUNC_DBG_MON_B26 (MTK_PIN_NO(63) | 7)
+
+#define PINMUX_GPIO64__FUNC_GPIO64 (MTK_PIN_NO(64) | 0)
+#define PINMUX_GPIO64__FUNC_SPI2_CSB (MTK_PIN_NO(64) | 1)
+#define PINMUX_GPIO64__FUNC_SCP_SPI2_CS (MTK_PIN_NO(64) | 2)
+#define PINMUX_GPIO64__FUNC_KPROW2 (MTK_PIN_NO(64) | 3)
+#define PINMUX_GPIO64__FUNC_MRG_SYNC (MTK_PIN_NO(64) | 4)
+#define PINMUX_GPIO64__FUNC_MD_UTXD0 (MTK_PIN_NO(64) | 5)
+#define PINMUX_GPIO64__FUNC_CONN_UART0_TXD (MTK_PIN_NO(64) | 6)
+#define PINMUX_GPIO64__FUNC_DBG_MON_B25 (MTK_PIN_NO(64) | 7)
+
+#define PINMUX_GPIO65__FUNC_GPIO65 (MTK_PIN_NO(65) | 0)
+#define PINMUX_GPIO65__FUNC_SPI2_MO (MTK_PIN_NO(65) | 1)
+#define PINMUX_GPIO65__FUNC_SCP_SPI2_MO (MTK_PIN_NO(65) | 2)
+#define PINMUX_GPIO65__FUNC_SCP_SDA1 (MTK_PIN_NO(65) | 3)
+#define PINMUX_GPIO65__FUNC_MRG_DO (MTK_PIN_NO(65) | 4)
+#define PINMUX_GPIO65__FUNC_MD_URXD1 (MTK_PIN_NO(65) | 5)
+#define PINMUX_GPIO65__FUNC_PWM_3 (MTK_PIN_NO(65) | 6)
+
+#define PINMUX_GPIO66__FUNC_GPIO66 (MTK_PIN_NO(66) | 0)
+#define PINMUX_GPIO66__FUNC_SPI2_CLK (MTK_PIN_NO(66) | 1)
+#define PINMUX_GPIO66__FUNC_SCP_SPI2_CK (MTK_PIN_NO(66) | 2)
+#define PINMUX_GPIO66__FUNC_SCP_SCL1 (MTK_PIN_NO(66) | 3)
+#define PINMUX_GPIO66__FUNC_MRG_CLK (MTK_PIN_NO(66) | 4)
+#define PINMUX_GPIO66__FUNC_MD_UTXD1 (MTK_PIN_NO(66) | 5)
+#define PINMUX_GPIO66__FUNC_WIFI_TXD (MTK_PIN_NO(66) | 6)
+
+#define PINMUX_GPIO67__FUNC_GPIO67 (MTK_PIN_NO(67) | 0)
+#define PINMUX_GPIO67__FUNC_I2S3_LRCK (MTK_PIN_NO(67) | 1)
+#define PINMUX_GPIO67__FUNC_I2S1_LRCK (MTK_PIN_NO(67) | 2)
+#define PINMUX_GPIO67__FUNC_URXD1 (MTK_PIN_NO(67) | 3)
+#define PINMUX_GPIO67__FUNC_PCM0_SYNC (MTK_PIN_NO(67) | 4)
+#define PINMUX_GPIO67__FUNC_I2S5_LRCK (MTK_PIN_NO(67) | 5)
+#define PINMUX_GPIO67__FUNC_ANT_SEL9 (MTK_PIN_NO(67) | 6)
+#define PINMUX_GPIO67__FUNC_DBG_MON_B10 (MTK_PIN_NO(67) | 7)
+
+#define PINMUX_GPIO68__FUNC_GPIO68 (MTK_PIN_NO(68) | 0)
+#define PINMUX_GPIO68__FUNC_I2S3_DO (MTK_PIN_NO(68) | 1)
+#define PINMUX_GPIO68__FUNC_I2S1_DO (MTK_PIN_NO(68) | 2)
+#define PINMUX_GPIO68__FUNC_UTXD1 (MTK_PIN_NO(68) | 3)
+#define PINMUX_GPIO68__FUNC_PCM0_DO (MTK_PIN_NO(68) | 4)
+#define PINMUX_GPIO68__FUNC_I2S5_DO (MTK_PIN_NO(68) | 5)
+#define PINMUX_GPIO68__FUNC_ANT_SEL10 (MTK_PIN_NO(68) | 6)
+#define PINMUX_GPIO68__FUNC_DBG_MON_B9 (MTK_PIN_NO(68) | 7)
+
+#define PINMUX_GPIO69__FUNC_GPIO69 (MTK_PIN_NO(69) | 0)
+#define PINMUX_GPIO69__FUNC_I2S3_MCK (MTK_PIN_NO(69) | 1)
+#define PINMUX_GPIO69__FUNC_I2S1_MCK (MTK_PIN_NO(69) | 2)
+#define PINMUX_GPIO69__FUNC_URTS1 (MTK_PIN_NO(69) | 3)
+#define PINMUX_GPIO69__FUNC_AGPS_SYNC (MTK_PIN_NO(69) | 4)
+#define PINMUX_GPIO69__FUNC_I2S5_MCK (MTK_PIN_NO(69) | 5)
+#define PINMUX_GPIO69__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(69) | 6)
+#define PINMUX_GPIO69__FUNC_DBG_MON_B8 (MTK_PIN_NO(69) | 7)
+
+#define PINMUX_GPIO70__FUNC_GPIO70 (MTK_PIN_NO(70) | 0)
+#define PINMUX_GPIO70__FUNC_I2S0_DI (MTK_PIN_NO(70) | 1)
+#define PINMUX_GPIO70__FUNC_I2S2_DI (MTK_PIN_NO(70) | 2)
+#define PINMUX_GPIO70__FUNC_KPCOL2 (MTK_PIN_NO(70) | 3)
+#define PINMUX_GPIO70__FUNC_PCM0_DI (MTK_PIN_NO(70) | 4)
+#define PINMUX_GPIO70__FUNC_I2S2_DI2 (MTK_PIN_NO(70) | 5)
+#define PINMUX_GPIO70__FUNC_ANT_SEL11 (MTK_PIN_NO(70) | 6)
+#define PINMUX_GPIO70__FUNC_DBG_MON_B7 (MTK_PIN_NO(70) | 7)
+
+#define PINMUX_GPIO71__FUNC_GPIO71 (MTK_PIN_NO(71) | 0)
+#define PINMUX_GPIO71__FUNC_I2S3_BCK (MTK_PIN_NO(71) | 1)
+#define PINMUX_GPIO71__FUNC_I2S1_BCK (MTK_PIN_NO(71) | 2)
+#define PINMUX_GPIO71__FUNC_KPROW2 (MTK_PIN_NO(71) | 3)
+#define PINMUX_GPIO71__FUNC_PCM0_CLK (MTK_PIN_NO(71) | 4)
+#define PINMUX_GPIO71__FUNC_I2S5_BCK (MTK_PIN_NO(71) | 5)
+#define PINMUX_GPIO71__FUNC_ANT_SEL12 (MTK_PIN_NO(71) | 6)
+#define PINMUX_GPIO71__FUNC_DBG_MON_B6 (MTK_PIN_NO(71) | 7)
+
+#define PINMUX_GPIO72__FUNC_GPIO72 (MTK_PIN_NO(72) | 0)
+#define PINMUX_GPIO72__FUNC_BPI_BUS19_OLAT0 (MTK_PIN_NO(72) | 1)
+#define PINMUX_GPIO72__FUNC_CONN_BPI_BUS19_OLAT0 (MTK_PIN_NO(72) | 2)
+
+#define PINMUX_GPIO73__FUNC_GPIO73 (MTK_PIN_NO(73) | 0)
+#define PINMUX_GPIO73__FUNC_BPI_BUS18_PA_VM1 (MTK_PIN_NO(73) | 1)
+#define PINMUX_GPIO73__FUNC_CONN_MIPI5_SCLK (MTK_PIN_NO(73) | 2)
+#define PINMUX_GPIO73__FUNC_MIPI5_SCLK (MTK_PIN_NO(73) | 3)
+
+#define PINMUX_GPIO74__FUNC_GPIO74 (MTK_PIN_NO(74) | 0)
+#define PINMUX_GPIO74__FUNC_BPI_BUS17_PA_VM0 (MTK_PIN_NO(74) | 1)
+#define PINMUX_GPIO74__FUNC_CONN_MIPI5_SDATA (MTK_PIN_NO(74) | 2)
+#define PINMUX_GPIO74__FUNC_MIPI5_SDATA (MTK_PIN_NO(74) | 3)
+
+#define PINMUX_GPIO75__FUNC_GPIO75 (MTK_PIN_NO(75) | 0)
+#define PINMUX_GPIO75__FUNC_BPI_BUS20_OLAT1 (MTK_PIN_NO(75) | 1)
+#define PINMUX_GPIO75__FUNC_CONN_BPI_BUS20_OLAT1 (MTK_PIN_NO(75) | 2)
+#define PINMUX_GPIO75__FUNC_RFIC0_BSI_D2 (MTK_PIN_NO(75) | 3)
+
+#define PINMUX_GPIO76__FUNC_GPIO76 (MTK_PIN_NO(76) | 0)
+#define PINMUX_GPIO76__FUNC_RFIC0_BSI_D1 (MTK_PIN_NO(76) | 1)
+
+#define PINMUX_GPIO77__FUNC_GPIO77 (MTK_PIN_NO(77) | 0)
+#define PINMUX_GPIO77__FUNC_RFIC0_BSI_D0 (MTK_PIN_NO(77) | 1)
+
+#define PINMUX_GPIO78__FUNC_GPIO78 (MTK_PIN_NO(78) | 0)
+#define PINMUX_GPIO78__FUNC_BPI_BUS7 (MTK_PIN_NO(78) | 1)
+#define PINMUX_GPIO78__FUNC_DBG_MON_B24 (MTK_PIN_NO(78) | 7)
+
+#define PINMUX_GPIO79__FUNC_GPIO79 (MTK_PIN_NO(79) | 0)
+#define PINMUX_GPIO79__FUNC_BPI_BUS6 (MTK_PIN_NO(79) | 1)
+#define PINMUX_GPIO79__FUNC_DBG_MON_B23 (MTK_PIN_NO(79) | 7)
+
+#define PINMUX_GPIO80__FUNC_GPIO80 (MTK_PIN_NO(80) | 0)
+#define PINMUX_GPIO80__FUNC_BPI_BUS8 (MTK_PIN_NO(80) | 1)
+#define PINMUX_GPIO80__FUNC_DBG_MON_B22 (MTK_PIN_NO(80) | 7)
+
+#define PINMUX_GPIO81__FUNC_GPIO81 (MTK_PIN_NO(81) | 0)
+#define PINMUX_GPIO81__FUNC_BPI_BUS9 (MTK_PIN_NO(81) | 1)
+#define PINMUX_GPIO81__FUNC_DBG_MON_B21 (MTK_PIN_NO(81) | 7)
+
+#define PINMUX_GPIO82__FUNC_GPIO82 (MTK_PIN_NO(82) | 0)
+#define PINMUX_GPIO82__FUNC_BPI_BUS10 (MTK_PIN_NO(82) | 1)
+#define PINMUX_GPIO82__FUNC_DBG_MON_B20 (MTK_PIN_NO(82) | 7)
+
+#define PINMUX_GPIO83__FUNC_GPIO83 (MTK_PIN_NO(83) | 0)
+#define PINMUX_GPIO83__FUNC_BPI_BUS11 (MTK_PIN_NO(83) | 1)
+#define PINMUX_GPIO83__FUNC_DBG_MON_B19 (MTK_PIN_NO(83) | 7)
+
+#define PINMUX_GPIO84__FUNC_GPIO84 (MTK_PIN_NO(84) | 0)
+#define PINMUX_GPIO84__FUNC_BPI_BUS12 (MTK_PIN_NO(84) | 1)
+#define PINMUX_GPIO84__FUNC_CONN_BPI_BUS12 (MTK_PIN_NO(84) | 2)
+
+#define PINMUX_GPIO85__FUNC_GPIO85 (MTK_PIN_NO(85) | 0)
+#define PINMUX_GPIO85__FUNC_BPI_BUS13 (MTK_PIN_NO(85) | 1)
+#define PINMUX_GPIO85__FUNC_CONN_BPI_BUS13 (MTK_PIN_NO(85) | 2)
+
+#define PINMUX_GPIO86__FUNC_GPIO86 (MTK_PIN_NO(86) | 0)
+#define PINMUX_GPIO86__FUNC_BPI_BUS14 (MTK_PIN_NO(86) | 1)
+#define PINMUX_GPIO86__FUNC_CONN_BPI_BUS14 (MTK_PIN_NO(86) | 2)
+
+#define PINMUX_GPIO87__FUNC_GPIO87 (MTK_PIN_NO(87) | 0)
+#define PINMUX_GPIO87__FUNC_BPI_BUS15 (MTK_PIN_NO(87) | 1)
+#define PINMUX_GPIO87__FUNC_CONN_BPI_BUS15 (MTK_PIN_NO(87) | 2)
+
+#define PINMUX_GPIO88__FUNC_GPIO88 (MTK_PIN_NO(88) | 0)
+#define PINMUX_GPIO88__FUNC_BPI_BUS16 (MTK_PIN_NO(88) | 1)
+#define PINMUX_GPIO88__FUNC_CONN_BPI_BUS16 (MTK_PIN_NO(88) | 2)
+
+#define PINMUX_GPIO89__FUNC_GPIO89 (MTK_PIN_NO(89) | 0)
+#define PINMUX_GPIO89__FUNC_BPI_BUS5 (MTK_PIN_NO(89) | 1)
+#define PINMUX_GPIO89__FUNC_DBG_MON_B18 (MTK_PIN_NO(89) | 7)
+
+#define PINMUX_GPIO90__FUNC_GPIO90 (MTK_PIN_NO(90) | 0)
+#define PINMUX_GPIO90__FUNC_BPI_BUS4 (MTK_PIN_NO(90) | 1)
+#define PINMUX_GPIO90__FUNC_DBG_MON_B17 (MTK_PIN_NO(90) | 7)
+
+#define PINMUX_GPIO91__FUNC_GPIO91 (MTK_PIN_NO(91) | 0)
+#define PINMUX_GPIO91__FUNC_BPI_BUS3 (MTK_PIN_NO(91) | 1)
+
+#define PINMUX_GPIO92__FUNC_GPIO92 (MTK_PIN_NO(92) | 0)
+#define PINMUX_GPIO92__FUNC_BPI_BUS2 (MTK_PIN_NO(92) | 1)
+#define PINMUX_GPIO92__FUNC_DBG_MON_B16 (MTK_PIN_NO(92) | 7)
+
+#define PINMUX_GPIO93__FUNC_GPIO93 (MTK_PIN_NO(93) | 0)
+#define PINMUX_GPIO93__FUNC_BPI_BUS1 (MTK_PIN_NO(93) | 1)
+
+#define PINMUX_GPIO94__FUNC_GPIO94 (MTK_PIN_NO(94) | 0)
+#define PINMUX_GPIO94__FUNC_BPI_BUS0 (MTK_PIN_NO(94) | 1)
+#define PINMUX_GPIO94__FUNC_DBG_MON_B15 (MTK_PIN_NO(94) | 7)
+
+#define PINMUX_GPIO95__FUNC_GPIO95 (MTK_PIN_NO(95) | 0)
+#define PINMUX_GPIO95__FUNC_MIPI0_SDATA (MTK_PIN_NO(95) | 1)
+
+#define PINMUX_GPIO96__FUNC_GPIO96 (MTK_PIN_NO(96) | 0)
+#define PINMUX_GPIO96__FUNC_MIPI0_SCLK (MTK_PIN_NO(96) | 1)
+
+#define PINMUX_GPIO97__FUNC_GPIO97 (MTK_PIN_NO(97) | 0)
+#define PINMUX_GPIO97__FUNC_MIPI1_SDATA (MTK_PIN_NO(97) | 1)
+
+#define PINMUX_GPIO98__FUNC_GPIO98 (MTK_PIN_NO(98) | 0)
+#define PINMUX_GPIO98__FUNC_MIPI1_SCLK (MTK_PIN_NO(98) | 1)
+
+#define PINMUX_GPIO99__FUNC_GPIO99 (MTK_PIN_NO(99) | 0)
+#define PINMUX_GPIO99__FUNC_MIPI2_SCLK (MTK_PIN_NO(99) | 1)
+#define PINMUX_GPIO99__FUNC_DBG_MON_B14 (MTK_PIN_NO(99) | 7)
+
+#define PINMUX_GPIO100__FUNC_GPIO100 (MTK_PIN_NO(100) | 0)
+#define PINMUX_GPIO100__FUNC_MIPI2_SDATA (MTK_PIN_NO(100) | 1)
+#define PINMUX_GPIO100__FUNC_DBG_MON_B13 (MTK_PIN_NO(100) | 7)
+
+#define PINMUX_GPIO101__FUNC_GPIO101 (MTK_PIN_NO(101) | 0)
+#define PINMUX_GPIO101__FUNC_MIPI3_SCLK (MTK_PIN_NO(101) | 1)
+#define PINMUX_GPIO101__FUNC_DBG_MON_B12 (MTK_PIN_NO(101) | 7)
+
+#define PINMUX_GPIO102__FUNC_GPIO102 (MTK_PIN_NO(102) | 0)
+#define PINMUX_GPIO102__FUNC_MIPI3_SDATA (MTK_PIN_NO(102) | 1)
+#define PINMUX_GPIO102__FUNC_DBG_MON_B11 (MTK_PIN_NO(102) | 7)
+
+#define PINMUX_GPIO103__FUNC_GPIO103 (MTK_PIN_NO(103) | 0)
+#define PINMUX_GPIO103__FUNC_MIPI4_SCLK (MTK_PIN_NO(103) | 1)
+#define PINMUX_GPIO103__FUNC_CONN_MIPI4_SCLK (MTK_PIN_NO(103) | 2)
+
+#define PINMUX_GPIO104__FUNC_GPIO104 (MTK_PIN_NO(104) | 0)
+#define PINMUX_GPIO104__FUNC_MIPI4_SDATA (MTK_PIN_NO(104) | 1)
+#define PINMUX_GPIO104__FUNC_CONN_MIPI4_SDATA (MTK_PIN_NO(104) | 2)
+
+#define PINMUX_GPIO105__FUNC_GPIO105 (MTK_PIN_NO(105) | 0)
+#define PINMUX_GPIO105__FUNC_BPI_BUS22_OLAT3 (MTK_PIN_NO(105) | 1)
+#define PINMUX_GPIO105__FUNC_CONN_BPI_BUS22_OLAT3 (MTK_PIN_NO(105) | 2)
+
+#define PINMUX_GPIO106__FUNC_GPIO106 (MTK_PIN_NO(106) | 0)
+#define PINMUX_GPIO106__FUNC_BPI_BUS21_OLAT2 (MTK_PIN_NO(106) | 1)
+#define PINMUX_GPIO106__FUNC_CONN_BPI_BUS21_OLAT2 (MTK_PIN_NO(106) | 2)
+
+#define PINMUX_GPIO107__FUNC_GPIO107 (MTK_PIN_NO(107) | 0)
+#define PINMUX_GPIO107__FUNC_BPI_BUS24_ANT1 (MTK_PIN_NO(107) | 1)
+#define PINMUX_GPIO107__FUNC_CONN_BPI_BUS24_ANT1 (MTK_PIN_NO(107) | 2)
+
+#define PINMUX_GPIO108__FUNC_GPIO108 (MTK_PIN_NO(108) | 0)
+#define PINMUX_GPIO108__FUNC_BPI_BUS25_ANT2 (MTK_PIN_NO(108) | 1)
+#define PINMUX_GPIO108__FUNC_CONN_BPI_BUS25_ANT2 (MTK_PIN_NO(108) | 2)
+
+#define PINMUX_GPIO109__FUNC_GPIO109 (MTK_PIN_NO(109) | 0)
+#define PINMUX_GPIO109__FUNC_BPI_BUS23_ANT0 (MTK_PIN_NO(109) | 1)
+#define PINMUX_GPIO109__FUNC_CONN_BPI_BUS23_ANT0 (MTK_PIN_NO(109) | 2)
+
+#define PINMUX_GPIO110__FUNC_GPIO110 (MTK_PIN_NO(110) | 0)
+#define PINMUX_GPIO110__FUNC_SCL4 (MTK_PIN_NO(110) | 1)
+
+#define PINMUX_GPIO111__FUNC_GPIO111 (MTK_PIN_NO(111) | 0)
+#define PINMUX_GPIO111__FUNC_SDA4 (MTK_PIN_NO(111) | 1)
+
+#define PINMUX_GPIO112__FUNC_GPIO112 (MTK_PIN_NO(112) | 0)
+#define PINMUX_GPIO112__FUNC_SCL2 (MTK_PIN_NO(112) | 1)
+
+#define PINMUX_GPIO113__FUNC_GPIO113 (MTK_PIN_NO(113) | 0)
+#define PINMUX_GPIO113__FUNC_SDA2 (MTK_PIN_NO(113) | 1)
+
+#define PINMUX_GPIO114__FUNC_GPIO114 (MTK_PIN_NO(114) | 0)
+#define PINMUX_GPIO114__FUNC_CLKM0 (MTK_PIN_NO(114) | 1)
+#define PINMUX_GPIO114__FUNC_SPI3_MI (MTK_PIN_NO(114) | 2)
+#define PINMUX_GPIO114__FUNC_DBG_MON_B5 (MTK_PIN_NO(114) | 7)
+
+#define PINMUX_GPIO115__FUNC_GPIO115 (MTK_PIN_NO(115) | 0)
+#define PINMUX_GPIO115__FUNC_CLKM1 (MTK_PIN_NO(115) | 1)
+#define PINMUX_GPIO115__FUNC_SPI3_CSB (MTK_PIN_NO(115) | 2)
+#define PINMUX_GPIO115__FUNC_DBG_MON_B4 (MTK_PIN_NO(115) | 7)
+
+#define PINMUX_GPIO116__FUNC_GPIO116 (MTK_PIN_NO(116) | 0)
+#define PINMUX_GPIO116__FUNC_CMMCLK0 (MTK_PIN_NO(116) | 1)
+#define PINMUX_GPIO116__FUNC_DBG_MON_B3 (MTK_PIN_NO(116) | 7)
+
+#define PINMUX_GPIO117__FUNC_GPIO117 (MTK_PIN_NO(117) | 0)
+#define PINMUX_GPIO117__FUNC_CMMCLK1 (MTK_PIN_NO(117) | 1)
+#define PINMUX_GPIO117__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(117) | 2)
+#define PINMUX_GPIO117__FUNC_DBG_MON_B2 (MTK_PIN_NO(117) | 7)
+
+#define PINMUX_GPIO118__FUNC_GPIO118 (MTK_PIN_NO(118) | 0)
+#define PINMUX_GPIO118__FUNC_CLKM2 (MTK_PIN_NO(118) | 1)
+#define PINMUX_GPIO118__FUNC_SPI3_MO (MTK_PIN_NO(118) | 2)
+#define PINMUX_GPIO118__FUNC_DBG_MON_B1 (MTK_PIN_NO(118) | 7)
+
+#define PINMUX_GPIO119__FUNC_GPIO119 (MTK_PIN_NO(119) | 0)
+#define PINMUX_GPIO119__FUNC_CLKM3 (MTK_PIN_NO(119) | 1)
+#define PINMUX_GPIO119__FUNC_SPI3_CLK (MTK_PIN_NO(119) | 2)
+#define PINMUX_GPIO119__FUNC_DBG_MON_B0 (MTK_PIN_NO(119) | 7)
+
+#define PINMUX_GPIO120__FUNC_GPIO120 (MTK_PIN_NO(120) | 0)
+#define PINMUX_GPIO120__FUNC_CMMCLK2 (MTK_PIN_NO(120) | 1)
+#define PINMUX_GPIO120__FUNC_CLKM2 (MTK_PIN_NO(120) | 2)
+#define PINMUX_GPIO120__FUNC_ANT_SEL12 (MTK_PIN_NO(120) | 6)
+#define PINMUX_GPIO120__FUNC_TP_UCTS2_AO (MTK_PIN_NO(120) | 7)
+
+#define PINMUX_GPIO121__FUNC_GPIO121 (MTK_PIN_NO(121) | 0)
+#define PINMUX_GPIO121__FUNC_CMMCLK3 (MTK_PIN_NO(121) | 1)
+#define PINMUX_GPIO121__FUNC_CLKM3 (MTK_PIN_NO(121) | 2)
+#define PINMUX_GPIO121__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(121) | 3)
+#define PINMUX_GPIO121__FUNC_ANT_SEL11 (MTK_PIN_NO(121) | 6)
+#define PINMUX_GPIO121__FUNC_TP_URTS2_AO (MTK_PIN_NO(121) | 7)
+
+#define PINMUX_GPIO122__FUNC_GPIO122 (MTK_PIN_NO(122) | 0)
+#define PINMUX_GPIO122__FUNC_CMVREF1 (MTK_PIN_NO(122) | 1)
+#define PINMUX_GPIO122__FUNC_PCM0_SYNC (MTK_PIN_NO(122) | 2)
+#define PINMUX_GPIO122__FUNC_SRCLKENAI1 (MTK_PIN_NO(122) | 3)
+#define PINMUX_GPIO122__FUNC_AGPS_SYNC (MTK_PIN_NO(122) | 4)
+#define PINMUX_GPIO122__FUNC_PWM_1 (MTK_PIN_NO(122) | 5)
+#define PINMUX_GPIO122__FUNC_ANT_SEL9 (MTK_PIN_NO(122) | 6)
+#define PINMUX_GPIO122__FUNC_TP_UCTS1_AO (MTK_PIN_NO(122) | 7)
+
+#define PINMUX_GPIO123__FUNC_GPIO123 (MTK_PIN_NO(123) | 0)
+#define PINMUX_GPIO123__FUNC_PCM0_DI (MTK_PIN_NO(123) | 2)
+#define PINMUX_GPIO123__FUNC_ADSP_JTAG_TRSTN (MTK_PIN_NO(123) | 3)
+#define PINMUX_GPIO123__FUNC_VPU_UDI_NTRST (MTK_PIN_NO(123) | 4)
+#define PINMUX_GPIO123__FUNC_SPM_JTAG_TRSTN (MTK_PIN_NO(123) | 5)
+#define PINMUX_GPIO123__FUNC_SSPM_JTAG_TRSTN (MTK_PIN_NO(123) | 6)
+
+#define PINMUX_GPIO124__FUNC_GPIO124 (MTK_PIN_NO(124) | 0)
+#define PINMUX_GPIO124__FUNC_CMVREF2 (MTK_PIN_NO(124) | 1)
+#define PINMUX_GPIO124__FUNC_PCM0_CLK (MTK_PIN_NO(124) | 2)
+#define PINMUX_GPIO124__FUNC_MD_INT0 (MTK_PIN_NO(124) | 3)
+#define PINMUX_GPIO124__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(124) | 4)
+#define PINMUX_GPIO124__FUNC_PWM_2 (MTK_PIN_NO(124) | 5)
+#define PINMUX_GPIO124__FUNC_ANT_SEL10 (MTK_PIN_NO(124) | 6)
+#define PINMUX_GPIO124__FUNC_TP_URTS1_AO (MTK_PIN_NO(124) | 7)
+
+#define PINMUX_GPIO125__FUNC_GPIO125 (MTK_PIN_NO(125) | 0)
+#define PINMUX_GPIO125__FUNC_CMVREF3 (MTK_PIN_NO(125) | 1)
+#define PINMUX_GPIO125__FUNC_PCM0_DO (MTK_PIN_NO(125) | 2)
+#define PINMUX_GPIO125__FUNC_ADSP_JTAG_TMS (MTK_PIN_NO(125) | 3)
+#define PINMUX_GPIO125__FUNC_VPU_UDI_TMS (MTK_PIN_NO(125) | 4)
+#define PINMUX_GPIO125__FUNC_SPM_JTAG_TMS (MTK_PIN_NO(125) | 5)
+#define PINMUX_GPIO125__FUNC_SSPM_JTAG_TMS (MTK_PIN_NO(125) | 6)
+
+#define PINMUX_GPIO126__FUNC_GPIO126 (MTK_PIN_NO(126) | 0)
+#define PINMUX_GPIO126__FUNC_CMVREF4 (MTK_PIN_NO(126) | 1)
+#define PINMUX_GPIO126__FUNC_CMFLASH0 (MTK_PIN_NO(126) | 2)
+#define PINMUX_GPIO126__FUNC_CONN_MCU_AICE_TMSC (MTK_PIN_NO(126) | 6)
+
+#define PINMUX_GPIO127__FUNC_GPIO127 (MTK_PIN_NO(127) | 0)
+#define PINMUX_GPIO127__FUNC_CMVREF0 (MTK_PIN_NO(127) | 1)
+#define PINMUX_GPIO127__FUNC_CMFLASH1 (MTK_PIN_NO(127) | 2)
+#define PINMUX_GPIO127__FUNC_CONN_MCU_AICE_TCKC (MTK_PIN_NO(127) | 6)
+
+#define PINMUX_GPIO128__FUNC_GPIO128 (MTK_PIN_NO(128) | 0)
+#define PINMUX_GPIO128__FUNC_MD1_SIM1_SIO (MTK_PIN_NO(128) | 1)
+#define PINMUX_GPIO128__FUNC_MD1_SIM2_SIO (MTK_PIN_NO(128) | 2)
+#define PINMUX_GPIO128__FUNC_CCU_JTAG_TRST (MTK_PIN_NO(128) | 3)
+#define PINMUX_GPIO128__FUNC_CONN_DSP_JINTP (MTK_PIN_NO(128) | 4)
+#define PINMUX_GPIO128__FUNC_SCP_JTAG_TRSTN (MTK_PIN_NO(128) | 5)
+#define PINMUX_GPIO128__FUNC_LVTS_FOUT (MTK_PIN_NO(128) | 6)
+#define PINMUX_GPIO128__FUNC_DBG_MON_A3 (MTK_PIN_NO(128) | 7)
+
+#define PINMUX_GPIO129__FUNC_GPIO129 (MTK_PIN_NO(129) | 0)
+#define PINMUX_GPIO129__FUNC_MD1_SIM1_SRST (MTK_PIN_NO(129) | 1)
+#define PINMUX_GPIO129__FUNC_MD1_SIM2_SRST (MTK_PIN_NO(129) | 2)
+#define PINMUX_GPIO129__FUNC_CCU_JTAG_TCK (MTK_PIN_NO(129) | 3)
+#define PINMUX_GPIO129__FUNC_CONN_DSP_JCK (MTK_PIN_NO(129) | 4)
+#define PINMUX_GPIO129__FUNC_SCP_JTAG_TCK (MTK_PIN_NO(129) | 5)
+#define PINMUX_GPIO129__FUNC_LVTS_SDO (MTK_PIN_NO(129) | 6)
+#define PINMUX_GPIO129__FUNC_DBG_MON_A4 (MTK_PIN_NO(129) | 7)
+
+#define PINMUX_GPIO130__FUNC_GPIO130 (MTK_PIN_NO(130) | 0)
+#define PINMUX_GPIO130__FUNC_MD1_SIM1_SCLK (MTK_PIN_NO(130) | 1)
+#define PINMUX_GPIO130__FUNC_MD1_SIM2_SCLK (MTK_PIN_NO(130) | 2)
+#define PINMUX_GPIO130__FUNC_LVTS_26M (MTK_PIN_NO(130) | 6)
+#define PINMUX_GPIO130__FUNC_DBG_MON_A5 (MTK_PIN_NO(130) | 7)
+
+#define PINMUX_GPIO131__FUNC_GPIO131 (MTK_PIN_NO(131) | 0)
+#define PINMUX_GPIO131__FUNC_MD1_SIM2_SCLK (MTK_PIN_NO(131) | 1)
+#define PINMUX_GPIO131__FUNC_MD1_SIM1_SCLK (MTK_PIN_NO(131) | 2)
+#define PINMUX_GPIO131__FUNC_CCU_JTAG_TDI (MTK_PIN_NO(131) | 3)
+#define PINMUX_GPIO131__FUNC_CONN_DSP_JDI (MTK_PIN_NO(131) | 4)
+#define PINMUX_GPIO131__FUNC_SCP_JTAG_TDI (MTK_PIN_NO(131) | 5)
+#define PINMUX_GPIO131__FUNC_LVTS_SCK (MTK_PIN_NO(131) | 6)
+#define PINMUX_GPIO131__FUNC_DBG_MON_A0 (MTK_PIN_NO(131) | 7)
+
+#define PINMUX_GPIO132__FUNC_GPIO132 (MTK_PIN_NO(132) | 0)
+#define PINMUX_GPIO132__FUNC_MD1_SIM2_SRST (MTK_PIN_NO(132) | 1)
+#define PINMUX_GPIO132__FUNC_MD1_SIM1_SRST (MTK_PIN_NO(132) | 2)
+#define PINMUX_GPIO132__FUNC_CCU_JTAG_TMS (MTK_PIN_NO(132) | 3)
+#define PINMUX_GPIO132__FUNC_CONN_DSP_JMS (MTK_PIN_NO(132) | 4)
+#define PINMUX_GPIO132__FUNC_SCP_JTAG_TMS (MTK_PIN_NO(132) | 5)
+#define PINMUX_GPIO132__FUNC_LVTS_SDI (MTK_PIN_NO(132) | 6)
+#define PINMUX_GPIO132__FUNC_DBG_MON_A1 (MTK_PIN_NO(132) | 7)
+
+#define PINMUX_GPIO133__FUNC_GPIO133 (MTK_PIN_NO(133) | 0)
+#define PINMUX_GPIO133__FUNC_MD1_SIM2_SIO (MTK_PIN_NO(133) | 1)
+#define PINMUX_GPIO133__FUNC_MD1_SIM1_SIO (MTK_PIN_NO(133) | 2)
+#define PINMUX_GPIO133__FUNC_CCU_JTAG_TDO (MTK_PIN_NO(133) | 3)
+#define PINMUX_GPIO133__FUNC_CONN_DSP_JDO (MTK_PIN_NO(133) | 4)
+#define PINMUX_GPIO133__FUNC_SCP_JTAG_TDO (MTK_PIN_NO(133) | 5)
+#define PINMUX_GPIO133__FUNC_LVTS_SCF (MTK_PIN_NO(133) | 6)
+#define PINMUX_GPIO133__FUNC_DBG_MON_A2 (MTK_PIN_NO(133) | 7)
+
+#define PINMUX_GPIO134__FUNC_GPIO134 (MTK_PIN_NO(134) | 0)
+#define PINMUX_GPIO134__FUNC_MSDC1_CLK (MTK_PIN_NO(134) | 1)
+#define PINMUX_GPIO134__FUNC_PCM1_CLK (MTK_PIN_NO(134) | 2)
+#define PINMUX_GPIO134__FUNC_SPI5_B_MI (MTK_PIN_NO(134) | 3)
+#define PINMUX_GPIO134__FUNC_UDI_TCK (MTK_PIN_NO(134) | 4)
+#define PINMUX_GPIO134__FUNC_CONN_DSP_JCK (MTK_PIN_NO(134) | 5)
+#define PINMUX_GPIO134__FUNC_IPU_JTAG_TCK (MTK_PIN_NO(134) | 6)
+#define PINMUX_GPIO134__FUNC_JTCK_SEL3 (MTK_PIN_NO(134) | 7)
+
+#define PINMUX_GPIO135__FUNC_GPIO135 (MTK_PIN_NO(135) | 0)
+#define PINMUX_GPIO135__FUNC_MSDC1_CMD (MTK_PIN_NO(135) | 1)
+#define PINMUX_GPIO135__FUNC_PCM1_SYNC (MTK_PIN_NO(135) | 2)
+#define PINMUX_GPIO135__FUNC_SPI5_B_CSB (MTK_PIN_NO(135) | 3)
+#define PINMUX_GPIO135__FUNC_UDI_TMS (MTK_PIN_NO(135) | 4)
+#define PINMUX_GPIO135__FUNC_CONN_DSP_JMS (MTK_PIN_NO(135) | 5)
+#define PINMUX_GPIO135__FUNC_IPU_JTAG_TMS (MTK_PIN_NO(135) | 6)
+#define PINMUX_GPIO135__FUNC_JTMS_SEL3 (MTK_PIN_NO(135) | 7)
+
+#define PINMUX_GPIO136__FUNC_GPIO136 (MTK_PIN_NO(136) | 0)
+#define PINMUX_GPIO136__FUNC_MSDC1_DAT3 (MTK_PIN_NO(136) | 1)
+#define PINMUX_GPIO136__FUNC_PCM1_DI (MTK_PIN_NO(136) | 2)
+#define PINMUX_GPIO136__FUNC_SPI5_B_MO (MTK_PIN_NO(136) | 3)
+#define PINMUX_GPIO136__FUNC_CONN_TCXOENA_REQ (MTK_PIN_NO(136) | 4)
+#define PINMUX_GPIO136__FUNC_CONN_DSP_JINTP (MTK_PIN_NO(136) | 5)
+#define PINMUX_GPIO136__FUNC_CONN_MCU_AICE_TMSC (MTK_PIN_NO(136) | 6)
+
+#define PINMUX_GPIO137__FUNC_GPIO137 (MTK_PIN_NO(137) | 0)
+#define PINMUX_GPIO137__FUNC_MSDC1_DAT0 (MTK_PIN_NO(137) | 1)
+#define PINMUX_GPIO137__FUNC_PCM1_DO0 (MTK_PIN_NO(137) | 2)
+#define PINMUX_GPIO137__FUNC_SPI5_B_CLK (MTK_PIN_NO(137) | 3)
+#define PINMUX_GPIO137__FUNC_UDI_TDI (MTK_PIN_NO(137) | 4)
+#define PINMUX_GPIO137__FUNC_CONN_DSP_JDI (MTK_PIN_NO(137) | 5)
+#define PINMUX_GPIO137__FUNC_IPU_JTAG_TDI (MTK_PIN_NO(137) | 6)
+#define PINMUX_GPIO137__FUNC_JTDI_SEL3 (MTK_PIN_NO(137) | 7)
+
+#define PINMUX_GPIO138__FUNC_GPIO138 (MTK_PIN_NO(138) | 0)
+#define PINMUX_GPIO138__FUNC_MSDC1_DAT2 (MTK_PIN_NO(138) | 1)
+#define PINMUX_GPIO138__FUNC_PCM1_DO2 (MTK_PIN_NO(138) | 2)
+#define PINMUX_GPIO138__FUNC_ANT_SEL11 (MTK_PIN_NO(138) | 3)
+#define PINMUX_GPIO138__FUNC_UDI_NTRST (MTK_PIN_NO(138) | 4)
+#define PINMUX_GPIO138__FUNC_CONN_MCU_AICE_TCKC (MTK_PIN_NO(138) | 5)
+#define PINMUX_GPIO138__FUNC_IPU_JTAG_TRST (MTK_PIN_NO(138) | 6)
+#define PINMUX_GPIO138__FUNC_JTRSTN_SEL3 (MTK_PIN_NO(138) | 7)
+
+#define PINMUX_GPIO139__FUNC_GPIO139 (MTK_PIN_NO(139) | 0)
+#define PINMUX_GPIO139__FUNC_MSDC1_DAT1 (MTK_PIN_NO(139) | 1)
+#define PINMUX_GPIO139__FUNC_PCM1_DO1 (MTK_PIN_NO(139) | 2)
+#define PINMUX_GPIO139__FUNC_ANT_SEL12 (MTK_PIN_NO(139) | 3)
+#define PINMUX_GPIO139__FUNC_UDI_TDO (MTK_PIN_NO(139) | 4)
+#define PINMUX_GPIO139__FUNC_CONN_DSP_JDO (MTK_PIN_NO(139) | 5)
+#define PINMUX_GPIO139__FUNC_IPU_JTAG_TDO (MTK_PIN_NO(139) | 6)
+#define PINMUX_GPIO139__FUNC_JTDO_SEL3 (MTK_PIN_NO(139) | 7)
+
+#define PINMUX_GPIO140__FUNC_GPIO140 (MTK_PIN_NO(140) | 0)
+#define PINMUX_GPIO140__FUNC_MD_INT1_C2K_UIM0_HOT_PLUG (MTK_PIN_NO(140) | 1)
+#define PINMUX_GPIO140__FUNC_MD_INT2_C2K_UIM1_HOT_PLUG (MTK_PIN_NO(140) | 2)
+#define PINMUX_GPIO140__FUNC_ADSP_URXD0 (MTK_PIN_NO(140) | 3)
+#define PINMUX_GPIO140__FUNC_SCL_6306 (MTK_PIN_NO(140) | 4)
+#define PINMUX_GPIO140__FUNC_PTA_RXD (MTK_PIN_NO(140) | 5)
+#define PINMUX_GPIO140__FUNC_SSPM_URXD_AO (MTK_PIN_NO(140) | 6)
+
+#define PINMUX_GPIO141__FUNC_GPIO141 (MTK_PIN_NO(141) | 0)
+#define PINMUX_GPIO141__FUNC_MD_INT2_C2K_UIM1_HOT_PLUG (MTK_PIN_NO(141) | 1)
+#define PINMUX_GPIO141__FUNC_MD_INT1_C2K_UIM0_HOT_PLUG (MTK_PIN_NO(141) | 2)
+#define PINMUX_GPIO141__FUNC_ADSP_UTXD0 (MTK_PIN_NO(141) | 3)
+#define PINMUX_GPIO141__FUNC_SDA_6306 (MTK_PIN_NO(141) | 4)
+#define PINMUX_GPIO141__FUNC_PTA_TXD (MTK_PIN_NO(141) | 5)
+#define PINMUX_GPIO141__FUNC_SSPM_UTXD_AO (MTK_PIN_NO(141) | 6)
+
+#define PINMUX_GPIO142__FUNC_GPIO142 (MTK_PIN_NO(142) | 0)
+#define PINMUX_GPIO142__FUNC_SCP_VREQ_VAO (MTK_PIN_NO(142) | 1)
+#define PINMUX_GPIO142__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(142) | 2)
+
+#define PINMUX_GPIO143__FUNC_GPIO143 (MTK_PIN_NO(143) | 0)
+#define PINMUX_GPIO143__FUNC_AUD_DAT_MOSI2 (MTK_PIN_NO(143) | 1)
+#define PINMUX_GPIO143__FUNC_DBG_MON_A9 (MTK_PIN_NO(143) | 7)
+
+#define PINMUX_GPIO144__FUNC_GPIO144 (MTK_PIN_NO(144) | 0)
+#define PINMUX_GPIO144__FUNC_AUD_NLE_MOSI1 (MTK_PIN_NO(144) | 1)
+#define PINMUX_GPIO144__FUNC_AUD_CLK_MISO (MTK_PIN_NO(144) | 2)
+#define PINMUX_GPIO144__FUNC_I2S2_MCK (MTK_PIN_NO(144) | 3)
+#define PINMUX_GPIO144__FUNC_UDI_TCK (MTK_PIN_NO(144) | 5)
+#define PINMUX_GPIO144__FUNC_UFS_UNIPRO_SDA (MTK_PIN_NO(144) | 6)
+#define PINMUX_GPIO144__FUNC_DBG_MON_A10 (MTK_PIN_NO(144) | 7)
+
+#define PINMUX_GPIO145__FUNC_GPIO145 (MTK_PIN_NO(145) | 0)
+#define PINMUX_GPIO145__FUNC_AUD_NLE_MOSI0 (MTK_PIN_NO(145) | 1)
+#define PINMUX_GPIO145__FUNC_AUD_SYNC_MISO (MTK_PIN_NO(145) | 2)
+#define PINMUX_GPIO145__FUNC_I2S2_BCK (MTK_PIN_NO(145) | 3)
+#define PINMUX_GPIO145__FUNC_UDI_TMS (MTK_PIN_NO(145) | 5)
+#define PINMUX_GPIO145__FUNC_DBG_MON_A11 (MTK_PIN_NO(145) | 7)
+
+#define PINMUX_GPIO146__FUNC_GPIO146 (MTK_PIN_NO(146) | 0)
+#define PINMUX_GPIO146__FUNC_AUD_DAT_MISO2 (MTK_PIN_NO(146) | 1)
+#define PINMUX_GPIO146__FUNC_I2S2_DI2 (MTK_PIN_NO(146) | 3)
+#define PINMUX_GPIO146__FUNC_UDI_TDO (MTK_PIN_NO(146) | 5)
+#define PINMUX_GPIO146__FUNC_DBG_MON_A14 (MTK_PIN_NO(146) | 7)
+
+#define PINMUX_GPIO147__FUNC_GPIO147 (MTK_PIN_NO(147) | 0)
+#define PINMUX_GPIO147__FUNC_ANT_SEL0 (MTK_PIN_NO(147) | 1)
+#define PINMUX_GPIO147__FUNC_PWM_3 (MTK_PIN_NO(147) | 2)
+
+#define PINMUX_GPIO148__FUNC_GPIO148 (MTK_PIN_NO(148) | 0)
+#define PINMUX_GPIO148__FUNC_ANT_SEL1 (MTK_PIN_NO(148) | 1)
+#define PINMUX_GPIO148__FUNC_SPI0_B_MI (MTK_PIN_NO(148) | 2)
+#define PINMUX_GPIO148__FUNC_SSPM_URXD_AO (MTK_PIN_NO(148) | 3)
+#define PINMUX_GPIO148__FUNC_TP_UCTS2_AO (MTK_PIN_NO(148) | 5)
+#define PINMUX_GPIO148__FUNC_CLKM0 (MTK_PIN_NO(148) | 6)
+
+#define PINMUX_GPIO149__FUNC_GPIO149 (MTK_PIN_NO(149) | 0)
+#define PINMUX_GPIO149__FUNC_ANT_SEL2 (MTK_PIN_NO(149) | 1)
+#define PINMUX_GPIO149__FUNC_SPI0_B_CSB (MTK_PIN_NO(149) | 2)
+#define PINMUX_GPIO149__FUNC_SSPM_UTXD_AO (MTK_PIN_NO(149) | 3)
+#define PINMUX_GPIO149__FUNC_TP_URTS2_AO (MTK_PIN_NO(149) | 5)
+#define PINMUX_GPIO149__FUNC_CONN_TCXOENA_REQ (MTK_PIN_NO(149) | 6)
+
+#define PINMUX_GPIO150__FUNC_GPIO150 (MTK_PIN_NO(150) | 0)
+#define PINMUX_GPIO150__FUNC_ANT_SEL3 (MTK_PIN_NO(150) | 1)
+#define PINMUX_GPIO150__FUNC_SPI0_B_MO (MTK_PIN_NO(150) | 2)
+#define PINMUX_GPIO150__FUNC_UCTS1 (MTK_PIN_NO(150) | 3)
+#define PINMUX_GPIO150__FUNC_TP_UCTS1_AO (MTK_PIN_NO(150) | 5)
+#define PINMUX_GPIO150__FUNC_IDDIG (MTK_PIN_NO(150) | 6)
+#define PINMUX_GPIO150__FUNC_SCL9 (MTK_PIN_NO(150) | 7)
+
+#define PINMUX_GPIO151__FUNC_GPIO151 (MTK_PIN_NO(151) | 0)
+#define PINMUX_GPIO151__FUNC_ANT_SEL4 (MTK_PIN_NO(151) | 1)
+#define PINMUX_GPIO151__FUNC_SPI0_B_CLK (MTK_PIN_NO(151) | 2)
+#define PINMUX_GPIO151__FUNC_URTS1 (MTK_PIN_NO(151) | 3)
+#define PINMUX_GPIO151__FUNC_TP_URTS1_AO (MTK_PIN_NO(151) | 5)
+#define PINMUX_GPIO151__FUNC_USB_DRVVBUS (MTK_PIN_NO(151) | 6)
+#define PINMUX_GPIO151__FUNC_SDA9 (MTK_PIN_NO(151) | 7)
+
+#define PINMUX_GPIO152__FUNC_GPIO152 (MTK_PIN_NO(152) | 0)
+#define PINMUX_GPIO152__FUNC_ANT_SEL5 (MTK_PIN_NO(152) | 1)
+#define PINMUX_GPIO152__FUNC_SPI1_B_MI (MTK_PIN_NO(152) | 2)
+#define PINMUX_GPIO152__FUNC_CLKM3 (MTK_PIN_NO(152) | 3)
+#define PINMUX_GPIO152__FUNC_TP_URXD1_AO (MTK_PIN_NO(152) | 5)
+#define PINMUX_GPIO152__FUNC_SCP_SPI1_B_MI (MTK_PIN_NO(152) | 6)
+#define PINMUX_GPIO152__FUNC_SCL8 (MTK_PIN_NO(152) | 7)
+
+#define PINMUX_GPIO153__FUNC_GPIO153 (MTK_PIN_NO(153) | 0)
+#define PINMUX_GPIO153__FUNC_ANT_SEL6 (MTK_PIN_NO(153) | 1)
+#define PINMUX_GPIO153__FUNC_SPI1_B_CSB (MTK_PIN_NO(153) | 2)
+#define PINMUX_GPIO153__FUNC_SRCLKENAI0 (MTK_PIN_NO(153) | 3)
+#define PINMUX_GPIO153__FUNC_PWM_0 (MTK_PIN_NO(153) | 4)
+#define PINMUX_GPIO153__FUNC_TP_UTXD1_AO (MTK_PIN_NO(153) | 5)
+#define PINMUX_GPIO153__FUNC_SCP_SPI1_B_CS (MTK_PIN_NO(153) | 6)
+#define PINMUX_GPIO153__FUNC_SDA8 (MTK_PIN_NO(153) | 7)
+
+#define PINMUX_GPIO154__FUNC_GPIO154 (MTK_PIN_NO(154) | 0)
+#define PINMUX_GPIO154__FUNC_ANT_SEL7 (MTK_PIN_NO(154) | 1)
+#define PINMUX_GPIO154__FUNC_SPI1_B_MO (MTK_PIN_NO(154) | 2)
+#define PINMUX_GPIO154__FUNC_SRCLKENAI1 (MTK_PIN_NO(154) | 3)
+#define PINMUX_GPIO154__FUNC_TP_URXD2_AO (MTK_PIN_NO(154) | 5)
+#define PINMUX_GPIO154__FUNC_SCP_SPI1_B_MO (MTK_PIN_NO(154) | 6)
+
+#define PINMUX_GPIO155__FUNC_GPIO155 (MTK_PIN_NO(155) | 0)
+#define PINMUX_GPIO155__FUNC_ANT_SEL8 (MTK_PIN_NO(155) | 1)
+#define PINMUX_GPIO155__FUNC_SPI1_B_CLK (MTK_PIN_NO(155) | 2)
+#define PINMUX_GPIO155__FUNC_MD_INT0 (MTK_PIN_NO(155) | 3)
+#define PINMUX_GPIO155__FUNC_TP_UTXD2_AO (MTK_PIN_NO(155) | 5)
+#define PINMUX_GPIO155__FUNC_SCP_SPI1_B_CK (MTK_PIN_NO(155) | 6)
+#define PINMUX_GPIO155__FUNC_DBG_MON_A15 (MTK_PIN_NO(155) | 7)
+
+#define PINMUX_GPIO156__FUNC_GPIO156 (MTK_PIN_NO(156) | 0)
+#define PINMUX_GPIO156__FUNC_CONN_TOP_CLK (MTK_PIN_NO(156) | 1)
+#define PINMUX_GPIO156__FUNC_AUXIF_CLK0 (MTK_PIN_NO(156) | 2)
+#define PINMUX_GPIO156__FUNC_DBG_MON_A16 (MTK_PIN_NO(156) | 7)
+
+#define PINMUX_GPIO157__FUNC_GPIO157 (MTK_PIN_NO(157) | 0)
+#define PINMUX_GPIO157__FUNC_CONN_TOP_DATA (MTK_PIN_NO(157) | 1)
+#define PINMUX_GPIO157__FUNC_AUXIF_ST0 (MTK_PIN_NO(157) | 2)
+#define PINMUX_GPIO157__FUNC_DBG_MON_A17 (MTK_PIN_NO(157) | 7)
+
+#define PINMUX_GPIO158__FUNC_GPIO158 (MTK_PIN_NO(158) | 0)
+#define PINMUX_GPIO158__FUNC_CONN_HRST_B (MTK_PIN_NO(158) | 1)
+#define PINMUX_GPIO158__FUNC_DBG_MON_A18 (MTK_PIN_NO(158) | 7)
+
+#define PINMUX_GPIO159__FUNC_GPIO159 (MTK_PIN_NO(159) | 0)
+#define PINMUX_GPIO159__FUNC_CONN_WB_PTA (MTK_PIN_NO(159) | 1)
+#define PINMUX_GPIO159__FUNC_DBG_MON_A19 (MTK_PIN_NO(159) | 7)
+
+#define PINMUX_GPIO160__FUNC_GPIO160 (MTK_PIN_NO(160) | 0)
+#define PINMUX_GPIO160__FUNC_CONN_BT_CLK (MTK_PIN_NO(160) | 1)
+#define PINMUX_GPIO160__FUNC_AUXIF_CLK1 (MTK_PIN_NO(160) | 2)
+#define PINMUX_GPIO160__FUNC_DBG_MON_A20 (MTK_PIN_NO(160) | 7)
+
+#define PINMUX_GPIO161__FUNC_GPIO161 (MTK_PIN_NO(161) | 0)
+#define PINMUX_GPIO161__FUNC_CONN_BT_DATA (MTK_PIN_NO(161) | 1)
+#define PINMUX_GPIO161__FUNC_AUXIF_ST1 (MTK_PIN_NO(161) | 2)
+#define PINMUX_GPIO161__FUNC_DBG_MON_A21 (MTK_PIN_NO(161) | 7)
+
+#define PINMUX_GPIO162__FUNC_GPIO162 (MTK_PIN_NO(162) | 0)
+#define PINMUX_GPIO162__FUNC_CONN_WF_CTRL0 (MTK_PIN_NO(162) | 1)
+#define PINMUX_GPIO162__FUNC_DBG_MON_A22 (MTK_PIN_NO(162) | 7)
+
+#define PINMUX_GPIO163__FUNC_GPIO163 (MTK_PIN_NO(163) | 0)
+#define PINMUX_GPIO163__FUNC_CONN_WF_CTRL1 (MTK_PIN_NO(163) | 1)
+#define PINMUX_GPIO163__FUNC_UFS_MPHY_SCL (MTK_PIN_NO(163) | 2)
+#define PINMUX_GPIO163__FUNC_DBG_MON_A23 (MTK_PIN_NO(163) | 7)
+
+#define PINMUX_GPIO164__FUNC_GPIO164 (MTK_PIN_NO(164) | 0)
+#define PINMUX_GPIO164__FUNC_CONN_WF_CTRL2 (MTK_PIN_NO(164) | 1)
+#define PINMUX_GPIO164__FUNC_UFS_MPHY_SDA (MTK_PIN_NO(164) | 2)
+#define PINMUX_GPIO164__FUNC_DBG_MON_A24 (MTK_PIN_NO(164) | 7)
+
+#define PINMUX_GPIO165__FUNC_GPIO165 (MTK_PIN_NO(165) | 0)
+#define PINMUX_GPIO165__FUNC_CONN_WF_CTRL3 (MTK_PIN_NO(165) | 1)
+#define PINMUX_GPIO165__FUNC_UFS_UNIPRO_SDA (MTK_PIN_NO(165) | 2)
+#define PINMUX_GPIO165__FUNC_DBG_MON_A25 (MTK_PIN_NO(165) | 7)
+
+#define PINMUX_GPIO166__FUNC_GPIO166 (MTK_PIN_NO(166) | 0)
+#define PINMUX_GPIO166__FUNC_CONN_WF_CTRL4 (MTK_PIN_NO(166) | 1)
+#define PINMUX_GPIO166__FUNC_UFS_UNIPRO_SCL (MTK_PIN_NO(166) | 2)
+#define PINMUX_GPIO166__FUNC_DBG_MON_A26 (MTK_PIN_NO(166) | 7)
+
+#define PINMUX_GPIO167__FUNC_GPIO167 (MTK_PIN_NO(167) | 0)
+#define PINMUX_GPIO167__FUNC_MSDC0_CMD (MTK_PIN_NO(167) | 1)
+
+#define PINMUX_GPIO168__FUNC_GPIO168 (MTK_PIN_NO(168) | 0)
+#define PINMUX_GPIO168__FUNC_MSDC0_DAT0 (MTK_PIN_NO(168) | 1)
+
+#define PINMUX_GPIO169__FUNC_GPIO169 (MTK_PIN_NO(169) | 0)
+#define PINMUX_GPIO169__FUNC_MSDC0_DAT2 (MTK_PIN_NO(169) | 1)
+
+#define PINMUX_GPIO170__FUNC_GPIO170 (MTK_PIN_NO(170) | 0)
+#define PINMUX_GPIO170__FUNC_MSDC0_DAT4 (MTK_PIN_NO(170) | 1)
+
+#define PINMUX_GPIO171__FUNC_GPIO171 (MTK_PIN_NO(171) | 0)
+#define PINMUX_GPIO171__FUNC_MSDC0_DAT6 (MTK_PIN_NO(171) | 1)
+
+#define PINMUX_GPIO172__FUNC_GPIO172 (MTK_PIN_NO(172) | 0)
+#define PINMUX_GPIO172__FUNC_MSDC0_DAT1 (MTK_PIN_NO(172) | 1)
+
+#define PINMUX_GPIO173__FUNC_GPIO173 (MTK_PIN_NO(173) | 0)
+#define PINMUX_GPIO173__FUNC_MSDC0_DAT5 (MTK_PIN_NO(173) | 1)
+
+#define PINMUX_GPIO174__FUNC_GPIO174 (MTK_PIN_NO(174) | 0)
+#define PINMUX_GPIO174__FUNC_MSDC0_DAT7 (MTK_PIN_NO(174) | 1)
+
+#define PINMUX_GPIO175__FUNC_GPIO175 (MTK_PIN_NO(175) | 0)
+#define PINMUX_GPIO175__FUNC_MSDC0_DSL (MTK_PIN_NO(175) | 1)
+#define PINMUX_GPIO175__FUNC_ANT_SEL9 (MTK_PIN_NO(175) | 2)
+
+#define PINMUX_GPIO176__FUNC_GPIO176 (MTK_PIN_NO(176) | 0)
+#define PINMUX_GPIO176__FUNC_MSDC0_CLK (MTK_PIN_NO(176) | 1)
+#define PINMUX_GPIO176__FUNC_ANT_SEL10 (MTK_PIN_NO(176) | 2)
+
+#define PINMUX_GPIO177__FUNC_GPIO177 (MTK_PIN_NO(177) | 0)
+#define PINMUX_GPIO177__FUNC_MSDC0_DAT3 (MTK_PIN_NO(177) | 1)
+
+#define PINMUX_GPIO178__FUNC_GPIO178 (MTK_PIN_NO(178) | 0)
+#define PINMUX_GPIO178__FUNC_MSDC0_RSTB (MTK_PIN_NO(178) | 1)
+
+#define PINMUX_GPIO179__FUNC_GPIO179 (MTK_PIN_NO(179) | 0)
+#define PINMUX_GPIO179__FUNC_RFIC0_BSI_EN (MTK_PIN_NO(179) | 1)
+
+#define PINMUX_GPIO180__FUNC_GPIO180 (MTK_PIN_NO(180) | 0)
+#define PINMUX_GPIO180__FUNC_RFIC0_BSI_CK (MTK_PIN_NO(180) | 1)
+
+#define PINMUX_GPIO181__FUNC_GPIO181 (MTK_PIN_NO(181) | 0)
+#define PINMUX_GPIO181__FUNC_SRCLKENA0 (MTK_PIN_NO(181) | 1)
+
+#define PINMUX_GPIO182__FUNC_GPIO182 (MTK_PIN_NO(182) | 0)
+#define PINMUX_GPIO182__FUNC_SRCLKENA1 (MTK_PIN_NO(182) | 1)
+
+#define PINMUX_GPIO183__FUNC_GPIO183 (MTK_PIN_NO(183) | 0)
+#define PINMUX_GPIO183__FUNC_WATCHDOG (MTK_PIN_NO(183) | 1)
+
+#define PINMUX_GPIO184__FUNC_GPIO184 (MTK_PIN_NO(184) | 0)
+#define PINMUX_GPIO184__FUNC_PWRAP_SPI0_MI (MTK_PIN_NO(184) | 1)
+#define PINMUX_GPIO184__FUNC_PWRAP_SPI0_MO (MTK_PIN_NO(184) | 2)
+
+#define PINMUX_GPIO185__FUNC_GPIO185 (MTK_PIN_NO(185) | 0)
+#define PINMUX_GPIO185__FUNC_PWRAP_SPI0_CSN (MTK_PIN_NO(185) | 1)
+
+#define PINMUX_GPIO186__FUNC_GPIO186 (MTK_PIN_NO(186) | 0)
+#define PINMUX_GPIO186__FUNC_PWRAP_SPI0_MO (MTK_PIN_NO(186) | 1)
+#define PINMUX_GPIO186__FUNC_PWRAP_SPI0_MI (MTK_PIN_NO(186) | 2)
+
+#define PINMUX_GPIO187__FUNC_GPIO187 (MTK_PIN_NO(187) | 0)
+#define PINMUX_GPIO187__FUNC_PWRAP_SPI0_CK (MTK_PIN_NO(187) | 1)
+
+#define PINMUX_GPIO188__FUNC_GPIO188 (MTK_PIN_NO(188) | 0)
+#define PINMUX_GPIO188__FUNC_RTC32K_CK (MTK_PIN_NO(188) | 1)
+
+#define PINMUX_GPIO189__FUNC_GPIO189 (MTK_PIN_NO(189) | 0)
+#define PINMUX_GPIO189__FUNC_AUD_CLK_MOSI (MTK_PIN_NO(189) | 1)
+#define PINMUX_GPIO189__FUNC_I2S1_MCK (MTK_PIN_NO(189) | 3)
+#define PINMUX_GPIO189__FUNC_UFS_UNIPRO_SCL (MTK_PIN_NO(189) | 6)
+
+#define PINMUX_GPIO190__FUNC_GPIO190 (MTK_PIN_NO(190) | 0)
+#define PINMUX_GPIO190__FUNC_AUD_SYNC_MOSI (MTK_PIN_NO(190) | 1)
+#define PINMUX_GPIO190__FUNC_I2S1_BCK (MTK_PIN_NO(190) | 3)
+#define PINMUX_GPIO190__FUNC_DBG_MON_A6 (MTK_PIN_NO(190) | 7)
+
+#define PINMUX_GPIO191__FUNC_GPIO191 (MTK_PIN_NO(191) | 0)
+#define PINMUX_GPIO191__FUNC_AUD_DAT_MOSI0 (MTK_PIN_NO(191) | 1)
+#define PINMUX_GPIO191__FUNC_I2S1_LRCK (MTK_PIN_NO(191) | 3)
+#define PINMUX_GPIO191__FUNC_DBG_MON_A7 (MTK_PIN_NO(191) | 7)
+
+#define PINMUX_GPIO192__FUNC_GPIO192 (MTK_PIN_NO(192) | 0)
+#define PINMUX_GPIO192__FUNC_AUD_DAT_MOSI1 (MTK_PIN_NO(192) | 1)
+#define PINMUX_GPIO192__FUNC_I2S1_DO (MTK_PIN_NO(192) | 3)
+#define PINMUX_GPIO192__FUNC_UFS_MPHY_SDA (MTK_PIN_NO(192) | 6)
+#define PINMUX_GPIO192__FUNC_DBG_MON_A8 (MTK_PIN_NO(192) | 7)
+
+#define PINMUX_GPIO193__FUNC_GPIO193 (MTK_PIN_NO(193) | 0)
+#define PINMUX_GPIO193__FUNC_AUD_DAT_MISO0 (MTK_PIN_NO(193) | 1)
+#define PINMUX_GPIO193__FUNC_VOW_DAT_MISO (MTK_PIN_NO(193) | 2)
+#define PINMUX_GPIO193__FUNC_I2S2_LRCK (MTK_PIN_NO(193) | 3)
+#define PINMUX_GPIO193__FUNC_UDI_TDI (MTK_PIN_NO(193) | 5)
+#define PINMUX_GPIO193__FUNC_DBG_MON_A12 (MTK_PIN_NO(193) | 7)
+
+#define PINMUX_GPIO194__FUNC_GPIO194 (MTK_PIN_NO(194) | 0)
+#define PINMUX_GPIO194__FUNC_AUD_DAT_MISO1 (MTK_PIN_NO(194) | 1)
+#define PINMUX_GPIO194__FUNC_VOW_CLK_MISO (MTK_PIN_NO(194) | 2)
+#define PINMUX_GPIO194__FUNC_I2S2_DI (MTK_PIN_NO(194) | 3)
+#define PINMUX_GPIO194__FUNC_UDI_NTRST (MTK_PIN_NO(194) | 5)
+#define PINMUX_GPIO194__FUNC_UFS_MPHY_SCL (MTK_PIN_NO(194) | 6)
+#define PINMUX_GPIO194__FUNC_DBG_MON_A13 (MTK_PIN_NO(194) | 7)
+
+#define PINMUX_GPIO195__FUNC_GPIO195 (MTK_PIN_NO(195) | 0)
+#define PINMUX_GPIO195__FUNC_ADSP_JTAG_TCK (MTK_PIN_NO(195) | 3)
+#define PINMUX_GPIO195__FUNC_VPU_UDI_TCK (MTK_PIN_NO(195) | 4)
+#define PINMUX_GPIO195__FUNC_SPM_JTAG_TCK (MTK_PIN_NO(195) | 5)
+#define PINMUX_GPIO195__FUNC_SSPM_JTAG_TCK (MTK_PIN_NO(195) | 6)
+
+#define PINMUX_GPIO196__FUNC_GPIO196 (MTK_PIN_NO(196) | 0)
+#define PINMUX_GPIO196__FUNC_CMMCLK4 (MTK_PIN_NO(196) | 1)
+#define PINMUX_GPIO196__FUNC_ADSP_JTAG_TDI (MTK_PIN_NO(196) | 3)
+#define PINMUX_GPIO196__FUNC_VPU_UDI_TDI (MTK_PIN_NO(196) | 4)
+#define PINMUX_GPIO196__FUNC_SPM_JTAG_TDI (MTK_PIN_NO(196) | 5)
+#define PINMUX_GPIO196__FUNC_SSPM_JTAG_TDI (MTK_PIN_NO(196) | 6)
+
+#define PINMUX_GPIO197__FUNC_GPIO197 (MTK_PIN_NO(197) | 0)
+#define PINMUX_GPIO197__FUNC_ADSP_JTAG_TDO (MTK_PIN_NO(197) | 3)
+#define PINMUX_GPIO197__FUNC_VPU_UDI_TDO (MTK_PIN_NO(197) | 4)
+#define PINMUX_GPIO197__FUNC_SPM_JTAG_TDO (MTK_PIN_NO(197) | 5)
+#define PINMUX_GPIO197__FUNC_SSPM_JTAG_TDO (MTK_PIN_NO(197) | 6)
+
+#define PINMUX_GPIO198__FUNC_GPIO198 (MTK_PIN_NO(198) | 0)
+#define PINMUX_GPIO198__FUNC_SCL7 (MTK_PIN_NO(198) | 1)
+
+#define PINMUX_GPIO199__FUNC_GPIO199 (MTK_PIN_NO(199) | 0)
+#define PINMUX_GPIO199__FUNC_SDA7 (MTK_PIN_NO(199) | 1)
+
+#define PINMUX_GPIO200__FUNC_GPIO200 (MTK_PIN_NO(200) | 0)
+#define PINMUX_GPIO200__FUNC_URXD1 (MTK_PIN_NO(200) | 1)
+#define PINMUX_GPIO200__FUNC_ADSP_URXD0 (MTK_PIN_NO(200) | 2)
+#define PINMUX_GPIO200__FUNC_TP_URXD1_AO (MTK_PIN_NO(200) | 3)
+#define PINMUX_GPIO200__FUNC_SSPM_URXD_AO (MTK_PIN_NO(200) | 4)
+#define PINMUX_GPIO200__FUNC_TP_URXD2_AO (MTK_PIN_NO(200) | 5)
+#define PINMUX_GPIO200__FUNC_MBISTREADEN_TRIGGER (MTK_PIN_NO(200) | 6)
+
+#define PINMUX_GPIO201__FUNC_GPIO201 (MTK_PIN_NO(201) | 0)
+#define PINMUX_GPIO201__FUNC_UTXD1 (MTK_PIN_NO(201) | 1)
+#define PINMUX_GPIO201__FUNC_ADSP_UTXD0 (MTK_PIN_NO(201) | 2)
+#define PINMUX_GPIO201__FUNC_TP_UTXD1_AO (MTK_PIN_NO(201) | 3)
+#define PINMUX_GPIO201__FUNC_SSPM_UTXD_AO (MTK_PIN_NO(201) | 4)
+#define PINMUX_GPIO201__FUNC_TP_UTXD2_AO (MTK_PIN_NO(201) | 5)
+#define PINMUX_GPIO201__FUNC_MBISTWRITEEN_TRIGGER (MTK_PIN_NO(201) | 6)
+
+#define PINMUX_GPIO202__FUNC_GPIO202 (MTK_PIN_NO(202) | 0)
+#define PINMUX_GPIO202__FUNC_PWM_3 (MTK_PIN_NO(202) | 1)
+#define PINMUX_GPIO202__FUNC_CLKM3 (MTK_PIN_NO(202) | 2)
+
+#define PINMUX_GPIO203__FUNC_GPIO203 (MTK_PIN_NO(203) | 0)
+
+#define PINMUX_GPIO204__FUNC_GPIO204 (MTK_PIN_NO(204) | 0)
+
+#define PINMUX_GPIO205__FUNC_GPIO205 (MTK_PIN_NO(205) | 0)
+
+#define PINMUX_GPIO206__FUNC_GPIO206 (MTK_PIN_NO(206) | 0)
+
+#define PINMUX_GPIO207__FUNC_GPIO207 (MTK_PIN_NO(207) | 0)
+
+#define PINMUX_GPIO208__FUNC_GPIO208 (MTK_PIN_NO(208) | 0)
+
+#define PINMUX_GPIO209__FUNC_GPIO209 (MTK_PIN_NO(209) | 0)
+
+#endif /* __MT6779_PINFUNC_H */
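
For reference, a minimal sketch of how these MT6779 pinmux constants are consumed from a board device tree, assuming the usual MediaTek pinctrl binding with a pinmux property. The pio label, group names and the bias property are illustrative assumptions, not taken from this merge; the include path is the customary dt-bindings location for this header.

    #include <dt-bindings/pinctrl/mt6779-pinfunc.h>

    /* Illustrative pin group routing I2C6 onto GPIO45/GPIO46 */
    &pio {
            i2c6_pins: i2c6-pins {
                    pins-i2c6 {
                            /* each constant packs MTK_PIN_NO(pin) with a mux mode 0-7 */
                            pinmux = <PINMUX_GPIO45__FUNC_SCL6>,
                                     <PINMUX_GPIO46__FUNC_SDA6>;
                            bias-pull-up;
                    };
            };
    };

Because every define above is MTK_PIN_NO(n) OR'ed with a function index, the pinctrl driver can recover both the pin number and the selected function from a single cell.
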
index 6257180..2d2a8c7 100644 (file)
@@ -65,7 +65,7 @@
 #define DM814X_IOPAD(pa, val)          OMAP_IOPAD_OFFSET((pa), 0x0800) (val)
 #define DM816X_IOPAD(pa, val)          OMAP_IOPAD_OFFSET((pa), 0x0800) (val)
 #define AM33XX_IOPAD(pa, val)          OMAP_IOPAD_OFFSET((pa), 0x0800) (val)
-#define AM33XX_PADCONF(pa, dir, mux)   OMAP_IOPAD_OFFSET((pa), 0x0800) ((dir) | (mux))
+#define AM33XX_PADCONF(pa, conf, mux)  OMAP_IOPAD_OFFSET((pa), 0x0800) (conf) (mux)
 
 /*
  * Macros to allow using the offset from the padconf physical address
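
The reworked macro above emits three separate cells per pin (offset, conf, mux) instead of two, which is what pinctrl-single consumes once #pinctrl-cells = <2> is in use. A minimal sketch of the resulting board-file usage follows; the node label and the AM335X_PIN_*, PIN_* and MUX_MODE0 macros are assumed to come from the existing am33xx.h and omap.h binding headers rather than from this hunk.

    &am33xx_pinmux {
            uart0_pins: uart0-pins {
                    pinctrl-single,pins = <
                            /* each AM33XX_PADCONF() expands to <offset> <conf> <mux> */
                            AM33XX_PADCONF(AM335X_PIN_UART0_RXD, PIN_INPUT_PULLUP, MUX_MODE0)
                            AM33XX_PADCONF(AM335X_PIN_UART0_TXD, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
                    >;
            };
    };
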
diff --git a/include/dt-bindings/reset/actions,s500-reset.h b/include/dt-bindings/reset/actions,s500-reset.h
new file mode 100644 (file)
index 0000000..f5d9417
--- /dev/null
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Device Tree binding constants for Actions Semi S500 Reset Management Unit
+ *
+ * Copyright (c) 2014 Actions Semi Inc.
+ * Copyright (c) 2020 Cristian Ciocaltea <cristian.ciocaltea@gmail.com>
+ */
+
+#ifndef __DT_BINDINGS_ACTIONS_S500_RESET_H
+#define __DT_BINDINGS_ACTIONS_S500_RESET_H
+
+#define RESET_DMAC                             0
+#define RESET_NORIF                            1
+#define RESET_DDR                              2
+#define RESET_NANDC                            3
+#define RESET_SD0                              4
+#define RESET_SD1                              5
+#define RESET_PCM1                             6
+#define RESET_DE                               7
+#define RESET_LCD                              8
+#define RESET_SD2                              9
+#define RESET_DSI                              10
+#define RESET_CSI                              11
+#define RESET_BISP                             12
+#define RESET_KEY                              13
+#define RESET_GPIO                             14
+#define RESET_AUDIO                            15
+#define RESET_PCM0                             16
+#define RESET_VDE                              17
+#define RESET_VCE                              18
+#define RESET_GPU3D                            19
+#define RESET_NIC301                           20
+#define RESET_LENS                             21
+#define RESET_PERIPHRESET                      22
+#define RESET_USB2_0                           23
+#define RESET_TVOUT                            24
+#define RESET_HDMI                             25
+#define RESET_HDCP2TX                          26
+#define RESET_UART6                            27
+#define RESET_UART0                            28
+#define RESET_UART1                            29
+#define RESET_UART2                            30
+#define RESET_SPI0                             31
+#define RESET_SPI1                             32
+#define RESET_SPI2                             33
+#define RESET_SPI3                             34
+#define RESET_I2C0                             35
+#define RESET_I2C1                             36
+#define RESET_USB3                             37
+#define RESET_UART3                            38
+#define RESET_UART4                            39
+#define RESET_UART5                            40
+#define RESET_I2C2                             41
+#define RESET_I2C3                             42
+#define RESET_ETHERNET                         43
+#define RESET_CHIPID                           44
+#define RESET_USB2_1                           45
+#define RESET_WD0RESET                         46
+#define RESET_WD1RESET                         47
+#define RESET_WD2RESET                         48
+#define RESET_WD3RESET                         49
+#define RESET_DBG0RESET                                50
+#define RESET_DBG1RESET                                51
+#define RESET_DBG2RESET                                52
+#define RESET_DBG3RESET                                53
+
+#endif /* __DT_BINDINGS_ACTIONS_S500_RESET_H */
index 75e582b..4c328fe 100644 (file)
@@ -36,7 +36,7 @@ struct dma_chan_ref {
 /**
  * async_tx_flags - modifiers for the async_* calls
  * @ASYNC_TX_XOR_ZERO_DST: this flag must be used for xor operations where the
- * the destination address is not a source.  The asynchronous case handles this
+ * destination address is not a source.  The asynchronous case handles this
  * implicitly, the synchronous case needs to zero the destination block.
  * @ASYNC_TX_XOR_DROP_DST: this flag must be used if the destination address is
  * also one of the source addresses.  In the synchronous case the destination
index 56e4580..614653e 100644 (file)
 #include <linux/mutex.h>
 #include <linux/notifier.h>
 
-/* Notes on locking:
- *
- * backlight_device->ops_lock is an internal backlight lock protecting the
- * ops pointer and no code outside the core should need to touch it.
- *
- * Access to update_status() is serialised by the update_lock mutex since
- * most drivers seem to need this and historically get it wrong.
- *
- * Most drivers don't need locking on their get_brightness() method.
- * If yours does, you need to implement it in the driver. You can use the
- * update_lock mutex if appropriate.
+/**
+ * enum backlight_update_reason - what method was used to update backlight
  *
- * Any other use of the locks below is probably wrong.
+ * A driver indicates the method (reason) used for updating the backlight
+ * when calling backlight_force_update().
  */
-
 enum backlight_update_reason {
+       /**
+        * @BACKLIGHT_UPDATE_HOTKEY: The backlight was updated using a hot-key.
+        */
        BACKLIGHT_UPDATE_HOTKEY,
+
+       /**
+        * @BACKLIGHT_UPDATE_SYSFS: The backlight was updated using sysfs.
+        */
        BACKLIGHT_UPDATE_SYSFS,
 };
 
+/**
+ * enum backlight_type - the type of backlight control
+ *
+ * The type of interface used to control the backlight.
+ */
 enum backlight_type {
+       /**
+        * @BACKLIGHT_RAW:
+        *
+        * The backlight is controlled using hardware registers.
+        */
        BACKLIGHT_RAW = 1,
+
+       /**
+        * @BACKLIGHT_PLATFORM:
+        *
+        * The backlight is controlled using a platform-specific interface.
+        */
        BACKLIGHT_PLATFORM,
+
+       /**
+        * @BACKLIGHT_FIRMWARE:
+        *
+        * The backlight is controlled using a standard firmware interface.
+        */
        BACKLIGHT_FIRMWARE,
+
+       /**
+        * @BACKLIGHT_TYPE_MAX: Number of entries.
+        */
        BACKLIGHT_TYPE_MAX,
 };
 
+/**
+ * enum backlight_notification - the type of notification
+ *
+ * The type of notification sent to receivers that registered for
+ * notifications using backlight_register_notifier().
+ */
 enum backlight_notification {
+       /**
+        * @BACKLIGHT_REGISTERED: The backlight device is registered.
+        */
        BACKLIGHT_REGISTERED,
+
+       /**
+        * @BACKLIGHT_UNREGISTERED: The backlight device is unregistered.
+        */
        BACKLIGHT_UNREGISTERED,
 };
 
+/**
+ * enum backlight_scale - the type of scale used for brightness values
+ *
+ * The type of scale used for brightness values.
+ */
 enum backlight_scale {
+       /**
+        * @BACKLIGHT_SCALE_UNKNOWN: The scale is unknown.
+        */
        BACKLIGHT_SCALE_UNKNOWN = 0,
+
+       /**
+        * @BACKLIGHT_SCALE_LINEAR: The scale is linear.
+        *
+        * The linear scale increases brightness by the same amount per step.
+        */
        BACKLIGHT_SCALE_LINEAR,
+
+       /**
+        * @BACKLIGHT_SCALE_NON_LINEAR: The scale is not linear.
+        *
+        * This is often used when the brightness values are adjusted to
+        * match the eye's relative, non-linear perception of brightness.
+        */
        BACKLIGHT_SCALE_NON_LINEAR,
 };
 
 struct backlight_device;
 struct fb_info;
 
+/**
+ * struct backlight_ops - backlight operations
+ *
+ * The backlight operations are specified when the backlight device is registered.
+ */
 struct backlight_ops {
+       /**
+        * @options: Configure how operations are called from the core.
+        *
+        * The options parameter is used to adjust the behaviour of the core.
+        * Set BL_CORE_SUSPENDRESUME to get the update_status() operation called
+        * upon suspend and resume.
+        */
        unsigned int options;
 
 #define BL_CORE_SUSPENDRESUME  (1 << 0)
 
-       /* Notify the backlight driver some property has changed */
+       /**
+        * @update_status: Operation called when properties have changed.
+        *
+        * Notify the backlight driver that some property has changed.
+        * The update_status operation is protected by the update_lock.
+        *
+        * The backlight driver is expected to use backlight_is_blank()
+        * to check if the display is blanked and set brightness accordingly.
+        * update_status() is called when any of the properties has changed.
+        *
+        * RETURNS:
+        *
+        * 0 on success, negative error code if any failure occurred.
+        */
        int (*update_status)(struct backlight_device *);
-       /* Return the current backlight brightness (accounting for power,
-          fb_blank etc.) */
+
+       /**
+        * @get_brightness: Return the current backlight brightness.
+        *
+        * The driver may implement this as a readback from the HW.
+        * This operation is optional and if not present then the current
+        * brightness property value is used.
+        *
+        * RETURNS:
+        *
+        * A brightness value which is 0 or a positive number.
+        * On failure a negative error code is returned.
+        */
        int (*get_brightness)(struct backlight_device *);
-       /* Check if given framebuffer device is the one bound to this backlight;
-          return 0 if not, !=0 if it is. If NULL, backlight always matches the fb. */
-       int (*check_fb)(struct backlight_device *, struct fb_info *);
+
+       /**
+        * @check_fb: Check the framebuffer device.
+        *
+        * Check if given framebuffer device is the one bound to this backlight.
+        * This operation is optional and if not implemented it is assumed that the
+        * fbdev is always the one bound to the backlight.
+        *
+        * RETURNS:
+        *
+        * If info is NULL or info matches the fbdev bound to the backlight, return true.
+        * If info does not match the fbdev bound to the backlight, return false.
+        */
+       int (*check_fb)(struct backlight_device *bd, struct fb_info *info);
 };
 
-/* This structure defines all the properties of a backlight */
+/**
+ * struct backlight_properties - backlight properties
+ *
+ * This structure defines all the properties of a backlight.
+ */
 struct backlight_properties {
-       /* Current User requested brightness (0 - max_brightness) */
+       /**
+        * @brightness: The current brightness requested by the user.
+        *
+        * The backlight core makes sure the range is (0 to max_brightness)
+        * when the brightness is set via the sysfs attribute:
+        * /sys/class/backlight/<backlight>/brightness.
+        *
+        * This value can be set in the backlight_properties passed
+        * to devm_backlight_device_register() to set a default brightness
+        * value.
+        */
        int brightness;
-       /* Maximal value for brightness (read-only) */
+
+       /**
+        * @max_brightness: The maximum brightness value.
+        *
+        * This value must be set in the backlight_properties passed to
+        * devm_backlight_device_register() and shall not be modified by the
+        * driver after registration.
+        */
        int max_brightness;
-       /* Current FB Power mode (0: full on, 1..3: power saving
-          modes; 4: full off), see FB_BLANK_XXX */
+
+       /**
+        * @power: The current power mode.
+        *
+        * User space can configure the power mode using the sysfs
+        * attribute: /sys/class/backlight/<backlight>/bl_power.
+        * When the power property is updated, update_status() is called.
+        *
+        * The possible values are: (0: full on, 1 to 3: power saving
+        * modes; 4: full off), see FB_BLANK_XXX.
+        *
+        * When the backlight device is enabled @power is set
+        * to FB_BLANK_UNBLANK. When the backlight device is disabled
+        * @power is set to FB_BLANK_POWERDOWN.
+        */
        int power;
-       /* FB Blanking active? (values as for power) */
-       /* Due to be removed, please use (state & BL_CORE_FBBLANK) */
+
+       /**
+        * @fb_blank: The power state from the FBIOBLANK ioctl.
+        *
+        * When the FBIOBLANK ioctl is called @fb_blank is set to the
+        * blank parameter and the update_status() operation is called.
+        *
+        * When the backlight device is enabled @fb_blank is set
+        * to FB_BLANK_UNBLANK. When the backlight device is disabled
+        * @fb_blank is set to FB_BLANK_POWERDOWN.
+        *
+        * Backlight drivers should avoid using this property. It has been
+        * replaced by state & BL_CORE_FBBLANK (although most drivers should
+        * use backlight_is_blank() as the preferred means to get the blank
+        * state).
+        *
+        * fb_blank is deprecated and will be removed.
+        */
        int fb_blank;
-       /* Backlight type */
+
+       /**
+        * @type: The type of backlight supported.
+        *
+        * The backlight type allows userspace to make appropriate
+        * policy decisions based on the backlight type.
+        *
+        * This value must be set in the backlight_properties
+        * passed to devm_backlight_device_register().
+        */
        enum backlight_type type;
-       /* Flags used to signal drivers of state changes */
+
+       /**
+        * @state: The state of the backlight core.
+        *
+        * The state is a bitmask. BL_CORE_FBBLANK is set when the display
+        * is expected to be blank. BL_CORE_SUSPENDED is set when the
+        * driver is suspended.
+        *
+        * Backlight drivers are expected to use backlight_is_blank()
+        * in their update_status() operation rather than reading the
+        * state property.
+        *
+        * The state is maintained by the core and drivers may not modify it.
+        */
        unsigned int state;
-       /* Type of the brightness scale (linear, non-linear, ...) */
-       enum backlight_scale scale;
 
 #define BL_CORE_SUSPENDED      (1 << 0)        /* backlight is suspended */
 #define BL_CORE_FBBLANK                (1 << 1)        /* backlight is under an fb blank event */
 
+       /**
+        * @scale: The type of the brightness scale.
+        */
+       enum backlight_scale scale;
 };
 
+/**
+ * struct backlight_device - backlight device data
+ *
+ * This structure holds all data required by a backlight device.
+ */
 struct backlight_device {
-       /* Backlight properties */
+       /**
+        * @props: Backlight properties
+        */
        struct backlight_properties props;
 
-       /* Serialise access to update_status method */
+       /**
+        * @update_lock: The lock used when calling the update_status() operation.
+        *
+        * update_lock is an internal backlight lock that serialises access
+        * to the update_status() operation. The backlight core holds the update_lock
+        * when calling the update_status() operation. The update_lock shall not
+        * be used by backlight drivers.
+        */
        struct mutex update_lock;
 
-       /* This protects the 'ops' field. If 'ops' is NULL, the driver that
-          registered this device has been unloaded, and if class_get_devdata()
-          points to something in the body of that driver, it is also invalid. */
+       /**
+        * @ops_lock: The lock used around everything related to backlight_ops.
+        *
+        * ops_lock is an internal backlight lock that protects the ops pointer
+        * and is used around all accesses to ops and when the operations are
+        * invoked. The ops_lock shall not be used by backlight drivers.
+        */
        struct mutex ops_lock;
+
+       /**
+        * @ops: Pointer to the backlight operations.
+        *
+        * If ops is NULL, the driver that registered this device has been unloaded,
+        * and if class_get_devdata() points to something in the body of that driver,
+        * it is also invalid.
+        */
        const struct backlight_ops *ops;
 
-       /* The framebuffer notifier block */
+       /**
+        * @fb_notif: The framebuffer notifier block
+        */
        struct notifier_block fb_notif;
 
-       /* list entry of all registered backlight devices */
+       /**
+        * @entry: List entry of all registered backlight devices
+        */
        struct list_head entry;
 
+       /**
+        * @dev: Parent device.
+        */
        struct device dev;
 
-       /* Multiple framebuffers may share one backlight device */
+       /**
+        * @fb_bl_on: The state of individual fbdevs.
+        *
+        * Multiple fbdevs may share one backlight device. The fb_bl_on
+        * array records the state of each individual fbdev.
+        */
        bool fb_bl_on[FB_MAX];
 
+       /**
+        * @use_count: The number of uses of fb_bl_on.
+        */
        int use_count;
 };
 
+/**
+ * backlight_update_status - force an update of the backlight device status
+ * @bd: the backlight device
+ */
 static inline int backlight_update_status(struct backlight_device *bd)
 {
        int ret = -ENOENT;
@@ -166,49 +389,83 @@ static inline int backlight_disable(struct backlight_device *bd)
 }
 
 /**
- * backlight_put - Drop backlight reference
- * @bd: the backlight device to put
+ * backlight_is_blank - Return true if display is expected to be blank
+ * @bd: the backlight device
+ *
+ * Display is expected to be blank if any of these is true::
+ *
+ *   1) if power is not UNBLANK
+ *   2) if fb_blank is not UNBLANK
+ *   3) if state indicates BLANK or SUSPENDED
+ *
+ * Returns true if display is expected to be blank, false otherwise.
  */
-static inline void backlight_put(struct backlight_device *bd)
+static inline bool backlight_is_blank(const struct backlight_device *bd)
 {
-       if (bd)
-               put_device(&bd->dev);
+       return bd->props.power != FB_BLANK_UNBLANK ||
+              bd->props.fb_blank != FB_BLANK_UNBLANK ||
+              bd->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK);
 }
 
-extern struct backlight_device *backlight_device_register(const char *name,
-       struct device *dev, void *devdata, const struct backlight_ops *ops,
-       const struct backlight_properties *props);
-extern struct backlight_device *devm_backlight_device_register(
-       struct device *dev, const char *name, struct device *parent,
-       void *devdata, const struct backlight_ops *ops,
-       const struct backlight_properties *props);
-extern void backlight_device_unregister(struct backlight_device *bd);
-extern void devm_backlight_device_unregister(struct device *dev,
-                                       struct backlight_device *bd);
-extern void backlight_force_update(struct backlight_device *bd,
-                                  enum backlight_update_reason reason);
-extern int backlight_register_notifier(struct notifier_block *nb);
-extern int backlight_unregister_notifier(struct notifier_block *nb);
-extern struct backlight_device *backlight_device_get_by_type(enum backlight_type type);
+/**
+ * backlight_get_brightness - Returns the current brightness value
+ * @bd: the backlight device
+ *
+ * Returns the current brightness value, taking into consideration the
+ * current state. If backlight_is_blank() returns true, 0 is returned;
+ * otherwise the current brightness property value is returned.
+ *
+ * Backlight drivers are expected to use this function in their update_status()
+ * operation to get the brightness value.
+ */
+static inline int backlight_get_brightness(const struct backlight_device *bd)
+{
+       if (backlight_is_blank(bd))
+               return 0;
+       else
+               return bd->props.brightness;
+}
+
+struct backlight_device *
+backlight_device_register(const char *name, struct device *dev, void *devdata,
+                         const struct backlight_ops *ops,
+                         const struct backlight_properties *props);
+struct backlight_device *
+devm_backlight_device_register(struct device *dev, const char *name,
+                              struct device *parent, void *devdata,
+                              const struct backlight_ops *ops,
+                              const struct backlight_properties *props);
+void backlight_device_unregister(struct backlight_device *bd);
+void devm_backlight_device_unregister(struct device *dev,
+                                     struct backlight_device *bd);
+void backlight_force_update(struct backlight_device *bd,
+                           enum backlight_update_reason reason);
+int backlight_register_notifier(struct notifier_block *nb);
+int backlight_unregister_notifier(struct notifier_block *nb);
 struct backlight_device *backlight_device_get_by_name(const char *name);
-extern int backlight_device_set_brightness(struct backlight_device *bd, unsigned long brightness);
+struct backlight_device *backlight_device_get_by_type(enum backlight_type type);
+int backlight_device_set_brightness(struct backlight_device *bd,
+                                   unsigned long brightness);
 
 #define to_backlight_device(obj) container_of(obj, struct backlight_device, dev)
 
+/**
+ * bl_get_data - access devdata
+ * @bl_dev: pointer to backlight device
+ *
+ * When a backlight device is registered the driver may supply a
+ * void * devdata pointer. bl_get_data() returns a pointer to that
+ * devdata.
+ *
+ * RETURNS:
+ *
+ * pointer to devdata stored while registering the backlight device.
+ */
 static inline void * bl_get_data(struct backlight_device *bl_dev)
 {
        return dev_get_drvdata(&bl_dev->dev);
 }
 
-struct generic_bl_info {
-       const char *name;
-       int max_intensity;
-       int default_intensity;
-       int limit_mask;
-       void (*set_bl_intensity)(int intensity);
-       void (*kick_battery)(void);
-};
-
 #ifdef CONFIG_OF
 struct backlight_device *of_find_backlight_by_node(struct device_node *node);
 #else
@@ -220,14 +477,8 @@ of_find_backlight_by_node(struct device_node *node)
 #endif
 
 #if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
-struct backlight_device *of_find_backlight(struct device *dev);
 struct backlight_device *devm_of_find_backlight(struct device *dev);
 #else
-static inline struct backlight_device *of_find_backlight(struct device *dev)
-{
-       return NULL;
-}
-
 static inline struct backlight_device *
 devm_of_find_backlight(struct device *dev)
 {
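
The reworked backlight kerneldoc above describes a small driver-facing contract: fill in backlight_properties, implement update_status() around backlight_get_brightness(), and register via devm_backlight_device_register(). A minimal sketch following that contract is shown below; the foo_* names, the "foo-backlight" device name and the foo_write_hw() helper are hypothetical placeholders, not part of the patch.

/* Hypothetical driver sketch; foo_* names and foo_write_hw() are assumed. */
#include <linux/backlight.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int foo_write_hw(struct platform_device *pdev, int brightness)
{
	/* Placeholder for real register access. */
	dev_dbg(&pdev->dev, "brightness=%d\n", brightness);
	return 0;
}

static int foo_bl_update_status(struct backlight_device *bd)
{
	/* 0 while blanked/suspended, otherwise props.brightness */
	int brightness = backlight_get_brightness(bd);

	return foo_write_hw(bl_get_data(bd), brightness);
}

static const struct backlight_ops foo_bl_ops = {
	.options	= BL_CORE_SUSPENDRESUME,
	.update_status	= foo_bl_update_status,
};

static int foo_bl_probe(struct platform_device *pdev)
{
	struct backlight_properties props = {
		.type		= BACKLIGHT_RAW,
		.scale		= BACKLIGHT_SCALE_LINEAR,
		.max_brightness	= 255,
		.brightness	= 128,
	};
	struct backlight_device *bd;

	bd = devm_backlight_device_register(&pdev->dev, "foo-backlight",
					    &pdev->dev, pdev, &foo_bl_ops,
					    &props);
	if (IS_ERR(bd))
		return PTR_ERR(bd);

	backlight_update_status(bd);
	return 0;
}

static struct platform_driver foo_bl_driver = {
	.probe	= foo_bl_probe,
	.driver	= { .name = "foo-backlight" },
};
module_platform_driver(foo_bl_driver);
MODULE_LICENSE("GPL");
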
index 48ea093..4e035ac 100644 (file)
@@ -77,7 +77,7 @@
  */
 #define FIELD_FIT(_mask, _val)                                         \
        ({                                                              \
-               __BF_FIELD_CHECK(_mask, 0ULL, _val, "FIELD_FIT: ");     \
+               __BF_FIELD_CHECK(_mask, 0ULL, 0ULL, "FIELD_FIT: ");     \
                !((((typeof(_mask))_val) << __bf_shf(_mask)) & ~(_mask)); \
        })
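
With _val no longer fed to the compile-time check, FIELD_FIT() can be asked whether a value fits in a field, constant or not, and simply evaluates to false instead of breaking the build. A hedged usage sketch; REG_CTRL_SPEED and encode_speed() are made-up names.

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/types.h>

#define REG_CTRL_SPEED	GENMASK(7, 4)	/* hypothetical 4-bit register field */

static int encode_speed(u32 *reg, u32 speed)
{
	/* Returns false (no build error) when speed does not fit the field. */
	if (!FIELD_FIT(REG_CTRL_SPEED, speed))
		return -EINVAL;

	*reg |= FIELD_PREP(REG_CTRL_SPEED, speed);
	return 0;
}
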
 
index cef4ef0..55f694b 100644 (file)
@@ -1214,15 +1214,17 @@ struct bpf_iter_aux_info {
        struct bpf_map *map;
 };
 
-typedef int (*bpf_iter_check_target_t)(struct bpf_prog *prog,
-                                      struct bpf_iter_aux_info *aux);
+typedef int (*bpf_iter_attach_target_t)(struct bpf_prog *prog,
+                                       union bpf_iter_link_info *linfo,
+                                       struct bpf_iter_aux_info *aux);
+typedef void (*bpf_iter_detach_target_t)(struct bpf_iter_aux_info *aux);
 
 #define BPF_ITER_CTX_ARG_MAX 2
 struct bpf_iter_reg {
        const char *target;
-       bpf_iter_check_target_t check_target;
+       bpf_iter_attach_target_t attach_target;
+       bpf_iter_detach_target_t detach_target;
        u32 ctx_arg_info_size;
-       enum bpf_iter_link_info req_linfo;
        struct bpf_ctx_arg_aux ctx_arg_info[BPF_ITER_CTX_ARG_MAX];
        const struct bpf_iter_seq_info *seq_info;
 };
index 68f858c..243ee54 100644 (file)
@@ -10,7 +10,7 @@
  *
  * A B+Tree is a data structure for looking up arbitrary (currently allowing
  * unsigned long, u32, u64 and 2 * u64) keys into pointers. The data structure
- * is described at http://en.wikipedia.org/wiki/B-tree, we currently do not
+ * is described at https://en.wikipedia.org/wiki/B-tree, we currently do not
  * use binary search to find the key on lookups.
  *
  * Each B+Tree consists of a head, that contains bookkeeping information and
index 39e6f4c..fcd84e8 100644 (file)
@@ -58,7 +58,7 @@
  *    because 10.2.z (jewel) did not care if its peers advertised this
  *    feature bit.
  *
- *  - In the second phase we stop advertising the the bit and call it
+ *  - In the second phase we stop advertising the bit and call it
  *    RETIRED.  This can normally be done in the *next* major release
  *    following the one in which we marked the feature DEPRECATED.  In
  *    the above example, for 12.0.z (luminous) we can say:
index ebf5ba6..455e9b9 100644 (file)
@@ -130,6 +130,7 @@ struct ceph_dir_layout {
 #define CEPH_MSG_CLIENT_REQUEST         24
 #define CEPH_MSG_CLIENT_REQUEST_FORWARD 25
 #define CEPH_MSG_CLIENT_REPLY           26
+#define CEPH_MSG_CLIENT_METRICS         29
 #define CEPH_MSG_CLIENT_CAPS            0x310
 #define CEPH_MSG_CLIENT_LEASE           0x311
 #define CEPH_MSG_CLIENT_SNAP            0x312
index e5ed1c5..c8645f0 100644 (file)
@@ -282,6 +282,7 @@ extern struct kmem_cache *ceph_dentry_cachep;
 extern struct kmem_cache *ceph_file_cachep;
 extern struct kmem_cache *ceph_dir_file_cachep;
 extern struct kmem_cache *ceph_mds_request_cachep;
+extern mempool_t *ceph_wb_pagevec_pool;
 
 /* ceph_common.c */
 extern bool libceph_compatible(void *data);
index c60b59e..83fa08a 100644 (file)
@@ -404,7 +404,7 @@ void ceph_osdc_clear_abort_err(struct ceph_osd_client *osdc);
        &__oreq->r_ops[__whch].typ.fld;                                 \
 })
 
-extern void osd_req_op_init(struct ceph_osd_request *osd_req,
+struct ceph_osd_req_op *osd_req_op_init(struct ceph_osd_request *osd_req,
                            unsigned int which, u16 opcode, u32 flags);
 
 extern void osd_req_op_raw_data_in_pages(struct ceph_osd_request *,
index 6f815be..03a5de5 100644 (file)
@@ -1096,7 +1096,6 @@ int clk_hw_get_parent_index(struct clk_hw *hw);
 int clk_hw_set_parent(struct clk_hw *hw, struct clk_hw *new_parent);
 unsigned int __clk_get_enable_count(struct clk *clk);
 unsigned long clk_hw_get_rate(const struct clk_hw *hw);
-unsigned long __clk_get_flags(struct clk *clk);
 unsigned long clk_hw_get_flags(const struct clk_hw *hw);
 #define clk_hw_can_set_rate_parent(hw) \
        (clk_hw_get_flags((hw)) & CLK_SET_RATE_PARENT)
index 49a53a1..a4f82e8 100644 (file)
@@ -59,6 +59,7 @@
 #define AT91_PMC_PLL_UPDT              0x1C            /* PMC PLL update register [for SAM9X60] */
 #define                AT91_PMC_PLL_UPDT_UPDATE        (1 << 8)        /* Update PLL settings */
 #define                AT91_PMC_PLL_UPDT_ID            (1 << 0)        /* PLL ID */
+#define                AT91_PMC_PLL_UPDT_ID_MSK        (0xf)           /* PLL ID mask */
 #define                AT91_PMC_PLL_UPDT_STUPTIM       (0xff << 16)    /* Startup time */
 
 #define        AT91_CKGR_MOR           0x20                    /* Main Oscillator Register [not on SAM9RL] */
 #define                        AT91_PMC_PLLADIV2_ON            (1 << 12)
 #define                AT91_PMC_H32MXDIV       BIT(24)
 
+#define AT91_PMC_XTALF         0x34                    /* Main XTAL Frequency Register [SAMA7G5 only] */
+
 #define        AT91_PMC_USB            0x38                    /* USB Clock Register [some SAM9 only] */
 #define                AT91_PMC_USBS           (0x1 <<  0)             /* USB OHCI Input clock selection */
 #define                        AT91_PMC_USBS_PLLA              (0 << 0)
 #define                AT91_PMC_MOSCRCS        (1 << 17)               /* Main On-Chip RC [some SAM9] */
 #define                AT91_PMC_CFDEV          (1 << 18)               /* Clock Failure Detector Event [some SAM9] */
 #define                AT91_PMC_GCKRDY         (1 << 24)               /* Generated Clocks */
+#define                AT91_PMC_MCKXRDY        (1 << 26)               /* Master Clock x [x=1..4] Ready Status */
 #define        AT91_PMC_IMR            0x6c                    /* Interrupt Mask Register */
 
 #define AT91_PMC_FSMR          0x70            /* Fast Startup Mode Register */
index 6fa0eea..25a521d 100644 (file)
@@ -85,11 +85,13 @@ static inline unsigned long compact_gap(unsigned int order)
 
 #ifdef CONFIG_COMPACTION
 extern int sysctl_compact_memory;
+extern unsigned int sysctl_compaction_proactiveness;
 extern int sysctl_compaction_handler(struct ctl_table *table, int write,
                        void *buffer, size_t *length, loff_t *ppos);
 extern int sysctl_extfrag_threshold;
 extern int sysctl_compact_unevictable_allowed;
 
+extern unsigned int extfrag_for_order(struct zone *zone, unsigned int order);
 extern int fragmentation_index(struct zone *zone, unsigned int order);
 extern enum compact_result try_to_compact_pages(gfp_t gfp_mask,
                unsigned int order, unsigned int alloc_flags,
index c4255d8..d38c4d7 100644 (file)
@@ -851,7 +851,6 @@ asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp,
 asmlinkage long compat_sys_ustat(unsigned dev, struct compat_ustat __user *u32);
 asmlinkage long compat_sys_recv(int fd, void __user *buf, compat_size_t len,
                                unsigned flags);
-asmlinkage long compat_sys_sysctl(struct compat_sysctl_args __user *args);
 
 /* obsolete: fs/readdir.c */
 asmlinkage long compat_sys_old_readdir(unsigned int fd,
index 8a072d0..cee0c72 100644 (file)
@@ -40,7 +40,7 @@
 #endif
 
 /*
- * Not all versions of clang implement the the type-generic versions
+ * Not all versions of clang implement the type-generic versions
  * of the builtin overflow checkers. Fortunately, clang implements
  * __has_builtin allowing us to avoid awkward version
  * checks. Unfortunately, we don't know which version of gcc clang
index 2e231ba..4b33cb3 100644 (file)
@@ -5,48 +5,54 @@
 #ifndef __ASSEMBLY__
 
 #ifdef __CHECKER__
+/* address spaces */
 # define __kernel      __attribute__((address_space(0)))
 # define __user                __attribute__((noderef, address_space(__user)))
-# define __safe                __attribute__((safe))
-# define __force       __attribute__((force))
-# define __nocast      __attribute__((nocast))
 # define __iomem       __attribute__((noderef, address_space(__iomem)))
+# define __percpu      __attribute__((noderef, address_space(__percpu)))
+# define __rcu         __attribute__((noderef, address_space(__rcu)))
+extern void __chk_user_ptr(const volatile void __user *);
+extern void __chk_io_ptr(const volatile void __iomem *);
+/* context/locking */
 # define __must_hold(x)        __attribute__((context(x,1,1)))
 # define __acquires(x) __attribute__((context(x,0,1)))
 # define __releases(x) __attribute__((context(x,1,0)))
 # define __acquire(x)  __context__(x,1)
 # define __release(x)  __context__(x,-1)
 # define __cond_lock(x,c)      ((c) ? ({ __acquire(x); 1; }) : 0)
-# define __percpu      __attribute__((noderef, address_space(__percpu)))
-# define __rcu         __attribute__((noderef, address_space(__rcu)))
+/* other */
+# define __force       __attribute__((force))
+# define __nocast      __attribute__((nocast))
+# define __safe                __attribute__((safe))
 # define __private     __attribute__((noderef))
-extern void __chk_user_ptr(const volatile void __user *);
-extern void __chk_io_ptr(const volatile void __iomem *);
 # define ACCESS_PRIVATE(p, member) (*((typeof((p)->member) __force *) &(p)->member))
 #else /* __CHECKER__ */
+/* address spaces */
+# define __kernel
 # ifdef STRUCTLEAK_PLUGIN
-#  define __user __attribute__((user))
+#  define __user       __attribute__((user))
 # else
 #  define __user
 # endif
-# define __kernel
-# define __safe
-# define __force
-# define __nocast
 # define __iomem
-# define __chk_user_ptr(x) (void)0
-# define __chk_io_ptr(x) (void)0
-# define __builtin_warning(x, y...) (1)
+# define __percpu
+# define __rcu
+# define __chk_user_ptr(x)     (void)0
+# define __chk_io_ptr(x)       (void)0
+/* context/locking */
 # define __must_hold(x)
 # define __acquires(x)
 # define __releases(x)
-# define __acquire(x) (void)0
-# define __release(x) (void)0
+# define __acquire(x)  (void)0
+# define __release(x)  (void)0
 # define __cond_lock(x,c) (c)
-# define __percpu
-# define __rcu
+/* other */
+# define __force
+# define __nocast
+# define __safe
 # define __private
 # define ACCESS_PRIVATE(p, member) ((p)->member)
+# define __builtin_warning(x, y...) (1)
 #endif /* __CHECKER__ */
 
 /* Indirect macros required for expanded argument pasting, eg. __LINE__. */
index 58687a5..8f141d4 100644 (file)
@@ -576,6 +576,8 @@ unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
 unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy);
 int cpufreq_register_governor(struct cpufreq_governor *governor);
 void cpufreq_unregister_governor(struct cpufreq_governor *governor);
+int cpufreq_start_governor(struct cpufreq_policy *policy);
+void cpufreq_stop_governor(struct cpufreq_policy *policy);
 
 #define cpufreq_governor_init(__governor)                      \
 static int __init __governor##_init(void)                      \
index a2710e6..3215023 100644 (file)
@@ -132,6 +132,7 @@ enum cpuhp_state {
        CPUHP_AP_MIPS_GIC_TIMER_STARTING,
        CPUHP_AP_ARC_TIMER_STARTING,
        CPUHP_AP_RISCV_TIMER_STARTING,
+       CPUHP_AP_CLINT_TIMER_STARTING,
        CPUHP_AP_CSKY_TIMER_STARTING,
        CPUHP_AP_HYPERV_TIMER_STARTING,
        CPUHP_AP_KVM_STARTING,
index 525510a..6594dbc 100644 (file)
@@ -38,6 +38,8 @@ phys_addr_t paddr_vmcoreinfo_note(void);
 
 #define VMCOREINFO_OSRELEASE(value) \
        vmcoreinfo_append_str("OSRELEASE=%s\n", value)
+#define VMCOREINFO_BUILD_ID(value) \
+       vmcoreinfo_append_str("BUILD-ID=%s\n", value)
 #define VMCOREINFO_PAGESIZE(value) \
        vmcoreinfo_append_str("PAGESIZE=%ld\n", value)
 #define VMCOREINFO_SYMBOL(name) \
@@ -64,6 +66,10 @@ extern unsigned char *vmcoreinfo_data;
 extern size_t vmcoreinfo_size;
 extern u32 *vmcoreinfo_note;
 
+/* raw contents of kernel .notes section */
+extern const void __start_notes __weak;
+extern const void __stop_notes __weak;
+
 Elf_Word *append_elf_note(Elf_Word *buf, char *name, unsigned int type,
                          void *data, size_t data_len);
 void final_note(Elf_Word *buf);
index 33c16f2..2f811ba 100644 (file)
@@ -17,7 +17,7 @@
  * The algorithm was originally described in detail in this paper
  * (although the algorithm has evolved somewhat since then):
  *
- *     http://www.ssrc.ucsc.edu/Papers/weil-sc06.pdf
+ *     https://www.ssrc.ucsc.edu/Papers/weil-sc06.pdf
  *
  * LGPL2
  */
index a81f0c3..65d975b 100644 (file)
@@ -89,7 +89,7 @@ extern struct dentry_stat_t dentry_stat;
 struct dentry {
        /* RCU lookup touched fields */
        unsigned int d_flags;           /* protected by d_lock */
-       seqcount_t d_seq;               /* per dentry seqlock */
+       seqcount_spinlock_t d_seq;      /* per dentry seqlock */
        struct hlist_bl_node d_hash;    /* lookup hash list */
        struct dentry *d_parent;        /* parent directory */
        struct qstr d_name;
index 5e016a4..1d0e2ce 100644 (file)
@@ -16,7 +16,7 @@
  *  3. CPU clock rate changes.
  *
  * Please see this thread:
- *   http://lists.openwall.net/linux-kernel/2011/01/09/56
+ *   https://lists.openwall.net/linux-kernel/2011/01/09/56
  */
 
 #include <linux/kernel.h>
index 4208f94..7b3b04b 100644 (file)
@@ -67,8 +67,6 @@ extern void debug_dma_sync_sg_for_device(struct device *dev,
 
 extern void debug_dma_dump_mappings(struct device *dev);
 
-extern void debug_dma_assert_idle(struct page *page);
-
 #else /* CONFIG_DMA_API_DEBUG */
 
 static inline void dma_debug_add_bus(struct bus_type *bus)
@@ -157,10 +155,6 @@ static inline void debug_dma_dump_mappings(struct device *dev)
 {
 }
 
-static inline void debug_dma_assert_idle(struct page *page)
-{
-}
-
 #endif /* CONFIG_DMA_API_DEBUG */
 
 #endif /* __DMA_DEBUG_H */
index 5a3ce2a..6e87225 100644 (file)
@@ -73,9 +73,6 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size,
 }
 
 u64 dma_direct_get_required_mask(struct device *dev);
-gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
-                                 u64 *phys_mask);
-bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size);
 void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
                gfp_t gfp, unsigned long attrs);
 void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
index 016b96b..52635e9 100644 (file)
@@ -522,8 +522,9 @@ void *dma_common_pages_remap(struct page **pages, size_t size,
                        pgprot_t prot, const void *caller);
 void dma_common_free_remap(void *cpu_addr, size_t size);
 
-void *dma_alloc_from_pool(struct device *dev, size_t size,
-                         struct page **ret_page, gfp_t flags);
+struct page *dma_alloc_from_pool(struct device *dev, size_t size,
+               void **cpu_addr, gfp_t flags,
+               bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t));
 bool dma_free_from_pool(struct device *dev, void *start, size_t size);
 
 int
index ee50d10..d44a77e 100644 (file)
@@ -46,8 +46,6 @@
 #include <linux/rcupdate.h>
 
 extern struct ww_class reservation_ww_class;
-extern struct lock_class_key reservation_seqcount_class;
-extern const char reservation_seqcount_string[];
 
 /**
  * struct dma_resv_list - a list of shared fences
@@ -71,7 +69,7 @@ struct dma_resv_list {
  */
 struct dma_resv {
        struct ww_mutex lock;
-       seqcount_t seq;
+       seqcount_ww_mutex_t seq;
 
        struct dma_fence __rcu *fence_excl;
        struct dma_resv_list __rcu *fence;
index 61d5cc0..1962f75 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- *  Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ *  Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com
  */
 
 #ifndef K3_PSIL_H_
index caadbab..5eb34ad 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- *  Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ *  Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com
  */
 
 #ifndef K3_UDMA_GLUE_H_
index 579356a..5896441 100644 (file)
@@ -2,7 +2,7 @@
 /*
  * CPPI5 descriptors interface
  *
- * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ * Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com
  */
 
 #ifndef __TI_CPPI5_H__
index d7bf029..6556582 100644 (file)
@@ -48,6 +48,7 @@ struct dmar_drhd_unit {
        u16     segment;                /* PCI domain           */
        u8      ignored:1;              /* ignore drhd          */
        u8      include_all:1;
+       u8      gfx_dedicated:1;        /* graphic dedicated    */
        struct intel_iommu *iommu;
 };
 
index 99fc06f..407c2f2 100644 (file)
@@ -38,6 +38,8 @@
 
 #ifdef __KERNEL__
 
+#include <asm/bug.h>
+
 struct dql {
        /* Fields accessed in enqueue path (dql_queued) */
        unsigned int    num_queued;             /* Total ever queued */
index d896b86..3ceb72b 100644 (file)
@@ -178,7 +178,7 @@ struct fid {
  * get_name:
  *    @get_name should find a name for the given @child in the given @parent
  *    directory.  The name should be stored in the @name (with the
- *    understanding that it is already pointing to a %NAME_MAX+1 sized
+ *    understanding that it is already pointing to a %NAME_MAX+1 sized
  *    buffer.   get_name() should return %0 on success, a negative error code
  *    or error.  @get_name will be called without @parent->i_mutex held.
  *
index 2b530e6..850f79e 100644 (file)
@@ -400,8 +400,6 @@ struct fb_tile_ops {
 #define FBINFO_HWACCEL_YPAN            0x2000 /* optional */
 #define FBINFO_HWACCEL_YWRAP           0x4000 /* optional */
 
-#define FBINFO_MISC_USEREVENT          0x10000 /* event request
-                                                 from userspace */
 #define FBINFO_MISC_TILEBLITTING       0x20000 /* use tile blitting */
 
 /* A driver may set this flag to indicate that it does want a set_par to be
index 6d77598..b07d88c 100644 (file)
@@ -10,7 +10,7 @@
 /*
  * Return code to denote that requested number of
  * frontswap pages are unused(moved to page cache).
- * Used in in shmem_unuse and try_to_unuse.
+ * Used in shmem_unuse and try_to_unuse.
  */
 #define FRONTSWAP_PAGES_UNUSED 2
 
index 407881e..e019ea2 100644 (file)
@@ -518,6 +518,16 @@ static inline void i_mmap_unlock_read(struct address_space *mapping)
        up_read(&mapping->i_mmap_rwsem);
 }
 
+static inline void i_mmap_assert_locked(struct address_space *mapping)
+{
+       lockdep_assert_held(&mapping->i_mmap_rwsem);
+}
+
+static inline void i_mmap_assert_write_locked(struct address_space *mapping)
+{
+       lockdep_assert_held_write(&mapping->i_mmap_rwsem);
+}
+
 /*
  * Might pages of this file be mapped into userspace?
  */
@@ -2650,7 +2660,7 @@ static inline void filemap_set_wb_err(struct address_space *mapping, int err)
 }
 
 /**
- * filemap_check_wb_error - has an error occurred since the mark was sampled?
+ * filemap_check_wb_err - has an error occurred since the mark was sampled?
  * @mapping: mapping to check for writeback errors
  * @since: previously-sampled errseq_t
  *
@@ -3312,7 +3322,7 @@ static inline int kiocb_set_rw_flags(struct kiocb *ki, rwf_t flags)
        if (flags & RWF_NOWAIT) {
                if (!(ki->ki_filp->f_mode & FMODE_NOWAIT))
                        return -EOPNOTSUPP;
-               kiocb_flags |= IOCB_NOWAIT;
+               kiocb_flags |= IOCB_NOWAIT | IOCB_NOIO;
        }
        if (flags & RWF_HIPRI)
                kiocb_flags |= IOCB_HIPRI;
index cf1015a..783b48d 100644 (file)
@@ -9,7 +9,7 @@
 struct fs_struct {
        int users;
        spinlock_t lock;
-       seqcount_t seq;
+       seqcount_spinlock_t seq;
        int umask;
        int in_exec;
        struct path root, pwd;
index 02393c0..bfd0032 100644 (file)
@@ -44,7 +44,7 @@
 struct genradix_root;
 
 struct __genradix {
-       struct genradix_root __rcu      *root;
+       struct genradix_root            *root;
 };
 
 /*
index d6e82e3..14e6202 100644 (file)
@@ -73,7 +73,7 @@ static inline void kunmap(struct page *page)
  * no global lock is needed and because the kmap code must perform a global TLB
  * invalidation when the kmap pool wraps.
  *
- * However when holding an atomic kmap is is not legal to sleep, so atomic
+ * However when holding an atomic kmap it is not legal to sleep, so atomic
  * kmaps are appropriate for short, tight code paths only.
  *
  * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
index 15c8ac3..107cedd 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/init.h>
 #include <linux/list.h>
 #include <linux/percpu.h>
+#include <linux/seqlock.h>
 #include <linux/timer.h>
 #include <linux/timerqueue.h>
 
@@ -159,7 +160,7 @@ struct hrtimer_clock_base {
        struct hrtimer_cpu_base *cpu_base;
        unsigned int            index;
        clockid_t               clockid;
-       seqcount_t              seq;
+       seqcount_raw_spinlock_t seq;
        struct hrtimer          *running;
        struct timerqueue_head  active;
        ktime_t                 (*get_time)(void);
index 17c4c49..8a8bc46 100644 (file)
@@ -181,13 +181,6 @@ static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
 #define transparent_hugepage_use_zero_page()                           \
        (transparent_hugepage_flags &                                   \
         (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
-#ifdef CONFIG_DEBUG_VM
-#define transparent_hugepage_debug_cow()                               \
-       (transparent_hugepage_flags &                                   \
-        (1<<TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG))
-#else /* CONFIG_DEBUG_VM */
-#define transparent_hugepage_debug_cow() 0
-#endif /* CONFIG_DEBUG_VM */
 
 extern unsigned long thp_get_unmapped_area(struct file *filp,
                unsigned long addr, unsigned long len, unsigned long pgoff,
@@ -265,9 +258,36 @@ static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
        else
                return NULL;
 }
-static inline int hpage_nr_pages(struct page *page)
+
+/**
+ * thp_head - Head page of a transparent huge page.
+ * @page: Any page (tail, head or regular) found in the page cache.
+ */
+static inline struct page *thp_head(struct page *page)
 {
-       if (unlikely(PageTransHuge(page)))
+       return compound_head(page);
+}
+
+/**
+ * thp_order - Order of a transparent huge page.
+ * @page: Head page of a transparent huge page.
+ */
+static inline unsigned int thp_order(struct page *page)
+{
+       VM_BUG_ON_PGFLAGS(PageTail(page), page);
+       if (PageHead(page))
+               return HPAGE_PMD_ORDER;
+       return 0;
+}
+
+/**
+ * thp_nr_pages - The number of regular pages in this huge page.
+ * @page: The head page of a huge page.
+ */
+static inline int thp_nr_pages(struct page *page)
+{
+       VM_BUG_ON_PGFLAGS(PageTail(page), page);
+       if (PageHead(page))
                return HPAGE_PMD_NR;
        return 1;
 }
@@ -324,9 +344,21 @@ static inline struct list_head *page_deferred_list(struct page *page)
 #define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; })
 #define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; })
 
-static inline int hpage_nr_pages(struct page *page)
+static inline struct page *thp_head(struct page *page)
+{
+       VM_BUG_ON_PGFLAGS(PageTail(page), page);
+       return page;
+}
+
+static inline unsigned int thp_order(struct page *page)
+{
+       VM_BUG_ON_PGFLAGS(PageTail(page), page);
+       return 0;
+}
+
+static inline int thp_nr_pages(struct page *page)
 {
-       VM_BUG_ON_PAGE(PageTail(page), page);
+       VM_BUG_ON_PGFLAGS(PageTail(page), page);
        return 1;
 }
 
@@ -450,4 +482,15 @@ static inline bool thp_migration_supported(void)
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
+/**
+ * thp_size - Size of a transparent huge page.
+ * @page: Head page of a transparent huge page.
+ *
+ * Return: Number of bytes in this page.
+ */
+static inline unsigned long thp_size(struct page *page)
+{
+       return PAGE_SIZE << thp_order(page);
+}
+
 #endif /* _LINUX_HUGE_MM_H */
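
The thp_head()/thp_order()/thp_nr_pages()/thp_size() helpers that replace hpage_nr_pages() are tied together by thp_size(page) == thp_nr_pages(page) * PAGE_SIZE == PAGE_SIZE << thp_order(page), and all expect a head page. A rough illustration of a caller follows; zero_thp() itself is a hypothetical helper, not part of the patch.

#include <linux/highmem.h>
#include <linux/mm.h>

/* Hypothetical helper: clear every byte of a (possibly huge) head page. */
static void zero_thp(struct page *page)
{
	int i;

	VM_BUG_ON_PAGE(PageTail(page), page);	/* the thp_* helpers want a head page */

	for (i = 0; i < thp_nr_pages(page); i++)
		clear_highpage(page + i);
	/* Total amount of memory cleared: thp_size(page) bytes. */
}
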
index 50650d0..d5cc5f8 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/list.h>
 #include <linux/kref.h>
 #include <linux/pgtable.h>
+#include <linux/gfp.h>
 
 struct ctl_table;
 struct user_struct;
@@ -164,7 +165,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
                        unsigned long addr, unsigned long sz);
 pte_t *huge_pte_offset(struct mm_struct *mm,
                       unsigned long addr, unsigned long sz);
-int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
+int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
+                               unsigned long *addr, pte_t *ptep);
 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
                                unsigned long *start, unsigned long *end);
 struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
@@ -203,8 +205,9 @@ static inline struct address_space *hugetlb_page_mapping_lock_write(
        return NULL;
 }
 
-static inline int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr,
-                                       pte_t *ptep)
+static inline int huge_pmd_unshare(struct mm_struct *mm,
+                                       struct vm_area_struct *vma,
+                                       unsigned long *addr, pte_t *ptep)
 {
        return 0;
 }
@@ -504,13 +507,10 @@ struct huge_bootmem_page {
 
 struct page *alloc_huge_page(struct vm_area_struct *vma,
                                unsigned long addr, int avoid_reserve);
-struct page *alloc_huge_page_node(struct hstate *h, int nid);
 struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
-                               nodemask_t *nmask);
+                               nodemask_t *nmask, gfp_t gfp_mask);
 struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
                                unsigned long address);
-struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
-                                    int nid, nodemask_t *nmask);
 int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
                        pgoff_t idx);
 
@@ -692,6 +692,27 @@ static inline bool hugepage_movable_supported(struct hstate *h)
        return true;
 }
 
+/* Movability of hugepages depends on migration support. */
+static inline gfp_t htlb_alloc_mask(struct hstate *h)
+{
+       if (hugepage_movable_supported(h))
+               return GFP_HIGHUSER_MOVABLE;
+       else
+               return GFP_HIGHUSER;
+}
+
+static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
+{
+       gfp_t modified_mask = htlb_alloc_mask(h);
+
+       /* Some callers might want to enforce node */
+       modified_mask |= (gfp_mask & __GFP_THISNODE);
+
+       modified_mask |= (gfp_mask & __GFP_NOWARN);
+
+       return modified_mask;
+}
+
 static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
                                           struct mm_struct *mm, pte_t *pte)
 {
@@ -759,13 +780,9 @@ static inline struct page *alloc_huge_page(struct vm_area_struct *vma,
        return NULL;
 }
 
-static inline struct page *alloc_huge_page_node(struct hstate *h, int nid)
-{
-       return NULL;
-}
-
 static inline struct page *
-alloc_huge_page_nodemask(struct hstate *h, int preferred_nid, nodemask_t *nmask)
+alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
+                       nodemask_t *nmask, gfp_t gfp_mask)
 {
        return NULL;
 }
@@ -878,6 +895,16 @@ static inline bool hugepage_movable_supported(struct hstate *h)
        return false;
 }
 
+static inline gfp_t htlb_alloc_mask(struct hstate *h)
+{
+       return 0;
+}
+
+static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
+{
+       return 0;
+}
+
 static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
                                           struct mm_struct *mm, pte_t *pte)
 {
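
With the gfp_mask parameter added to alloc_huge_page_nodemask(), callers now choose the allocation mask explicitly; htlb_alloc_mask() gives the movability-aware default and htlb_modify_alloc_mask() folds caller-supplied __GFP_THISNODE/__GFP_NOWARN into it. A hedged sketch of such a caller; alloc_hugetlb_target() is an illustrative name.

#include <linux/gfp.h>
#include <linux/hugetlb.h>
#include <linux/nodemask.h>

/* Illustrative allocation of a hugetlb page, honouring a caller gfp mask. */
static struct page *alloc_hugetlb_target(struct hstate *h, int preferred_nid,
					 nodemask_t *nmask, gfp_t caller_gfp)
{
	gfp_t gfp = htlb_modify_alloc_mask(h, caller_gfp);

	return alloc_huge_page_nodemask(h, preferred_nid, nmask, gfp);
}
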
index d7d4250..78dd703 100644 (file)
@@ -72,7 +72,6 @@ register_wide_hw_breakpoint(struct perf_event_attr *attr,
                            void *context);
 
 extern int register_perf_hw_breakpoint(struct perf_event *bp);
-extern int __register_perf_hw_breakpoint(struct perf_event *bp);
 extern void unregister_hw_breakpoint(struct perf_event *bp);
 extern void unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events);
 
@@ -119,8 +118,6 @@ register_wide_hw_breakpoint(struct perf_event_attr *attr,
                            void *context)              { return NULL; }
 static inline int
 register_perf_hw_breakpoint(struct perf_event *bp)     { return -ENOSYS; }
-static inline int
-__register_perf_hw_breakpoint(struct perf_event *bp)   { return -ENOSYS; }
 static inline void unregister_hw_breakpoint(struct perf_event *bp)     { }
 static inline void
 unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events)        { }
index 4e7714c..fc55ea4 100644 (file)
@@ -231,7 +231,6 @@ enum i2c_alert_protocol {
  * @detect: Callback for device detection
  * @address_list: The I2C addresses to probe (for detect)
  * @clients: List of detected clients we created (for i2c-core use only)
- * @disable_i2c_core_irq_mapping: Tell the i2c-core to not do irq-mapping
  *
  * The driver.owner field should be set to the module owner of this driver.
  * The driver.name field should be set to the name of this driver.
@@ -290,8 +289,6 @@ struct i2c_driver {
        int (*detect)(struct i2c_client *client, struct i2c_board_info *info);
        const unsigned short *address_list;
        struct list_head clients;
-
-       bool disable_i2c_core_irq_mapping;
 };
 #define to_i2c_driver(d) container_of(d, struct i2c_driver, driver)
 
@@ -609,6 +606,14 @@ struct i2c_timings {
  *     may configure padmux here for SDA/SCL line or something else they want.
  * @scl_gpiod: gpiod of the SCL line. Only required for GPIO recovery.
  * @sda_gpiod: gpiod of the SDA line. Only required for GPIO recovery.
+ * @pinctrl: pinctrl used by GPIO recovery to change the state of the I2C pins.
+ *      Optional.
+ * @pins_default: default pinctrl state of SCL/SDA lines, when they are assigned
+ *      to the I2C bus. Optional. Populated internally for GPIO recovery, if
+ *      state with the name PINCTRL_STATE_DEFAULT is found and pinctrl is valid.
+ * @pins_gpio: recovery pinctrl state of SCL/SDA lines, when they are used as
+ *      GPIOs. Optional. Populated internally for GPIO recovery, if this state
+ *      is called "gpio" or "recovery" and pinctrl is valid.
  */
 struct i2c_bus_recovery_info {
        int (*recover_bus)(struct i2c_adapter *adap);
@@ -625,6 +630,9 @@ struct i2c_bus_recovery_info {
        /* gpio recovery */
        struct gpio_desc *scl_gpiod;
        struct gpio_desc *sda_gpiod;
+       struct pinctrl *pinctrl;
+       struct pinctrl_state *pins_default;
+       struct pinctrl_state *pins_gpio;
 };
 
 int i2c_recover_bus(struct i2c_adapter *adap);
index 3e8fa1c..b1ed2f2 100644 (file)
@@ -381,8 +381,7 @@ enum {
 
 #define QI_DEV_EIOTLB_ADDR(a)  ((u64)(a) & VTD_PAGE_MASK)
 #define QI_DEV_EIOTLB_SIZE     (((u64)1) << 11)
-#define QI_DEV_EIOTLB_GLOB(g)  ((u64)g)
-#define QI_DEV_EIOTLB_PASID(p) (((u64)p) << 32)
+#define QI_DEV_EIOTLB_PASID(p) ((u64)((p) & 0xfffff) << 32)
 #define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 16)
 #define QI_DEV_EIOTLB_QDEP(qd) ((u64)((qd) & 0x1f) << 4)
 #define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | \
@@ -600,6 +599,8 @@ struct intel_iommu {
        struct iommu_device iommu;  /* IOMMU core code handle */
        int             node;
        u32             flags;      /* Software defined flags */
+
+       struct dmar_drhd_unit *drhd;
 };
 
 /* PCI domain-device relationship */
@@ -705,7 +706,7 @@ void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
 
 void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
                              u32 pasid, u16 qdep, u64 addr,
-                             unsigned int size_order, u64 granu);
+                             unsigned int size_order);
 void qi_flush_pasid_cache(struct intel_iommu *iommu, u16 did, u64 granu,
                          int pasid);
 
@@ -728,6 +729,7 @@ void iommu_flush_write_buffer(struct intel_iommu *iommu);
 int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev);
 struct dmar_domain *find_domain(struct device *dev);
 struct device_domain_info *get_domain_info(struct device *dev);
+struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn);
 
 #ifdef CONFIG_INTEL_IOMMU_SVM
 extern void intel_svm_check(struct intel_iommu *iommu);
@@ -740,6 +742,9 @@ struct iommu_sva *intel_svm_bind(struct device *dev, struct mm_struct *mm,
                                 void *drvdata);
 void intel_svm_unbind(struct iommu_sva *handle);
 int intel_svm_get_pasid(struct iommu_sva *handle);
+int intel_svm_page_response(struct device *dev, struct iommu_fault_event *evt,
+                           struct iommu_page_response *msg);
+
 struct svm_dev_ops;
 
 struct intel_svm_dev {
@@ -766,8 +771,6 @@ struct intel_svm {
        struct list_head devs;
        struct list_head list;
 };
-
-extern struct intel_iommu *intel_svm_device_to_iommu(struct device *dev);
 #else
 static inline void intel_svm_check(struct intel_iommu *iommu) {}
 #endif
index ae21b72..f32522b 100644 (file)
@@ -57,7 +57,7 @@ static inline void hi_lo_writeq_relaxed(__u64 val, volatile void __iomem *addr)
 
 #ifndef ioread64_hi_lo
 #define ioread64_hi_lo ioread64_hi_lo
-static inline u64 ioread64_hi_lo(void __iomem *addr)
+static inline u64 ioread64_hi_lo(const void __iomem *addr)
 {
        u32 low, high;
 
@@ -79,7 +79,7 @@ static inline void iowrite64_hi_lo(u64 val, void __iomem *addr)
 
 #ifndef ioread64be_hi_lo
 #define ioread64be_hi_lo ioread64be_hi_lo
-static inline u64 ioread64be_hi_lo(void __iomem *addr)
+static inline u64 ioread64be_hi_lo(const void __iomem *addr)
 {
        u32 low, high;
 
index faaa842..448a214 100644 (file)
@@ -57,7 +57,7 @@ static inline void lo_hi_writeq_relaxed(__u64 val, volatile void __iomem *addr)
 
 #ifndef ioread64_lo_hi
 #define ioread64_lo_hi ioread64_lo_hi
-static inline u64 ioread64_lo_hi(void __iomem *addr)
+static inline u64 ioread64_lo_hi(const void __iomem *addr)
 {
        u32 low, high;
 
@@ -79,7 +79,7 @@ static inline void iowrite64_lo_hi(u64 val, void __iomem *addr)
 
 #ifndef ioread64be_lo_hi
 #define ioread64be_lo_hi ioread64be_lo_hi
-static inline u64 ioread64be_lo_hi(void __iomem *addr)
+static inline u64 ioread64be_lo_hi(const void __iomem *addr)
 {
        u32 low, high;
 
index 53d53c6..23285ba 100644 (file)
@@ -155,7 +155,7 @@ struct io_pgtable_cfg {
  */
 struct io_pgtable_ops {
        int (*map)(struct io_pgtable_ops *ops, unsigned long iova,
-                  phys_addr_t paddr, size_t size, int prot);
+                  phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
        size_t (*unmap)(struct io_pgtable_ops *ops, unsigned long iova,
                        size_t size, struct iommu_iotlb_gather *gather);
        phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops,
index 5f0b785..fee209e 100644 (file)
  * if the IOMMU page table format is equivalent.
  */
 #define IOMMU_PRIV     (1 << 5)
-/*
- * Non-coherent masters can use this page protection flag to set cacheable
- * memory attributes for only a transparent outer level of cache, also known as
- * the last-level or system cache.
- */
-#define IOMMU_SYS_CACHE_ONLY   (1 << 6)
 
 struct iommu_ops;
 struct iommu_group;
@@ -457,22 +451,6 @@ extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t io
 extern void iommu_set_fault_handler(struct iommu_domain *domain,
                        iommu_fault_handler_t handler, void *token);
 
-/**
- * iommu_map_sgtable - Map the given buffer to the IOMMU domain
- * @domain:    The IOMMU domain to perform the mapping
- * @iova:      The start address to map the buffer
- * @sgt:       The sg_table object describing the buffer
- * @prot:      IOMMU protection bits
- *
- * Creates a mapping at @iova for the buffer described by a scatterlist
- * stored in the given sg_table object in the provided IOMMU domain.
- */
-static inline size_t iommu_map_sgtable(struct iommu_domain *domain,
-                       unsigned long iova, struct sg_table *sgt, int prot)
-{
-       return iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, prot);
-}
-
 extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
 extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
 extern void generic_iommu_put_resv_regions(struct device *dev,
@@ -1079,6 +1057,22 @@ static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
 }
 #endif /* CONFIG_IOMMU_API */
 
+/**
+ * iommu_map_sgtable - Map the given buffer to the IOMMU domain
+ * @domain:    The IOMMU domain to perform the mapping
+ * @iova:      The start address to map the buffer
+ * @sgt:       The sg_table object describing the buffer
+ * @prot:      IOMMU protection bits
+ *
+ * Creates a mapping at @iova for the buffer described by a scatterlist
+ * stored in the given sg_table object in the provided IOMMU domain.
+ */
+static inline size_t iommu_map_sgtable(struct iommu_domain *domain,
+                       unsigned long iova, struct sg_table *sgt, int prot)
+{
+       return iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, prot);
+}
+
 #ifdef CONFIG_IOMMU_DEBUGFS
 extern struct dentry *iommu_debugfs_dir;
 void iommu_debugfs_setup(void);
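iommu_map_sgtable() is only relocated here, apparently so it sits after the CONFIG_IOMMU_API block and keeps working (via the iommu_map_sg() stub) when the API is compiled out. A usage sketch under the assumption that the domain and sg_table were set up elsewhere:

/* Illustrative only: map a scatter-gather buffer at base_iova and treat
 * a zero return (nothing mapped) as an error. */
static int example_map_buffer(struct iommu_domain *domain,
			      struct sg_table *sgt, unsigned long base_iova)
{
	size_t mapped;

	mapped = iommu_map_sgtable(domain, base_iova, sgt,
				   IOMMU_READ | IOMMU_WRITE);
	return mapped ? 0 : -ENOMEM;
}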
index 216e5ad..dca379c 100644 (file)
@@ -2,7 +2,7 @@
 /**
  * irq-omap-intc.h - INTC Idle Functions
  *
- * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
+ * Copyright (C) 2014 Texas Instruments Incorporated - https://www.ti.com
  *
  * Author: Felipe Balbi <balbi@ti.com>
  */
index 4aaa297..08f9049 100644 (file)
@@ -1381,7 +1381,7 @@ extern int         jbd2_journal_dirty_metadata (handle_t *, struct buffer_head *);
 extern int      jbd2_journal_forget (handle_t *, struct buffer_head *);
 extern int      jbd2_journal_invalidatepage(journal_t *,
                                struct page *, unsigned int, unsigned int);
-extern int      jbd2_journal_try_to_free_buffers(journal_t *, struct page *, gfp_t);
+extern int      jbd2_journal_try_to_free_buffers(journal_t *journal, struct page *page);
 extern int      jbd2_journal_stop(handle_t *);
 extern int      jbd2_journal_flush (journal_t *);
 extern void     jbd2_journal_lock_updates (journal_t *);
index ba2f6a9..19ddd43 100644 (file)
@@ -5,7 +5,7 @@
  *
  * Copyright (C) 2006. Bob Jenkins (bob_jenkins@burtleburtle.net)
  *
- * http://burtleburtle.net/bob/hash/
+ * https://burtleburtle.net/bob/hash/
  *
  * These are the credits from Bob's sources:
  *
index 7339a00..500def6 100644 (file)
@@ -17,7 +17,6 @@
 #include <asm/byteorder.h>
 #include <asm/div64.h>
 #include <uapi/linux/kernel.h>
-#include <asm/div64.h>
 
 #define STACK_MAGIC    0xdeadbeef
 
@@ -322,8 +321,7 @@ void panic(const char *fmt, ...) __noreturn __cold;
 void nmi_panic(struct pt_regs *regs, const char *msg);
 extern void oops_enter(void);
 extern void oops_exit(void);
-void print_oops_end_marker(void);
-extern int oops_may_print(void);
+extern bool oops_may_print(void);
 void do_exit(long error_code) __noreturn;
 void complete_and_exit(struct completion *, long) __noreturn;
 
@@ -347,7 +345,7 @@ int __must_check kstrtoll(const char *s, unsigned int base, long long *res);
  * @res: Where to write the result of the conversion on success.
  *
  * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
- * Used as a replacement for the simple_strtoull. Return code must be checked.
+ * Preferred over simple_strtoul(). Return code must be checked.
 */
 static inline int __must_check kstrtoul(const char *s, unsigned int base, unsigned long *res)
 {
@@ -375,7 +373,7 @@ static inline int __must_check kstrtoul(const char *s, unsigned int base, unsign
  * @res: Where to write the result of the conversion on success.
  *
  * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
- * Used as a replacement for the simple_strtoull. Return code must be checked.
+ * Preferred over simple_strtol(). Return code must be checked.
  */
 static inline int __must_check kstrtol(const char *s, unsigned int base, long *res)
 {
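The reworded kernel-doc above recommends kstrtoul()/kstrtol() over the simple_strto*() helpers; a minimal sketch of the documented contract (the return code must be checked), assuming the string comes from something like a sysfs store callback:

/* Parse an unsigned long in base 10; base 0 would auto-detect 0x/0 prefixes. */
static int example_parse_ul(const char *buf, unsigned long *val)
{
	int ret;

	ret = kstrtoul(buf, 10, val);
	if (ret)
		return ret;	/* -ERANGE on overflow, -EINVAL on bad input */

	return 0;
}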
index 42d2e6a..a12b552 100644 (file)
@@ -23,6 +23,7 @@
 
 #include <linux/time.h>
 #include <linux/jiffies.h>
+#include <asm/bug.h>
 
 /* Nanosecond scalar representation for kernel time values */
 typedef s64    ktime_t;
index dc1da02..dac047a 100644 (file)
@@ -42,7 +42,7 @@ struct kvm_kernel_irqfd {
        wait_queue_entry_t wait;
        /* Update side is protected by irqfds.lock */
        struct kvm_kernel_irq_routing_entry irq_entry;
-       seqcount_t irq_entry_sc;
+       seqcount_spinlock_t irq_entry_sc;
        /* Used for level IRQ fast-path */
        int gsi;
        struct work_struct inject;
index 5eb111f..420b61e 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 // TI LMU Common Core
-// Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+// Copyright (C) 2018 Texas Instruments Incorporated - https://www.ti.com/
 
 #ifndef _TI_LMU_COMMON_H_
 #define _TI_LMU_COMMON_H_
index 18da405..01f251b 100644 (file)
@@ -76,8 +76,9 @@ typedef int (*ndctl_fn)(struct nvdimm_bus_descriptor *nd_desc,
 struct device_node;
 struct nvdimm_bus_descriptor {
        const struct attribute_group **attr_groups;
-       unsigned long bus_dsm_mask;
        unsigned long cmd_mask;
+       unsigned long dimm_family_mask;
+       unsigned long bus_family_mask;
        struct module *module;
        char *provider_name;
        struct device_node *of_node;
@@ -85,6 +86,7 @@ struct nvdimm_bus_descriptor {
        int (*flush_probe)(struct nvdimm_bus_descriptor *nd_desc);
        int (*clear_to_send)(struct nvdimm_bus_descriptor *nd_desc,
                        struct nvdimm *nvdimm, unsigned int cmd, void *data);
+       const struct nvdimm_bus_fw_ops *fw_ops;
 };
 
 struct nd_cmd_desc {
@@ -199,6 +201,49 @@ struct nvdimm_security_ops {
        int (*query_overwrite)(struct nvdimm *nvdimm);
 };
 
+enum nvdimm_fwa_state {
+       NVDIMM_FWA_INVALID,
+       NVDIMM_FWA_IDLE,
+       NVDIMM_FWA_ARMED,
+       NVDIMM_FWA_BUSY,
+       NVDIMM_FWA_ARM_OVERFLOW,
+};
+
+enum nvdimm_fwa_trigger {
+       NVDIMM_FWA_ARM,
+       NVDIMM_FWA_DISARM,
+};
+
+enum nvdimm_fwa_capability {
+       NVDIMM_FWA_CAP_INVALID,
+       NVDIMM_FWA_CAP_NONE,
+       NVDIMM_FWA_CAP_QUIESCE,
+       NVDIMM_FWA_CAP_LIVE,
+};
+
+enum nvdimm_fwa_result {
+       NVDIMM_FWA_RESULT_INVALID,
+       NVDIMM_FWA_RESULT_NONE,
+       NVDIMM_FWA_RESULT_SUCCESS,
+       NVDIMM_FWA_RESULT_NOTSTAGED,
+       NVDIMM_FWA_RESULT_NEEDRESET,
+       NVDIMM_FWA_RESULT_FAIL,
+};
+
+struct nvdimm_bus_fw_ops {
+       enum nvdimm_fwa_state (*activate_state)
+               (struct nvdimm_bus_descriptor *nd_desc);
+       enum nvdimm_fwa_capability (*capability)
+               (struct nvdimm_bus_descriptor *nd_desc);
+       int (*activate)(struct nvdimm_bus_descriptor *nd_desc);
+};
+
+struct nvdimm_fw_ops {
+       enum nvdimm_fwa_state (*activate_state)(struct nvdimm *nvdimm);
+       enum nvdimm_fwa_result (*activate_result)(struct nvdimm *nvdimm);
+       int (*arm)(struct nvdimm *nvdimm, enum nvdimm_fwa_trigger arg);
+};
+
 void badrange_init(struct badrange *badrange);
 int badrange_add(struct badrange *badrange, u64 addr, u64 length);
 void badrange_forget(struct badrange *badrange, phys_addr_t start,
@@ -224,14 +269,15 @@ struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus,
                void *provider_data, const struct attribute_group **groups,
                unsigned long flags, unsigned long cmd_mask, int num_flush,
                struct resource *flush_wpq, const char *dimm_id,
-               const struct nvdimm_security_ops *sec_ops);
+               const struct nvdimm_security_ops *sec_ops,
+               const struct nvdimm_fw_ops *fw_ops);
 static inline struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus,
                void *provider_data, const struct attribute_group **groups,
                unsigned long flags, unsigned long cmd_mask, int num_flush,
                struct resource *flush_wpq)
 {
        return __nvdimm_create(nvdimm_bus, provider_data, groups, flags,
-                       cmd_mask, num_flush, flush_wpq, NULL, NULL);
+                       cmd_mask, num_flush, flush_wpq, NULL, NULL, NULL);
 }
 
 const struct nd_cmd_desc *nd_cmd_dimm_desc(int cmd);
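The new enums and ops structures above describe firmware-activation plumbing for NVDIMM buses and DIMMs. A hedged sketch of how a bus provider might wire them up; every example_* symbol is hypothetical, and only the structure layouts and the fw_ops descriptor member come from the header:

static enum nvdimm_fwa_state
example_activate_state(struct nvdimm_bus_descriptor *nd_desc)
{
	/* Report whatever the platform interface says; fixed value here. */
	return NVDIMM_FWA_IDLE;
}

static enum nvdimm_fwa_capability
example_capability(struct nvdimm_bus_descriptor *nd_desc)
{
	return NVDIMM_FWA_CAP_QUIESCE;
}

static int example_activate(struct nvdimm_bus_descriptor *nd_desc)
{
	return 0;
}

static const struct nvdimm_bus_fw_ops example_bus_fw_ops = {
	.activate_state	= example_activate_state,
	.capability	= example_capability,
	.activate	= example_activate,
};

/* During bus setup: nd_desc->fw_ops = &example_bus_fw_ops; the per-DIMM
 * struct nvdimm_fw_ops would be handed to __nvdimm_create() as the new
 * trailing argument. */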
index 39a3569..62a382d 100644 (file)
@@ -11,6 +11,7 @@
 #define __LINUX_LOCKDEP_H
 
 #include <linux/lockdep_types.h>
+#include <linux/smp.h>
 #include <asm/percpu.h>
 
 struct task_struct;
index af998f9..2a8c74d 100644 (file)
@@ -15,7 +15,7 @@
  */
 
 /*
- * The macro LSM_HOOK is used to define the data structures required by the
+ * The macro LSM_HOOK is used to define the data structures required by
  * the LSM framework using the pattern:
  *
  *     LSM_HOOK(<return_type>, <default_value>, <hook_name>, args...)
index 95b7c1d..9e2e3e6 100644 (file)
  *     structure. Note that the security field was not added directly to the
  *     socket structure, but rather, the socket security information is stored
  *     in the associated inode.  Typically, the inode alloc_security hook will
- *     allocate and and attach security information to
+ *     allocate and attach security information to
  *     SOCK_INODE(sock)->i_security.  This hook may be used to update the
  *     SOCK_INODE(sock)->i_security field with additional information that
  *     wasn't available when the inode was allocated.
index 1bb49b6..d0b0361 100644 (file)
@@ -32,6 +32,7 @@ struct kmem_cache;
 enum memcg_stat_item {
        MEMCG_SWAP = NR_VM_NODE_STAT_ITEMS,
        MEMCG_SOCK,
+       MEMCG_PERCPU_B,
        MEMCG_NR_STAT,
 };
 
@@ -64,8 +65,8 @@ struct mem_cgroup_id {
 
 /*
  * Per memcg event counter is incremented at every pagein/pageout. With THP,
- * it will be incremated by the number of pages. This counter is used for
- * for trigger some periodic events. This is straightforward and better
+ * it will be incremented by the number of pages. This counter is used
+ * to trigger some periodic events. This is straightforward and better
  * than using jiffies etc. to handle periodic memcg event.
  */
 enum mem_cgroup_events_target {
@@ -339,6 +340,13 @@ struct mem_cgroup {
 
 extern struct mem_cgroup *root_mem_cgroup;
 
+static __always_inline bool memcg_stat_item_in_bytes(int idx)
+{
+       if (idx == MEMCG_PERCPU_B)
+               return true;
+       return vmstat_item_in_bytes(idx);
+}
+
 static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
 {
        return (memcg == root_mem_cgroup);
@@ -622,7 +630,7 @@ unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
        struct mem_cgroup_per_node *mz;
 
        mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
-       return mz->lru_zone_size[zone_idx][lru];
+       return READ_ONCE(mz->lru_zone_size[zone_idx][lru]);
 }
 
 void mem_cgroup_handle_over_high(void);
index ea9c15b..5f1c74d 100644 (file)
@@ -6,7 +6,7 @@
 #ifndef _LINUX_MEMPOLICY_H
 #define _LINUX_MEMPOLICY_H 1
 
-
+#include <linux/sched.h>
 #include <linux/mmzone.h>
 #include <linux/dax.h>
 #include <linux/slab.h>
@@ -28,7 +28,7 @@ struct mm_struct;
  * the process policy is used. Interrupts ignore the memory policy
  * of the current process.
  *
- * Locking policy for interlave:
+ * Locking policy for interleave:
  * In process context there is no locking because only the process accesses
  * its own state. All vma manipulation is somewhat protected by a down_read on
  * mmap_lock.
@@ -152,6 +152,15 @@ extern int huge_node(struct vm_area_struct *vma,
 extern bool init_nodemask_of_mempolicy(nodemask_t *mask);
 extern bool mempolicy_nodemask_intersects(struct task_struct *tsk,
                                const nodemask_t *mask);
+extern nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy);
+
+static inline nodemask_t *policy_nodemask_current(gfp_t gfp)
+{
+       struct mempolicy *mpol = get_task_policy(current);
+
+       return policy_nodemask(gfp, mpol);
+}
+
 extern unsigned int mempolicy_slab_node(void);
 
 extern enum zone_type policy_zone;
@@ -281,5 +290,10 @@ static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma,
 static inline void mpol_put_task_policy(struct task_struct *task)
 {
 }
+
+static inline nodemask_t *policy_nodemask_current(gfp_t gfp)
+{
+       return NULL;
+}
 #endif /* CONFIG_NUMA */
 #endif
index ab76cdd..4b35baa 100644 (file)
@@ -14,7 +14,7 @@
 
 #define MFD_RES_SIZE(arr) (sizeof(arr) / sizeof(struct resource))
 
-#define MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, _compat, _match)\
+#define MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, _compat, _of_reg, _use_of_reg, _match) \
        {                                                               \
                .name = (_name),                                        \
                .resources = (_res),                                    \
                .platform_data = (_pdata),                              \
                .pdata_size = (_pdsize),                                \
                .of_compatible = (_compat),                             \
+               .of_reg = (_of_reg),                                    \
+               .use_of_reg = (_use_of_reg),                            \
                .acpi_match = (_match),                                 \
                .id = (_id),                                            \
        }
 
-#define OF_MFD_CELL(_name, _res, _pdata, _pdsize,_id, _compat)         \
-       MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, _compat, NULL)  \
+#define OF_MFD_CELL_REG(_name, _res, _pdata, _pdsize, _id, _compat, _of_reg) \
+       MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, _compat, _of_reg, true, NULL)
 
-#define ACPI_MFD_CELL(_name, _res, _pdata, _pdsize, _id, _match)       \
-       MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, NULL, _match)   \
+#define OF_MFD_CELL(_name, _res, _pdata, _pdsize, _id, _compat) \
+       MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, _compat, 0, false, NULL)
 
-#define MFD_CELL_BASIC(_name, _res, _pdata, _pdsize, _id)              \
-       MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, NULL, NULL)     \
+#define ACPI_MFD_CELL(_name, _res, _pdata, _pdsize, _id, _match) \
+       MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, NULL, 0, false, _match)
 
-#define MFD_CELL_RES(_name, _res)                                      \
-       MFD_CELL_ALL(_name, _res, NULL, 0, 0, NULL, NULL)               \
+#define MFD_CELL_BASIC(_name, _res, _pdata, _pdsize, _id) \
+       MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, NULL, 0, false, NULL)
 
-#define MFD_CELL_NAME(_name)                                           \
-       MFD_CELL_ALL(_name, NULL, NULL, 0, 0, NULL, NULL)               \
+#define MFD_CELL_RES(_name, _res) \
+       MFD_CELL_ALL(_name, _res, NULL, 0, 0, NULL, 0, false, NULL)
+
+#define MFD_CELL_NAME(_name) \
+       MFD_CELL_ALL(_name, NULL, NULL, 0, 0, NULL, 0, false, NULL)
+
+#define MFD_DEP_LEVEL_NORMAL 0
+#define MFD_DEP_LEVEL_HIGH 1
 
 struct irq_domain;
 struct property_entry;
@@ -58,6 +66,7 @@ struct mfd_cell_acpi_match {
 struct mfd_cell {
        const char              *name;
        int                     id;
+       int                     level;
 
        int                     (*enable)(struct platform_device *dev);
        int                     (*disable)(struct platform_device *dev);
@@ -78,6 +87,16 @@ struct mfd_cell {
         */
        const char              *of_compatible;
 
+       /*
+        * Address as defined in Device Tree.  Used to complement 'of_compatible'
+        * (above) when matching OF nodes with devices that have identical
+        * compatible strings
+        */
+       const u64 of_reg;
+
+       /* Set to 'true' to use 'of_reg' (above) - allows for of_reg=0 */
+       bool use_of_reg;
+
        /* Matches ACPI */
        const struct mfd_cell_acpi_match        *acpi_match;
 
@@ -135,6 +154,7 @@ static inline int mfd_add_hotplug_devices(struct device *parent,
 }
 
 extern void mfd_remove_devices(struct device *parent);
+extern void mfd_remove_devices_late(struct device *parent);
 
 extern int devm_mfd_add_devices(struct device *dev, int id,
                                const struct mfd_cell *cells, int n_devs,
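The reworked MFD_CELL_ALL() above adds of_reg/use_of_reg so a parent can disambiguate child OF nodes that share one compatible string. An illustrative cell table; the names, compatibles and unit addresses are made up:

static const struct mfd_cell example_cells[] = {
	/* Two children with the same compatible, told apart by 'reg'. */
	OF_MFD_CELL_REG("example-adc", NULL, NULL, 0, 0,
			"vendor,example-adc", 0x10),
	OF_MFD_CELL_REG("example-adc", NULL, NULL, 0, 1,
			"vendor,example-adc", 0x20),
	/* A child without a unit address keeps using OF_MFD_CELL(). */
	OF_MFD_CELL("example-pwm", NULL, NULL, 0, 0, "vendor,example-pwm"),
};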
index eac48e4..d3f1269 100644 (file)
@@ -35,7 +35,7 @@ struct da9055_pdata {
        int *gpio_rsel;
        /*
         * Regulator mode control bits value (GPI offset) that
-        * that controls the regulator state, 0 if not available.
+        * controls the regulator state, 0 if not available.
         */
        enum gpio_select *reg_ren;
        /*
index 5cd06ab..fa7a43f 100644 (file)
@@ -35,6 +35,7 @@ enum da9063_variant_codes {
        PMIC_DA9063_AD = 0x3,
        PMIC_DA9063_BB = 0x5,
        PMIC_DA9063_CA = 0x6,
+       PMIC_DA9063_DA = 0x7,
 };
 
 /* Interrupts */
index ba706b0..1dbabf1 100644 (file)
 #define        DA9063_BB_REG_GP_ID_19          0x134
 
 /* Chip ID and variant */
-#define        DA9063_REG_CHIP_ID              0x181
-#define        DA9063_REG_CHIP_VARIANT         0x182
+#define        DA9063_REG_DEVICE_ID            0x181
+#define        DA9063_REG_VARIANT_ID           0x182
+#define        DA9063_REG_CUSTOMER_ID          0x183
+#define        DA9063_REG_CONFIG_ID            0x184
 
 /*
  * PMIC registers bits
 #define        DA9063_RTC_CLOCK                        0x40
 #define        DA9063_OUT_32K_EN                       0x80
 
-/* DA9063_REG_CHIP_VARIANT */
-#define        DA9063_CHIP_VARIANT_SHIFT               4
-
 /* DA9063_REG_BUCK_ILIM_A (addr=0x9A) */
 #define DA9063_BIO_ILIM_MASK                   0x0F
 #define DA9063_BMEM_ILIM_MASK                  0xF0
 #define                DA9063_MON_A10_IDX_LDO9         0x04
 #define                DA9063_MON_A10_IDX_LDO10        0x05
 
+/* DA9063_REG_VARIANT_ID (addr=0x182) */
+#define        DA9063_VARIANT_ID_VRC_SHIFT             0
+#define DA9063_VARIANT_ID_VRC_MASK             0x0F
+#define        DA9063_VARIANT_ID_MRC_SHIFT             4
+#define DA9063_VARIANT_ID_MRC_MASK             0xF0
+
 #endif /* _DA9063_REG_H */
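With DA9063_REG_CHIP_VARIANT replaced by DA9063_REG_VARIANT_ID plus explicit MRC/VRC masks, decoding a revision could look like the sketch below. The regmap handle is assumed to come from the MFD core; this is not the driver's actual probe path:

static int example_da9063_revision(struct regmap *regmap, u8 *mrc, u8 *vrc)
{
	unsigned int val;
	int ret;

	ret = regmap_read(regmap, DA9063_REG_VARIANT_ID, &val);
	if (ret)
		return ret;

	*mrc = (val & DA9063_VARIANT_ID_MRC_MASK) >> DA9063_VARIANT_ID_MRC_SHIFT;
	*vrc = (val & DA9063_VARIANT_ID_VRC_MASK) >> DA9063_VARIANT_ID_VRC_SHIFT;
	return 0;
}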
index bbc6448..2cadf88 100644 (file)
@@ -5,7 +5,7 @@
  * Copyright (c) <2011-2014> HiSilicon Technologies Co., Ltd.
  *              http://www.hisilicon.com
  * Copyright (c) <2013-2014> Linaro Ltd.
- *              http://www.linaro.org
+ *              https://www.linaro.org
  *
  * Author: Guodong Xu <guodong.xu@linaro.org>
  */
diff --git a/include/linux/mfd/khadas-mcu.h b/include/linux/mfd/khadas-mcu.h
new file mode 100644 (file)
index 0000000..a99ba2e
--- /dev/null
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Khadas System control Microcontroller Register map
+ *
+ * Copyright (C) 2020 BayLibre SAS
+ *
+ * Author(s): Neil Armstrong <narmstrong@baylibre.com>
+ */
+
+#ifndef MFD_KHADAS_MCU_H
+#define MFD_KHADAS_MCU_H
+
+#define KHADAS_MCU_PASSWD_VEN_0_REG            0x00 /* RO */
+#define KHADAS_MCU_PASSWD_VEN_1_REG            0x01 /* RO */
+#define KHADAS_MCU_PASSWD_VEN_2_REG            0x02 /* RO */
+#define KHADAS_MCU_PASSWD_VEN_3_REG            0x03 /* RO */
+#define KHADAS_MCU_PASSWD_VEN_4_REG            0x04 /* RO */
+#define KHADAS_MCU_PASSWD_VEN_5_REG            0x05 /* RO */
+#define KHADAS_MCU_MAC_0_REG                   0x06 /* RO */
+#define KHADAS_MCU_MAC_1_REG                   0x07 /* RO */
+#define KHADAS_MCU_MAC_2_REG                   0x08 /* RO */
+#define KHADAS_MCU_MAC_3_REG                   0x09 /* RO */
+#define KHADAS_MCU_MAC_4_REG                   0x0a /* RO */
+#define KHADAS_MCU_MAC_5_REG                   0x0b /* RO */
+#define KHADAS_MCU_USID_0_REG                  0x0c /* RO */
+#define KHADAS_MCU_USID_1_REG                  0x0d /* RO */
+#define KHADAS_MCU_USID_2_REG                  0x0e /* RO */
+#define KHADAS_MCU_USID_3_REG                  0x0f /* RO */
+#define KHADAS_MCU_USID_4_REG                  0x10 /* RO */
+#define KHADAS_MCU_USID_5_REG                  0x11 /* RO */
+#define KHADAS_MCU_VERSION_0_REG               0x12 /* RO */
+#define KHADAS_MCU_VERSION_1_REG               0x13 /* RO */
+#define KHADAS_MCU_DEVICE_NO_0_REG             0x14 /* RO */
+#define KHADAS_MCU_DEVICE_NO_1_REG             0x15 /* RO */
+#define KHADAS_MCU_FACTORY_TEST_REG            0x16 /* R */
+#define KHADAS_MCU_BOOT_MODE_REG               0x20 /* RW */
+#define KHADAS_MCU_BOOT_EN_WOL_REG             0x21 /* RW */
+#define KHADAS_MCU_BOOT_EN_RTC_REG             0x22 /* RW */
+#define KHADAS_MCU_BOOT_EN_EXP_REG             0x23 /* RW */
+#define KHADAS_MCU_BOOT_EN_IR_REG              0x24 /* RW */
+#define KHADAS_MCU_BOOT_EN_DCIN_REG            0x25 /* RW */
+#define KHADAS_MCU_BOOT_EN_KEY_REG             0x26 /* RW */
+#define KHADAS_MCU_KEY_MODE_REG                        0x27 /* RW */
+#define KHADAS_MCU_LED_MODE_ON_REG             0x28 /* RW */
+#define KHADAS_MCU_LED_MODE_OFF_REG            0x29 /* RW */
+#define KHADAS_MCU_SHUTDOWN_NORMAL_REG         0x2c /* RW */
+#define KHADAS_MCU_MAC_SWITCH_REG              0x2d /* RW */
+#define KHADAS_MCU_MCU_SLEEP_MODE_REG          0x2e /* RW */
+#define KHADAS_MCU_IR_CODE1_0_REG              0x2f /* RW */
+#define KHADAS_MCU_IR_CODE1_1_REG              0x30 /* RW */
+#define KHADAS_MCU_IR_CODE1_2_REG              0x31 /* RW */
+#define KHADAS_MCU_IR_CODE1_3_REG              0x32 /* RW */
+#define KHADAS_MCU_USB_PCIE_SWITCH_REG         0x33 /* RW */
+#define KHADAS_MCU_IR_CODE2_0_REG              0x34 /* RW */
+#define KHADAS_MCU_IR_CODE2_1_REG              0x35 /* RW */
+#define KHADAS_MCU_IR_CODE2_2_REG              0x36 /* RW */
+#define KHADAS_MCU_IR_CODE2_3_REG              0x37 /* RW */
+#define KHADAS_MCU_PASSWD_USER_0_REG           0x40 /* RW */
+#define KHADAS_MCU_PASSWD_USER_1_REG           0x41 /* RW */
+#define KHADAS_MCU_PASSWD_USER_2_REG           0x42 /* RW */
+#define KHADAS_MCU_PASSWD_USER_3_REG           0x43 /* RW */
+#define KHADAS_MCU_PASSWD_USER_4_REG           0x44 /* RW */
+#define KHADAS_MCU_PASSWD_USER_5_REG           0x45 /* RW */
+#define KHADAS_MCU_USER_DATA_0_REG             0x46 /* RW 56 bytes */
+#define KHADAS_MCU_PWR_OFF_CMD_REG             0x80 /* WO */
+#define KHADAS_MCU_PASSWD_START_REG            0x81 /* WO */
+#define KHADAS_MCU_CHECK_VEN_PASSWD_REG                0x82 /* WO */
+#define KHADAS_MCU_CHECK_USER_PASSWD_REG       0x83 /* WO */
+#define KHADAS_MCU_SHUTDOWN_NORMAL_STATUS_REG  0x86 /* RO */
+#define KHADAS_MCU_WOL_INIT_START_REG          0x87 /* WO */
+#define KHADAS_MCU_CMD_FAN_STATUS_CTRL_REG     0x88 /* WO */
+
+enum {
+       KHADAS_BOARD_VIM1 = 0x1,
+       KHADAS_BOARD_VIM2,
+       KHADAS_BOARD_VIM3,
+       KHADAS_BOARD_EDGE = 0x11,
+       KHADAS_BOARD_EDGE_V,
+};
+
+/**
+ * struct khadas_mcu - Khadas MCU structure
+ * @dev:               device reference used for logs
+ * @regmap:            register map
+ */
+struct khadas_mcu {
+       struct device *dev;
+       struct regmap *regmap;
+};
+
+#endif /* MFD_KHADAS_MCU_H */
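A hedged sketch of how a sub-driver might consume the shared struct khadas_mcu, assuming the parent MFD driver fills in dev and regmap; the helper name and the byte order of the two version registers are assumptions made for illustration:

static int example_khadas_mcu_version(struct khadas_mcu *mcu, u16 *version)
{
	unsigned int hi, lo;
	int ret;

	ret = regmap_read(mcu->regmap, KHADAS_MCU_VERSION_0_REG, &hi);
	if (ret)
		return ret;

	ret = regmap_read(mcu->regmap, KHADAS_MCU_VERSION_1_REG, &lo);
	if (ret)
		return ret;

	*version = (hi << 8) | lo;	/* byte order assumed for illustration */
	return 0;
}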
index edbec83..5546688 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Functions to access LP873X power management chip.
  *
- * Copyright (C) 2016 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2016 Texas Instruments Incorporated - https://www.ti.com/
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License as
index ce96535..43716ac 100644 (file)
@@ -2,7 +2,7 @@
 /*
  * Functions to access LP87565 power management chip.
  *
- * Copyright (C) 2017 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2017 Texas Instruments Incorporated - https://www.ti.com/
  */
 
 #ifndef __LINUX_MFD_LP87565_H
index fa9595d..601cbbc 100644 (file)
@@ -21,7 +21,6 @@
 
 struct gpio_desc;
 struct pinctrl_map;
-struct madera_codec_pdata;
 
 /**
  * struct madera_pdata - Configuration data for Madera devices
index e798c81..311f7d3 100644 (file)
@@ -131,7 +131,7 @@ enum max77693_pmic_reg {
 #define FLASH_INT_FLED1_SHORT  BIT(3)
 #define FLASH_INT_OVER_CURRENT BIT(4)
 
-/* Fast charge timer in in hours */
+/* Fast charge timer in hours */
 #define DEFAULT_FAST_CHARGE_TIMER              4
 /* microamps */
 #define DEFAULT_TOP_OFF_THRESHOLD_CURRENT      150000
index d469aa4..b08570f 100644 (file)
@@ -9,11 +9,9 @@
 #ifndef _SKY81452_H
 #define _SKY81452_H
 
-#include <linux/platform_data/sky81452-backlight.h>
 #include <linux/regulator/machine.h>
 
 struct sky81452_platform_data {
-       struct sky81452_bl_platform_data *bl_pdata;
        struct regulator_init_data *regulator_init_data;
 };
 
diff --git a/include/linux/mfd/smsc.h b/include/linux/mfd/smsc.h
deleted file mode 100644 (file)
index 8394412..0000000
+++ /dev/null
@@ -1,104 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * SMSC ECE1099
- *
- * Copyright 2012 Texas Instruments Inc.
- *
- * Author: Sourav Poddar <sourav.poddar@ti.com>
- */
-
-#ifndef __LINUX_MFD_SMSC_H
-#define __LINUX_MFD_SMSC_H
-
-#include <linux/regmap.h>
-
-#define SMSC_ID_ECE1099                        1
-#define SMSC_NUM_CLIENTS               2
-
-#define SMSC_BASE_ADDR                 0x38
-#define OMAP_GPIO_SMSC_IRQ             151
-
-#define SMSC_MAXGPIO         32
-#define SMSC_BANK(offs)      ((offs) >> 3)
-#define SMSC_BIT(offs)       (1u << ((offs) & 0x7))
-
-struct smsc {
-       struct device *dev;
-       struct i2c_client *i2c_clients[SMSC_NUM_CLIENTS];
-       struct regmap *regmap;
-       int clk;
-       /* Stored chip id */
-       int id;
-};
-
-struct smsc_gpio;
-struct smsc_keypad;
-
-static inline int smsc_read(struct device *child, unsigned int reg,
-       unsigned int *dest)
-{
-       struct smsc     *smsc = dev_get_drvdata(child->parent);
-
-       return regmap_read(smsc->regmap, reg, dest);
-}
-
-static inline int smsc_write(struct device *child, unsigned int reg,
-       unsigned int value)
-{
-       struct smsc     *smsc = dev_get_drvdata(child->parent);
-
-       return regmap_write(smsc->regmap, reg, value);
-}
-
-/* Registers for SMSC */
-#define SMSC_RESET                                             0xF5
-#define SMSC_GRP_INT                                           0xF9
-#define SMSC_CLK_CTRL                                          0xFA
-#define SMSC_WKUP_CTRL                                         0xFB
-#define SMSC_DEV_ID                                            0xFC
-#define SMSC_DEV_REV                                           0xFD
-#define SMSC_VEN_ID_L                                          0xFE
-#define SMSC_VEN_ID_H                                          0xFF
-
-/* CLK VALUE */
-#define SMSC_CLK_VALUE                                         0x13
-
-/* Registers for function GPIO INPUT */
-#define SMSC_GPIO_DATA_IN_START                                        0x00
-
-/* Registers for function GPIO OUPUT */
-#define SMSC_GPIO_DATA_OUT_START                                       0x05
-
-/* Definitions for SMSC GPIO CONFIGURATION REGISTER*/
-#define SMSC_GPIO_INPUT_LOW                                    0x01
-#define SMSC_GPIO_INPUT_RISING                                 0x09
-#define SMSC_GPIO_INPUT_FALLING                                        0x11
-#define SMSC_GPIO_INPUT_BOTH_EDGE                              0x19
-#define SMSC_GPIO_OUTPUT_PP                                    0x21
-#define SMSC_GPIO_OUTPUT_OP                                    0x31
-
-#define GRP_INT_STAT                                           0xf9
-#define        SMSC_GPI_INT                                            0x0f
-#define SMSC_CFG_START                                         0x0A
-
-/* Registers for SMSC GPIO INTERRUPT STATUS REGISTER*/
-#define SMSC_GPIO_INT_STAT_START                                  0x32
-
-/* Registers for SMSC GPIO INTERRUPT MASK REGISTER*/
-#define SMSC_GPIO_INT_MASK_START                               0x37
-
-/* Registers for SMSC function KEYPAD*/
-#define SMSC_KP_OUT                                            0x40
-#define SMSC_KP_IN                                             0x41
-#define SMSC_KP_INT_STAT                                       0x42
-#define SMSC_KP_INT_MASK                                       0x43
-
-/* Definitions for keypad */
-#define SMSC_KP_KSO           0x70
-#define SMSC_KP_KSI           0x51
-#define SMSC_KSO_ALL_LOW        0x20
-#define SMSC_KP_SET_LOW_PWR        0x0B
-#define SMSC_KP_SET_HIGH           0xFF
-#define SMSC_KSO_EVAL           0x00
-
-#endif /*  __LINUX_MFD_SMSC_H */
index 605f622..90b2055 100644 (file)
 #define STM32_LPTIM_CMPOK              BIT(3)
 
 /* STM32_LPTIM_ICR - bit fields */
+#define STM32_LPTIM_ARRMCF             BIT(1)
 #define STM32_LPTIM_CMPOKCF_ARROKCF    GENMASK(4, 3)
 
+/* STM32_LPTIM_IER - bit fields */
+#define STM32_LPTIM_ARRMIE     BIT(1)
+
 /* STM32_LPTIM_CR - bit fields */
 #define STM32_LPTIM_CNTSTRT    BIT(2)
+#define STM32_LPTIM_SNGSTRT    BIT(1)
 #define STM32_LPTIM_ENABLE     BIT(0)
 
 /* STM32_LPTIM_CFGR - bit fields */
index 4831684..ffc091b 100644 (file)
@@ -4,7 +4,7 @@
 /*
  * TI Touch Screen / ADC MFD driver
  *
- * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com/
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License as
index a228ae4..e0a417e 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2015 Texas Instruments Incorporated - https://www.ti.com/
  *     Andrew F. Davis <afd@ti.com>
  *
  * This program is free software; you can redistribute it and/or
index b5dd108..db70918 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Functions to access TPS65217 power management chip.
  *
- * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License as
index b0470c3..f4ca367 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Functions to access TPS65219 power management chip.
  *
- * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2014 Texas Instruments Incorporated - https://www.ti.com/
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License version 2 as
index b25d029..7943e41 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2015 Texas Instruments Incorporated - https://www.ti.com/
  *     Andrew F. Davis <afd@ti.com>
  *
  * This program is free software; you can redistribute it and/or
index 540998d..0f8d158 100644 (file)
@@ -10,6 +10,8 @@
 typedef struct page *new_page_t(struct page *page, unsigned long private);
 typedef void free_page_t(struct page *page, unsigned long private);
 
+struct migration_target_control;
+
 /*
  * Return values from addresss_space_operations.migratepage():
  * - negative errno on page migration failure;
@@ -31,34 +33,6 @@ enum migrate_reason {
 /* In mm/debug.c; also keep sync with include/trace/events/migrate.h */
 extern const char *migrate_reason_names[MR_TYPES];
 
-static inline struct page *new_page_nodemask(struct page *page,
-                               int preferred_nid, nodemask_t *nodemask)
-{
-       gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL;
-       unsigned int order = 0;
-       struct page *new_page = NULL;
-
-       if (PageHuge(page))
-               return alloc_huge_page_nodemask(page_hstate(compound_head(page)),
-                               preferred_nid, nodemask);
-
-       if (PageTransHuge(page)) {
-               gfp_mask |= GFP_TRANSHUGE;
-               order = HPAGE_PMD_ORDER;
-       }
-
-       if (PageHighMem(page) || (zone_idx(page_zone(page)) == ZONE_MOVABLE))
-               gfp_mask |= __GFP_HIGHMEM;
-
-       new_page = __alloc_pages_nodemask(gfp_mask, order,
-                               preferred_nid, nodemask);
-
-       if (new_page && PageTransHuge(new_page))
-               prep_transhuge_page(new_page);
-
-       return new_page;
-}
-
 #ifdef CONFIG_MIGRATION
 
 extern void putback_movable_pages(struct list_head *l);
@@ -67,6 +41,7 @@ extern int migrate_page(struct address_space *mapping,
                        enum migrate_mode mode);
 extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
                unsigned long private, enum migrate_mode mode, int reason);
+extern struct page *alloc_migration_target(struct page *page, unsigned long private);
 extern int isolate_movable_page(struct page *page, isolate_mode_t mode);
 extern void putback_movable_page(struct page *page);
 
@@ -85,6 +60,9 @@ static inline int migrate_pages(struct list_head *l, new_page_t new,
                free_page_t free, unsigned long private, enum migrate_mode mode,
                int reason)
        { return -ENOSYS; }
+static inline struct page *alloc_migration_target(struct page *page,
+               unsigned long private)
+       { return NULL; }
 static inline int isolate_movable_page(struct page *page, isolate_mode_t mode)
        { return -EBUSY; }
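The open-coded new_page_nodemask() above gives way to alloc_migration_target(), which callers hand to migrate_pages() together with an opaque 'private' cookie. A sketch of the new call shape only; the cookie is expected to reference the mm-internal struct migration_target_control (forward-declared above, not defined here), so the cast is purely illustrative:

static int example_migrate(struct list_head *pages,
			   struct migration_target_control *mtc)
{
	/* MIGRATE_SYNC and MR_MEMORY_HOTPLUG are picked only for the example. */
	return migrate_pages(pages, alloc_migration_target, NULL,
			     (unsigned long)mtc, MIGRATE_SYNC,
			     MR_MEMORY_HOTPLUG);
}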
 
index f6a82f9..1983e08 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/resource.h>
 #include <linux/page_ext.h>
 #include <linux/err.h>
+#include <linux/page-flags.h>
 #include <linux/page_ref.h>
 #include <linux/memremap.h>
 #include <linux/overflow.h>
@@ -38,6 +39,7 @@ struct file_ra_state;
 struct user_struct;
 struct writeback_control;
 struct bdi_writeback;
+struct pt_regs;
 
 void init_mm_internals(void);
 
@@ -479,7 +481,7 @@ static inline bool fault_flag_allow_retry_first(unsigned int flags)
        { FAULT_FLAG_INTERRUPTIBLE,     "INTERRUPTIBLE" }
 
 /*
- * vm_fault is filled by the the pagefault handler and passed to the vma's
+ * vm_fault is filled by the pagefault handler and passed to the vma's
  * ->fault function. The vma's ->fault is responsible for returning a bitmask
  * of VM_FAULT_xxx flags that give details about how the fault was handled.
  *
@@ -667,11 +669,6 @@ int vma_is_stack_for_current(struct vm_area_struct *vma);
 struct mmu_gather;
 struct inode;
 
-/*
- * FIXME: take this include out, include page-flags.h in
- * files which need it (119 of them)
- */
-#include <linux/page-flags.h>
 #include <linux/huge_mm.h>
 
 /*
@@ -921,12 +918,15 @@ static inline int compound_pincount(struct page *page)
 static inline void set_compound_order(struct page *page, unsigned int order)
 {
        page[1].compound_order = order;
+       page[1].compound_nr = 1U << order;
 }
 
 /* Returns the number of pages in this potentially compound page. */
 static inline unsigned long compound_nr(struct page *page)
 {
-       return 1UL << compound_order(page);
+       if (!PageHead(page))
+               return 1;
+       return page[1].compound_nr;
 }
 
 /* Returns the number of bytes in this potentially compound page. */
@@ -1067,6 +1067,7 @@ vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);
 
 static inline enum zone_type page_zonenum(const struct page *page)
 {
+       ASSERT_EXCLUSIVE_BITS(page->flags, ZONES_MASK << ZONES_PGSHIFT);
        return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
 }
 
@@ -1594,6 +1595,7 @@ static inline void clear_page_pfmemalloc(struct page *page)
 extern void pagefault_out_of_memory(void);
 
 #define offset_in_page(p)      ((unsigned long)(p) & ~PAGE_MASK)
+#define offset_in_thp(page, p) ((unsigned long)(p) & (thp_size(page) - 1))
 
 /*
  * Flags passed to show_mem() and show_free_areas() to suppress output in
@@ -1658,8 +1660,9 @@ int invalidate_inode_page(struct page *page);
 
 #ifdef CONFIG_MMU
 extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
-                       unsigned long address, unsigned int flags);
-extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
+                                 unsigned long address, unsigned int flags,
+                                 struct pt_regs *regs);
+extern int fixup_user_fault(struct mm_struct *mm,
                            unsigned long address, unsigned int fault_flags,
                            bool *unlocked);
 void unmap_mapping_pages(struct address_space *mapping,
@@ -1668,14 +1671,14 @@ void unmap_mapping_range(struct address_space *mapping,
                loff_t const holebegin, loff_t const holelen, int even_cows);
 #else
 static inline vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
-               unsigned long address, unsigned int flags)
+                                        unsigned long address, unsigned int flags,
+                                        struct pt_regs *regs)
 {
        /* should never happen if there's no MMU */
        BUG();
        return VM_FAULT_SIGBUS;
 }
-static inline int fixup_user_fault(struct task_struct *tsk,
-               struct mm_struct *mm, unsigned long address,
+static inline int fixup_user_fault(struct mm_struct *mm, unsigned long address,
                unsigned int fault_flags, bool *unlocked)
 {
        /* should never happen if there's no MMU */
@@ -1701,11 +1704,11 @@ extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
 extern int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
                unsigned long addr, void *buf, int len, unsigned int gup_flags);
 
-long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
+long get_user_pages_remote(struct mm_struct *mm,
                            unsigned long start, unsigned long nr_pages,
                            unsigned int gup_flags, struct page **pages,
                            struct vm_area_struct **vmas, int *locked);
-long pin_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
+long pin_user_pages_remote(struct mm_struct *mm,
                           unsigned long start, unsigned long nr_pages,
                           unsigned int gup_flags, struct page **pages,
                           struct vm_area_struct **vmas, int *locked);
@@ -2599,7 +2602,7 @@ extern unsigned long stack_guard_gap;
 /* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
 extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
 
-/* CONFIG_STACK_GROWSUP still needs to to grow downwards at some places */
+/* CONFIG_STACK_GROWSUP still needs to grow downwards at some places */
 extern int expand_downwards(struct vm_area_struct *vma,
                unsigned long address);
 #if VM_GROWSUP
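handle_mm_fault(), fixup_user_fault() and the *_user_pages_remote() helpers above drop their task_struct argument and take only the mm (handle_mm_fault() additionally gains a pt_regs pointer). A sketch of the updated remote-GUP calling convention, assuming the caller already holds a reference on the mm:

static long example_pin_one_remote_page(struct mm_struct *mm,
					unsigned long addr,
					struct page **page)
{
	long ret;

	mmap_read_lock(mm);
	ret = get_user_pages_remote(mm, addr, 1, FOLL_WRITE,
				    page, NULL, NULL);
	mmap_read_unlock(mm);

	return ret;
}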
index 219bef4..8fc71e9 100644 (file)
@@ -48,14 +48,14 @@ static __always_inline void update_lru_size(struct lruvec *lruvec,
 static __always_inline void add_page_to_lru_list(struct page *page,
                                struct lruvec *lruvec, enum lru_list lru)
 {
-       update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page));
+       update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page));
        list_add(&page->lru, &lruvec->lists[lru]);
 }
 
 static __always_inline void add_page_to_lru_list_tail(struct page *page,
                                struct lruvec *lruvec, enum lru_list lru)
 {
-       update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page));
+       update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page));
        list_add_tail(&page->lru, &lruvec->lists[lru]);
 }
 
@@ -63,7 +63,7 @@ static __always_inline void del_page_from_lru_list(struct page *page,
                                struct lruvec *lruvec, enum lru_list lru)
 {
        list_del(&page->lru);
-       update_lru_size(lruvec, lru, page_zonenum(page), -hpage_nr_pages(page));
+       update_lru_size(lruvec, lru, page_zonenum(page), -thp_nr_pages(page));
 }
 
 /**
index 0277fba..496c3ff 100644 (file)
@@ -134,6 +134,7 @@ struct page {
                        unsigned char compound_dtor;
                        unsigned char compound_order;
                        atomic_t compound_mapcount;
+                       unsigned int compound_nr; /* 1 << compound_order */
                };
                struct {        /* Second tail page of compound page */
                        unsigned long _compound_pad_1;  /* compound_head */
index 635a96c..8379432 100644 (file)
@@ -173,9 +173,15 @@ enum node_stat_item {
        NR_ISOLATED_ANON,       /* Temporary isolated pages from anon lru */
        NR_ISOLATED_FILE,       /* Temporary isolated pages from file lru */
        WORKINGSET_NODES,
-       WORKINGSET_REFAULT,
-       WORKINGSET_ACTIVATE,
-       WORKINGSET_RESTORE,
+       WORKINGSET_REFAULT_BASE,
+       WORKINGSET_REFAULT_ANON = WORKINGSET_REFAULT_BASE,
+       WORKINGSET_REFAULT_FILE,
+       WORKINGSET_ACTIVATE_BASE,
+       WORKINGSET_ACTIVATE_ANON = WORKINGSET_ACTIVATE_BASE,
+       WORKINGSET_ACTIVATE_FILE,
+       WORKINGSET_RESTORE_BASE,
+       WORKINGSET_RESTORE_ANON = WORKINGSET_RESTORE_BASE,
+       WORKINGSET_RESTORE_FILE,
        WORKINGSET_NODERECLAIM,
        NR_ANON_MAPPED, /* Mapped anonymous pages */
        NR_FILE_MAPPED, /* pagecache pages mapped into pagetables.
@@ -277,8 +283,8 @@ struct lruvec {
        unsigned long                   file_cost;
        /* Non-resident age, driven by LRU movement */
        atomic_long_t                   nonresident_age;
-       /* Refaults at the time of last reclaim cycle */
-       unsigned long                   refaults;
+       /* Refaults at the time of last reclaim cycle, anon=0, file=1 */
+       unsigned long                   refaults[2];
        /* Various lruvec state flags (enum lruvec_flags) */
        unsigned long                   flags;
 #ifdef CONFIG_MEMCG
@@ -530,6 +536,7 @@ struct zone {
         * On compaction failure, 1<<compact_defer_shift compactions
         * are skipped before trying again. The number attempted since
         * last failure is tracked with compact_considered.
+        * compact_order_failed is the minimum order at which compaction failed.
         */
        unsigned int            compact_considered;
        unsigned int            compact_defer_shift;
index 2e66708..e30ed5f 100644 (file)
@@ -389,6 +389,7 @@ struct module {
        unsigned int num_gpl_syms;
        const struct kernel_symbol *gpl_syms;
        const s32 *gpl_crcs;
+       bool using_gplonly_symbols;
 
 #ifdef CONFIG_UNUSED_SYMBOLS
        /* unused exported symbols. */
@@ -582,34 +583,14 @@ struct module *find_module(const char *name);
 struct symsearch {
        const struct kernel_symbol *start, *stop;
        const s32 *crcs;
-       enum {
+       enum mod_license {
                NOT_GPL_ONLY,
                GPL_ONLY,
                WILL_BE_GPL_ONLY,
-       } licence;
+       } license;
        bool unused;
 };
 
-/*
- * Search for an exported symbol by name.
- *
- * Must be called with module_mutex held or preemption disabled.
- */
-const struct kernel_symbol *find_symbol(const char *name,
-                                       struct module **owner,
-                                       const s32 **crc,
-                                       bool gplok,
-                                       bool warn);
-
-/*
- * Walk the exported symbol table
- *
- * Must be called with module_mutex held or preemption disabled.
- */
-bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
-                                   struct module *owner,
-                                   void *data), void *data);
-
 /* Returns 0 and fills in value, defined and namebuf, or -ERANGE if
    symnum out of range. */
 int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
@@ -657,7 +638,6 @@ static inline void __module_get(struct module *module)
 #define symbol_put_addr(p) do { } while (0)
 
 #endif /* CONFIG_MODULE_UNLOAD */
-int ref_module(struct module *a, struct module *b);
 
 /* This is a #define so the string doesn't get put in every .o file */
 #define module_name(mod)                       \
index 3ef917f..1ad5aa3 100644 (file)
@@ -108,7 +108,7 @@ struct kparam_array
  * ".") the kernel commandline parameter.  Note that - is changed to _, so
  * the user can use "foo-bar=1" even for variable "foo_bar".
  *
- * @perm is 0 if the the variable is not to appear in sysfs, or 0444
+ * @perm is 0 if the variable is not to appear in sysfs, or 0444
  * for world-readable, 0644 for root-writable, etc.  Note that if it
  * is writable, you may need to use kernel_param_lock() around
  * accesses (esp. charp, which can be kfreed when it changes).
index 2dfe659..2129f7d 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0
  *
- * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com/
  */
 
 #ifndef __LINUX_MTD_HYPERBUS_H__
index 0c74838..af99041 100644 (file)
@@ -12,6 +12,8 @@
 
 #include <linux/mtd/mtd.h>
 
+struct nand_device;
+
 /**
  * struct nand_memory_organization - Memory organization structure
  * @bits_per_cell: number of bits per NAND cell
@@ -114,11 +116,11 @@ struct nand_page_io_req {
 };
 
 /**
- * struct nand_ecc_req - NAND ECC requirements
+ * struct nand_ecc_props - NAND ECC properties
  * @strength: ECC strength
- * @step_size: ECC step/block size
+ * @step_size: Number of bytes per step
  */
-struct nand_ecc_req {
+struct nand_ecc_props {
        unsigned int strength;
        unsigned int step_size;
 };
@@ -133,8 +135,6 @@ struct nand_bbt {
        unsigned long *cache;
 };
 
-struct nand_device;
-
 /**
  * struct nand_ops - NAND operations
  * @erase: erase a specific block. No need to check if the block is bad before
@@ -179,7 +179,7 @@ struct nand_ops {
 struct nand_device {
        struct mtd_info mtd;
        struct nand_memory_organization memorg;
-       struct nand_ecc_req eccreq;
+       struct nand_ecc_props eccreq;
        struct nand_row_converter rowconv;
        struct nand_bbt bbt;
        const struct nand_ops *ops;
index 122f343..6166e7c 100644 (file)
@@ -19,7 +19,7 @@
 /* Identification info for LPDDR chip */
 #define PFOW_MANUFACTURER_ID                   0x0020
 #define PFOW_DEVICE_ID                         0x0022
-/* Address in PFOW where prog buffer can can be found */
+/* Address in PFOW where prog buffer can be found */
 #define PFOW_PROGRAM_BUFFER_OFFSET             0x0040
 /* Size of program buffer in words */
 #define PFOW_PROGRAM_BUFFER_SIZE               0x0042
index 65b1c1c..a725b62 100644 (file)
@@ -492,22 +492,22 @@ struct nand_sdr_timings {
 };
 
 /**
- * enum nand_data_interface_type - NAND interface timing type
+ * enum nand_interface_type - NAND interface type
  * @NAND_SDR_IFACE:    Single Data Rate interface
  */
-enum nand_data_interface_type {
+enum nand_interface_type {
        NAND_SDR_IFACE,
 };
 
 /**
- * struct nand_data_interface - NAND interface timing
+ * struct nand_interface_config - NAND interface timing
  * @type:       type of the timing
  * @timings:    The timing information
  * @timings.mode: Timing mode as defined in the specification
  * @timings.sdr: Use it when @type is %NAND_SDR_IFACE.
  */
-struct nand_data_interface {
-       enum nand_data_interface_type type;
+struct nand_interface_config {
+       enum nand_interface_type type;
        struct nand_timings {
                unsigned int mode;
                union {
@@ -521,7 +521,7 @@ struct nand_data_interface {
  * @conf:      The data interface
  */
 static inline const struct nand_sdr_timings *
-nand_get_sdr_timings(const struct nand_data_interface *conf)
+nand_get_sdr_timings(const struct nand_interface_config *conf)
 {
        if (conf->type != NAND_SDR_IFACE)
                return ERR_PTR(-EINVAL);
@@ -944,11 +944,10 @@ static inline void nand_op_trace(const char *prefix,
  *              This method replaces chip->legacy.cmdfunc(),
  *              chip->legacy.{read,write}_{buf,byte,word}(),
  *              chip->legacy.dev_ready() and chip->legacy.waifunc().
- * @setup_data_interface: setup the data interface and timing. If
- *                       chipnr is set to %NAND_DATA_IFACE_CHECK_ONLY this
- *                       means the configuration should not be applied but
- *                       only checked.
- *                       This hook is optional.
+ * @setup_interface: setup the data interface and timing. If chipnr is set to
+ *                  %NAND_DATA_IFACE_CHECK_ONLY this means the configuration
+ *                  should not be applied but only checked.
+ *                  This hook is optional.
  */
 struct nand_controller_ops {
        int (*attach_chip)(struct nand_chip *chip);
@@ -956,8 +955,8 @@ struct nand_controller_ops {
        int (*exec_op)(struct nand_chip *chip,
                       const struct nand_operation *op,
                       bool check_only);
-       int (*setup_data_interface)(struct nand_chip *chip, int chipnr,
-                                   const struct nand_data_interface *conf);
+       int (*setup_interface)(struct nand_chip *chip, int chipnr,
+                              const struct nand_interface_config *conf);
 };
 
 /**
@@ -1028,140 +1027,138 @@ struct nand_legacy {
 };
 
 /**
- * struct nand_chip - NAND Private Flash Chip Data
- * @base:              Inherit from the generic NAND device
- * @legacy:            All legacy fields/hooks. If you develop a new driver,
- *                     don't even try to use any of these fields/hooks, and if
- *                     you're modifying an existing driver that is using those
- *                     fields/hooks, you should consider reworking the driver
- *                     avoid using them.
- * @setup_read_retry:  [FLASHSPECIFIC] flash (vendor) specific function for
- *                     setting the read-retry mode. Mostly needed for MLC NAND.
- * @ecc:               [BOARDSPECIFIC] ECC control structure
- * @buf_align:         minimum buffer alignment required by a platform
- * @oob_poi:           "poison value buffer," used for laying out OOB data
- *                     before writing
- * @page_shift:                [INTERN] number of address bits in a page (column
- *                     address bits).
- * @phys_erase_shift:  [INTERN] number of address bits in a physical eraseblock
- * @bbt_erase_shift:   [INTERN] number of address bits in a bbt entry
- * @chip_shift:                [INTERN] number of address bits in one chip
- * @options:           [BOARDSPECIFIC] various chip options. They can partly
- *                     be set to inform nand_scan about special functionality.
- *                     See the defines for further explanation.
- * @bbt_options:       [INTERN] bad block specific options. All options used
- *                     here must come from bbm.h. By default, these options
- *                     will be copied to the appropriate nand_bbt_descr's.
- * @badblockpos:       [INTERN] position of the bad block marker in the oob
- *                     area.
- * @badblockbits:      [INTERN] minimum number of set bits in a good block's
- *                     bad block marker position; i.e., BBM == 11110111b is
- *                     not bad when badblockbits == 7
- * @onfi_timing_mode_default: [INTERN] default ONFI timing mode. This field is
- *                           set to the actually used ONFI mode if the chip is
- *                           ONFI compliant or deduced from the datasheet if
- *                           the NAND chip is not ONFI compliant.
- * @pagemask:          [INTERN] page number mask = number of (pages / chip) - 1
- * @data_buf:          [INTERN] buffer for data, size is (page size + oobsize).
- * @pagecache:         Structure containing page cache related fields
- * @pagecache.bitflips:        Number of bitflips of the cached page
- * @pagecache.page:    Page number currently in the cache. -1 means no page is
- *                     currently cached
- * @subpagesize:       [INTERN] holds the subpagesize
- * @id:                        [INTERN] holds NAND ID
- * @parameters:                [INTERN] holds generic parameters under an easily
- *                     readable form.
- * @data_interface:    [INTERN] NAND interface timing information
- * @cur_cs:            currently selected target. -1 means no target selected,
- *                     otherwise we should always have cur_cs >= 0 &&
- *                     cur_cs < nanddev_ntargets(). NAND Controller drivers
- *                     should not modify this value, but they're allowed to
- *                     read it.
- * @read_retries:      [INTERN] the number of read retry modes supported
- * @lock:              lock protecting the suspended field. Also used to
- *                     serialize accesses to the NAND device.
- * @suspended:         set to 1 when the device is suspended, 0 when it's not.
- * @suspend:           [REPLACEABLE] specific NAND device suspend operation
- * @resume:            [REPLACEABLE] specific NAND device resume operation
- * @bbt:               [INTERN] bad block table pointer
- * @bbt_td:            [REPLACEABLE] bad block table descriptor for flash
- *                     lookup.
- * @bbt_md:            [REPLACEABLE] bad block table mirror descriptor
- * @badblock_pattern:  [REPLACEABLE] bad block scan pattern used for initial
- *                     bad block scan.
- * @controller:                [REPLACEABLE] a pointer to a hardware controller
- *                     structure which is shared among multiple independent
- *                     devices.
- * @priv:              [OPTIONAL] pointer to private chip data
- * @manufacturer:      [INTERN] Contains manufacturer information
- * @manufacturer.desc: [INTERN] Contains manufacturer's description
- * @manufacturer.priv: [INTERN] Contains manufacturer private information
- * @lock_area:         [REPLACEABLE] specific NAND chip lock operation
- * @unlock_area:       [REPLACEABLE] specific NAND chip unlock operation
+ * struct nand_chip_ops - NAND chip operations
+ * @suspend: Suspend operation
+ * @resume: Resume operation
+ * @lock_area: Lock operation
+ * @unlock_area: Unlock operation
+ * @setup_read_retry: Set the read-retry mode (mostly needed for MLC NANDs)
+ * @choose_interface_config: Choose the best interface configuration
+ */
+struct nand_chip_ops {
+       int (*suspend)(struct nand_chip *chip);
+       void (*resume)(struct nand_chip *chip);
+       int (*lock_area)(struct nand_chip *chip, loff_t ofs, uint64_t len);
+       int (*unlock_area)(struct nand_chip *chip, loff_t ofs, uint64_t len);
+       int (*setup_read_retry)(struct nand_chip *chip, int retry_mode);
+       int (*choose_interface_config)(struct nand_chip *chip,
+                                      struct nand_interface_config *iface);
+};
+
+/**
+ * struct nand_manufacturer - NAND manufacturer structure
+ * @desc: The manufacturer description
+ * @priv: Private information for the manufacturer driver
  */
+struct nand_manufacturer {
+       const struct nand_manufacturer_desc *desc;
+       void *priv;
+};
 
+/**
+ * struct nand_chip - NAND Private Flash Chip Data
+ * @base: Inherit from the generic NAND device
+ * @id: Holds NAND ID
+ * @parameters: Holds generic parameters under an easily readable form
+ * @manufacturer: Manufacturer information
+ * @ops: NAND chip operations
+ * @legacy: All legacy fields/hooks. If you develop a new driver, don't even try
+ *          to use any of these fields/hooks, and if you're modifying an
+ *          existing driver that is using those fields/hooks, you should
+ *          consider reworking the driver and avoid using them.
+ * @options: Various chip options. They can partly be set to inform nand_scan
+ *           about special functionality. See the defines for further
+ *           explanation.
+ * @current_interface_config: The currently used NAND interface configuration
+ * @best_interface_config: The best NAND interface configuration which fits both
+ *                         the NAND chip and NAND controller constraints. If
+ *                         unset, the default reset interface configuration must
+ *                         be used.
+ * @bbt_erase_shift: Number of address bits in a bbt entry
+ * @bbt_options: Bad block table specific options. All options used here must
+ *               come from bbm.h. By default, these options will be copied to
+ *               the appropriate nand_bbt_descr's.
+ * @badblockpos: Bad block marker position in the oob area
+ * @badblockbits: Minimum number of set bits in a good block's bad block marker
+ *                position; i.e., BBM = 11110111b is good when badblockbits = 7
+ * @bbt_td: Bad block table descriptor for flash lookup
+ * @bbt_md: Bad block table mirror descriptor
+ * @badblock_pattern: Bad block scan pattern used for initial bad block scan
+ * @bbt: Bad block table pointer
+ * @page_shift: Number of address bits in a page (column address bits)
+ * @phys_erase_shift: Number of address bits in a physical eraseblock
+ * @chip_shift: Number of address bits in one chip
+ * @pagemask: Page number mask = number of (pages / chip) - 1
+ * @subpagesize: Holds the subpagesize
+ * @data_buf: Buffer for data, size is (page size + oobsize)
+ * @oob_poi: Pointer to the OOB area covered by data_buf
+ * @pagecache: Structure containing page cache related fields
+ * @pagecache.bitflips: Number of bitflips of the cached page
+ * @pagecache.page: Page number currently in the cache. -1 means no page is
+ *                  currently cached
+ * @buf_align: Minimum buffer alignment required by a platform
+ * @lock: Lock protecting the suspended field. Also used to serialize accesses
+ *        to the NAND device
+ * @suspended: Set to 1 when the device is suspended, 0 when it's not
+ * @cur_cs: Currently selected target. -1 means no target selected, otherwise we
+ *          should always have cur_cs >= 0 && cur_cs < nanddev_ntargets().
+ *          NAND Controller drivers should not modify this value, but they're
+ *          allowed to read it.
+ * @read_retries: The number of read retry modes supported
+ * @controller: The hardware controller structure which is shared among multiple
+ *              independent devices
+ * @ecc: The ECC controller structure
+ * @priv: Chip private data
+ */
 struct nand_chip {
        struct nand_device base;
-
+       struct nand_id id;
+       struct nand_parameters parameters;
+       struct nand_manufacturer manufacturer;
+       struct nand_chip_ops ops;
        struct nand_legacy legacy;
+       unsigned int options;
 
-       int (*setup_read_retry)(struct nand_chip *chip, int retry_mode);
+       /* Data interface */
+       const struct nand_interface_config *current_interface_config;
+       struct nand_interface_config *best_interface_config;
 
-       unsigned int options;
+       /* Bad block information */
+       unsigned int bbt_erase_shift;
        unsigned int bbt_options;
+       unsigned int badblockpos;
+       unsigned int badblockbits;
+       struct nand_bbt_descr *bbt_td;
+       struct nand_bbt_descr *bbt_md;
+       struct nand_bbt_descr *badblock_pattern;
+       u8 *bbt;
 
-       int page_shift;
-       int phys_erase_shift;
-       int bbt_erase_shift;
-       int chip_shift;
-       int pagemask;
-       u8 *data_buf;
+       /* Device internal layout */
+       unsigned int page_shift;
+       unsigned int phys_erase_shift;
+       unsigned int chip_shift;
+       unsigned int pagemask;
+       unsigned int subpagesize;
 
+       /* Buffers */
+       u8 *data_buf;
+       u8 *oob_poi;
        struct {
                unsigned int bitflips;
                int page;
        } pagecache;
+       unsigned long buf_align;
 
-       int subpagesize;
-       int onfi_timing_mode_default;
-       unsigned int badblockpos;
-       int badblockbits;
-
-       struct nand_id id;
-       struct nand_parameters parameters;
-
-       struct nand_data_interface data_interface;
-
-       int cur_cs;
-
-       int read_retries;
-
+       /* Internals */
        struct mutex lock;
        unsigned int suspended : 1;
-       int (*suspend)(struct nand_chip *chip);
-       void (*resume)(struct nand_chip *chip);
+       int cur_cs;
+       int read_retries;
 
-       uint8_t *oob_poi;
+       /* Externals */
        struct nand_controller *controller;
-
        struct nand_ecc_ctrl ecc;
-       unsigned long buf_align;
-
-       uint8_t *bbt;
-       struct nand_bbt_descr *bbt_td;
-       struct nand_bbt_descr *bbt_md;
-
-       struct nand_bbt_descr *badblock_pattern;
-
        void *priv;
-
-       struct {
-               const struct nand_manufacturer *desc;
-               void *priv;
-       } manufacturer;
-
-       int (*lock_area)(struct nand_chip *chip, loff_t ofs, uint64_t len);
-       int (*unlock_area)(struct nand_chip *chip, loff_t ofs, uint64_t len);
 };
 
 extern const struct mtd_ooblayout_ops nand_ooblayout_sp_ops;
@@ -1209,6 +1206,17 @@ static inline struct device_node *nand_get_flash_node(struct nand_chip *chip)
        return mtd_get_of_node(nand_to_mtd(chip));
 }
 
+/**
+ * nand_get_interface_config - Retrieve the current interface configuration
+ *                             of a NAND chip
+ * @chip: The NAND chip
+ */
+static inline const struct nand_interface_config *
+nand_get_interface_config(struct nand_chip *chip)
+{
+       return chip->current_interface_config;
+}
+
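As a usage sketch (illustrative only), a controller driver would query the timings currently in effect through this accessor rather than reaching into chip internals; the conf->timings.mode dereference below is an assumption about nand_interface_config's layout, which is not shown in this hunk:

static void example_show_timing_mode(struct nand_chip *chip)
{
	const struct nand_interface_config *conf = nand_get_interface_config(chip);

	/* timings.mode is assumed here purely for illustration. */
	pr_debug("active NAND timing mode: %u\n", conf->timings.mode);
}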
 /*
  * A helper for defining older NAND chips where the second ID byte fully
  * defined the chip, including the geometry (chip size, eraseblock size, page
@@ -1261,10 +1269,6 @@ static inline struct device_node *nand_get_flash_node(struct nand_chip *chip)
  *               @ecc_step_ds in nand_chip{}, also from the datasheet.
  *               For example, the "4bit ECC for each 512Byte" can be set with
  *               NAND_ECC_INFO(4, 512).
- * @onfi_timing_mode_default: the default ONFI timing mode entered after a NAND
- *                           reset. Should be deduced from timings described
- *                           in the datasheet.
- *
  */
 struct nand_flash_dev {
        char *name;
@@ -1285,7 +1289,6 @@ struct nand_flash_dev {
                uint16_t strength_ds;
                uint16_t step_ds;
        } ecc;
-       int onfi_timing_mode_default;
 };
 
 int nand_create_bbt(struct nand_chip *chip);
index 1077c45..7b78c4b 100644 (file)
@@ -309,7 +309,7 @@ struct spinand_info {
        struct spinand_devid devid;
        u32 flags;
        struct nand_memory_organization memorg;
-       struct nand_ecc_req eccreq;
+       struct nand_ecc_props eccreq;
        struct spinand_ecc_info eccinfo;
        struct {
                const struct spinand_op_variants *read_cache;
index ae197cc..dcd185c 100644 (file)
@@ -65,6 +65,17 @@ struct mutex {
 #endif
 };
 
+struct ww_class;
+struct ww_acquire_ctx;
+
+struct ww_mutex {
+       struct mutex base;
+       struct ww_acquire_ctx *ctx;
+#ifdef CONFIG_DEBUG_MUTEXES
+       struct ww_class *ww_class;
+#endif
+};
+
 /*
  * This is the control structure for tasks blocked on mutex,
  * which resides on the blocked task's kernel stack:
index aac42c2..9b67394 100644 (file)
@@ -58,7 +58,6 @@ struct nf_ipv6_ops {
                        int (*output)(struct net *, struct sock *, struct sk_buff *));
        int (*reroute)(struct sk_buff *skb, const struct nf_queue_entry *entry);
 #if IS_MODULE(CONFIG_IPV6)
-       int (*br_defrag)(struct net *net, struct sk_buff *skb, u32 user);
        int (*br_fragment)(struct net *net, struct sock *sk,
                           struct sk_buff *skb,
                           struct nf_bridge_frag_data *data,
@@ -117,23 +116,6 @@ static inline int nf_ip6_route(struct net *net, struct dst_entry **dst,
 
 #include <net/netfilter/ipv6/nf_defrag_ipv6.h>
 
-static inline int nf_ipv6_br_defrag(struct net *net, struct sk_buff *skb,
-                                   u32 user)
-{
-#if IS_MODULE(CONFIG_IPV6)
-       const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops();
-
-       if (!v6_ops)
-               return 1;
-
-       return v6_ops->br_defrag(net, skb, user);
-#elif IS_BUILTIN(CONFIG_IPV6)
-       return nf_ct_frag6_gather(net, skb, user);
-#else
-       return 1;
-#endif
-}
-
 int br_ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
                    struct nf_bridge_frag_data *data,
                    int (*output)(struct net *, struct sock *sk,
index 4dba3c9..b8360be 100644 (file)
@@ -150,6 +150,12 @@ enum nfs_opnum4 {
        OP_WRITE_SAME = 70,
        OP_CLONE = 71,
 
+       /* xattr support (RFC8276) */
+       OP_GETXATTR                = 72,
+       OP_SETXATTR                = 73,
+       OP_LISTXATTRS              = 74,
+       OP_REMOVEXATTR             = 75,
+
        OP_ILLEGAL = 10044,
 };
 
@@ -159,7 +165,7 @@ Needs to be updated if more operations are defined in future.*/
 #define FIRST_NFS4_OP  OP_ACCESS
 #define LAST_NFS40_OP  OP_RELEASE_LOCKOWNER
 #define LAST_NFS41_OP  OP_RECLAIM_COMPLETE
-#define LAST_NFS42_OP  OP_CLONE
+#define LAST_NFS42_OP  OP_REMOVEXATTR
 #define LAST_NFS4_OP   LAST_NFS42_OP
 
 enum nfsstat4 {
@@ -280,6 +286,10 @@ enum nfsstat4 {
        NFS4ERR_WRONG_LFS = 10092,
        NFS4ERR_BADLABEL = 10093,
        NFS4ERR_OFFLOAD_NO_REQS = 10094,
+
+       /* xattr (RFC8276) */
+       NFS4ERR_NOXATTR        = 10095,
+       NFS4ERR_XATTR2BIG      = 10096,
 };
 
 static inline bool seqid_mutating_err(u32 err)
@@ -452,6 +462,7 @@ enum change_attr_type4 {
 #define FATTR4_WORD2_CHANGE_ATTR_TYPE  (1UL << 15)
 #define FATTR4_WORD2_SECURITY_LABEL     (1UL << 16)
 #define FATTR4_WORD2_MODE_UMASK                (1UL << 17)
+#define FATTR4_WORD2_XATTR_SUPPORT     (1UL << 18)
 
 /* MDS threshold bitmap bits */
 #define THRESHOLD_RD                    (1UL << 0)
@@ -542,6 +553,11 @@ enum {
        NFSPROC4_CLNT_LAYOUTERROR,
 
        NFSPROC4_CLNT_COPY_NOTIFY,
+
+       NFSPROC4_CLNT_GETXATTR,
+       NFSPROC4_CLNT_SETXATTR,
+       NFSPROC4_CLNT_LISTXATTRS,
+       NFSPROC4_CLNT_REMOVEXATTR,
 };
 
 /* nfs41 types */
@@ -700,4 +716,13 @@ struct nl4_server {
                struct nfs42_netaddr    nl4_addr; /* NL4_NETADDR */
        } u;
 };
+
+/*
+ * Options for setxattr. These match the flags for setxattr(2).
+ */
+enum nfs4_setxattr_options {
+       SETXATTR4_EITHER        = 0,
+       SETXATTR4_CREATE        = 1,
+       SETXATTR4_REPLACE       = 2,
+};
 #endif
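The three options above mirror the flags argument of setxattr(2). A hypothetical helper (not part of this patch) mapping the syscall flags onto the on-the-wire values could look like this; XATTR_CREATE and XATTR_REPLACE come from <linux/xattr.h>:

#include <linux/xattr.h>	/* XATTR_CREATE, XATTR_REPLACE */

static u32 example_nfs4_xattr_flags(int flags)
{
	switch (flags) {
	case XATTR_CREATE:
		return SETXATTR4_CREATE;	/* fail if the attribute already exists */
	case XATTR_REPLACE:
		return SETXATTR4_REPLACE;	/* fail if the attribute does not exist */
	default:
		return SETXATTR4_EITHER;	/* create or replace as needed */
	}
}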
index 6ee9119..a2c6455 100644 (file)
@@ -102,6 +102,8 @@ struct nfs_delegation;
 
 struct posix_acl;
 
+struct nfs4_xattr_cache;
+
 /*
  * nfs fs inode data in memory
  */
@@ -188,6 +190,10 @@ struct nfs_inode {
        struct fscache_cookie   *fscache;
 #endif
        struct inode            vfs_inode;
+
+#ifdef CONFIG_NFS_V4_2
+       struct nfs4_xattr_cache *xattr_cache;
+#endif
 };
 
 struct nfs4_copy_state {
@@ -212,6 +218,9 @@ struct nfs4_copy_state {
 #define NFS_ACCESS_EXTEND      0x0008
 #define NFS_ACCESS_DELETE      0x0010
 #define NFS_ACCESS_EXECUTE     0x0020
+#define NFS_ACCESS_XAREAD      0x0040
+#define NFS_ACCESS_XAWRITE     0x0080
+#define NFS_ACCESS_XALIST      0x0100
 
 /*
  * Cache validity bit flags
@@ -231,6 +240,7 @@ struct nfs4_copy_state {
 #define NFS_INO_DATA_INVAL_DEFER       \
                                BIT(13)         /* Deferred cache invalidation */
 #define NFS_INO_INVALID_BLOCKS BIT(14)         /* cached blocks are invalid */
+#define NFS_INO_INVALID_XATTR  BIT(15)         /* xattrs are invalid */
 
 #define NFS_INO_INVALID_ATTR   (NFS_INO_INVALID_CHANGE \
                | NFS_INO_INVALID_CTIME \
@@ -490,6 +500,8 @@ extern int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fh,
                        struct nfs_fattr *fattr, struct nfs4_label *label);
 extern int nfs_may_open(struct inode *inode, const struct cred *cred, int openflags);
 extern void nfs_access_zap_cache(struct inode *inode);
+extern int nfs_access_get_cached(struct inode *inode, const struct cred *cred, struct nfs_access_entry *res,
+                                bool may_block);
 
 /*
  * linux/fs/nfs/symlink.c
index 465fa98..7eae72a 100644 (file)
@@ -163,6 +163,11 @@ struct nfs_server {
        unsigned int            dtsize;         /* readdir size */
        unsigned short          port;           /* "port=" setting */
        unsigned int            bsize;          /* server block size */
+#ifdef CONFIG_NFS_V4_2
+       unsigned int            gxasize;        /* getxattr size */
+       unsigned int            sxasize;        /* setxattr size */
+       unsigned int            lxasize;        /* listxattr size */
+#endif
        unsigned int            acregmin;       /* attr cache timeouts */
        unsigned int            acregmax;
        unsigned int            acdirmin;
@@ -281,5 +286,6 @@ struct nfs_server {
 #define NFS_CAP_OFFLOAD_CANCEL (1U << 25)
 #define NFS_CAP_LAYOUTERROR    (1U << 26)
 #define NFS_CAP_COPY_NOTIFY    (1U << 27)
+#define NFS_CAP_XATTR          (1U << 28)
 
 #endif
index 5fd0a9e..9408f32 100644 (file)
@@ -150,6 +150,7 @@ struct nfs_fsinfo {
        __u32                   layouttype[NFS_MAX_LAYOUT_TYPES]; /* supported pnfs layout driver */
        __u32                   blksize; /* preferred pnfs io block size */
        __u32                   clone_blksize; /* granularity of a CLONE operation */
+       __u32                   xattr_support; /* User xattrs supported */
 };
 
 struct nfs_fsstat {
@@ -1497,7 +1498,64 @@ struct nfs42_seek_res {
        u32     sr_eof;
        u64     sr_offset;
 };
-#endif
+
+struct nfs42_setxattrargs {
+       struct nfs4_sequence_args       seq_args;
+       struct nfs_fh                   *fh;
+       const char                      *xattr_name;
+       u32                             xattr_flags;
+       size_t                          xattr_len;
+       struct page                     **xattr_pages;
+};
+
+struct nfs42_setxattrres {
+       struct nfs4_sequence_res        seq_res;
+       struct nfs4_change_info         cinfo;
+};
+
+struct nfs42_getxattrargs {
+       struct nfs4_sequence_args       seq_args;
+       struct nfs_fh                   *fh;
+       const char                      *xattr_name;
+       size_t                          xattr_len;
+       struct page                     **xattr_pages;
+};
+
+struct nfs42_getxattrres {
+       struct nfs4_sequence_res        seq_res;
+       size_t                          xattr_len;
+};
+
+struct nfs42_listxattrsargs {
+       struct nfs4_sequence_args       seq_args;
+       struct nfs_fh                   *fh;
+       u32                             count;
+       u64                             cookie;
+       struct page                     **xattr_pages;
+};
+
+struct nfs42_listxattrsres {
+       struct nfs4_sequence_res        seq_res;
+       struct page                     *scratch;
+       void                            *xattr_buf;
+       size_t                          xattr_len;
+       u64                             cookie;
+       bool                            eof;
+       size_t                          copied;
+};
+
+struct nfs42_removexattrargs {
+       struct nfs4_sequence_args       seq_args;
+       struct nfs_fh                   *fh;
+       const char                      *xattr_name;
+};
+
+struct nfs42_removexattrres {
+       struct nfs4_sequence_res        seq_res;
+       struct nfs4_change_info         cinfo;
+};
+
+#endif /* CONFIG_NFS_V4_2 */
 
 struct nfs_page;
 
index c696c26..f022f58 100644 (file)
@@ -48,7 +48,7 @@ struct oom_control {
        /* Used by oom implementation, do not set */
        unsigned long totalpages;
        struct task_struct *chosen;
-       unsigned long chosen_points;
+       long chosen_points;
 
        /* Used to print the constraint info. */
        enum oom_constraint constraint;
@@ -107,7 +107,7 @@ static inline vm_fault_t check_stable_address_space(struct mm_struct *mm)
 
 bool __oom_reap_task_mm(struct mm_struct *mm);
 
-extern unsigned long oom_badness(struct task_struct *p,
+long oom_badness(struct task_struct *p,
                unsigned long totalpages);
 
 extern bool out_of_memory(struct oom_control *oc);
index d1f4eff..7de11dc 100644 (file)
@@ -381,7 +381,7 @@ static inline struct page *find_subpage(struct page *head, pgoff_t index)
        if (PageHuge(head))
                return head;
 
-       return head + (index & (hpage_nr_pages(head) - 1));
+       return head + (index & (thp_nr_pages(head) - 1));
 }
 
 struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
@@ -773,7 +773,7 @@ static inline struct page *readahead_page(struct readahead_control *rac)
 
        page = xa_load(&rac->mapping->i_pages, rac->_index);
        VM_BUG_ON_PAGE(!PageLocked(page), page);
-       rac->_batch_count = hpage_nr_pages(page);
+       rac->_batch_count = thp_nr_pages(page);
 
        return page;
 }
@@ -796,7 +796,7 @@ static inline unsigned int __readahead_batch(struct readahead_control *rac,
                VM_BUG_ON_PAGE(!PageLocked(page), page);
                VM_BUG_ON_PAGE(PageTail(page), page);
                array[i++] = page;
-               rac->_batch_count += hpage_nr_pages(page);
+               rac->_batch_count += thp_nr_pages(page);
 
                /*
                 * The page cache isn't using multi-index entries yet,
index 5c709a1..1ab1e24 100644 (file)
 #define PCI_DEVICE_ID_INTEL_80332_1    0x0332
 #define PCI_DEVICE_ID_INTEL_80333_0    0x0370
 #define PCI_DEVICE_ID_INTEL_80333_1    0x0372
+#define PCI_DEVICE_ID_INTEL_QAT_DH895XCC       0x0435
+#define PCI_DEVICE_ID_INTEL_QAT_DH895XCC_VF    0x0443
 #define PCI_DEVICE_ID_INTEL_82375      0x0482
 #define PCI_DEVICE_ID_INTEL_82424      0x0483
 #define PCI_DEVICE_ID_INTEL_82378      0x0484
 #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI     0x1577
 #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE  0x1578
 #define PCI_DEVICE_ID_INTEL_80960_RP   0x1960
+#define PCI_DEVICE_ID_INTEL_QAT_C3XXX  0x19e2
+#define PCI_DEVICE_ID_INTEL_QAT_C3XXX_VF       0x19e3
 #define PCI_DEVICE_ID_INTEL_82840_HB   0x1a21
 #define PCI_DEVICE_ID_INTEL_82845_HB   0x1a30
 #define PCI_DEVICE_ID_INTEL_IOAT       0x1a38
 #define PCI_DEVICE_ID_INTEL_IOAT_JSF7  0x3717
 #define PCI_DEVICE_ID_INTEL_IOAT_JSF8  0x3718
 #define PCI_DEVICE_ID_INTEL_IOAT_JSF9  0x3719
+#define PCI_DEVICE_ID_INTEL_QAT_C62X   0x37c8
+#define PCI_DEVICE_ID_INTEL_QAT_C62X_VF        0x37c9
 #define PCI_DEVICE_ID_INTEL_ICH10_0    0x3a14
 #define PCI_DEVICE_ID_INTEL_ICH10_1    0x3a16
 #define PCI_DEVICE_ID_INTEL_ICH10_2    0x3a18
index 53e97da..e8cbc2e 100644 (file)
@@ -117,7 +117,9 @@ static inline pgd_t *pgd_offset_pgd(pgd_t *pgd, unsigned long address)
  * a shortcut which implies the use of the kernel's pgd, instead
  * of a process's
  */
+#ifndef pgd_offset_k
 #define pgd_offset_k(address)          pgd_offset(&init_mm, (address))
+#endif
 
 /*
  * In many cases it is known that a virtual address is mapped at PMD or PTE
@@ -804,7 +806,7 @@ static inline void ptep_modify_prot_commit(struct vm_area_struct *vma,
 
 /*
  * No-op macros that just return the current protection value. Defined here
- * because these macros can be used used even if CONFIG_MMU is not defined.
+ * because these macros can be used even if CONFIG_MMU is not defined.
  */
 
 #ifndef pgprot_nx
@@ -1234,7 +1236,7 @@ static inline int pmd_trans_unstable(pmd_t *pmd)
  * Technically a PTE can be PROTNONE even when not doing NUMA balancing but
  * the only case the kernel cares is for NUMA balancing and is only ever set
  * when the VMA is accessible. For PROT_NONE VMAs, the PTEs are not marked
- * _PAGE_PROTNONE so by by default, implement the helper as "always no". It
+ * _PAGE_PROTNONE so by default, implement the helper as "always no". It
  * is the responsibility of the caller to distinguish between PROT_NONE
  * protections and NUMA hinting fault protections.
  */
@@ -1318,10 +1320,10 @@ static inline int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
 /*
  * ARCHes with special requirements for evicting THP backing TLB entries can
  * implement this. Otherwise also, it can help optimize normal TLB flush in
- * THP regime. stock flush_tlb_range() typically has optimization to nuke the
- * entire TLB TLB if flush span is greater than a threshold, which will
- * likely be true for a single huge page. Thus a single thp flush will
- * invalidate the entire TLB which is not desitable.
+ * THP regime. Stock flush_tlb_range() typically has optimization to nuke the
+ * entire TLB if flush span is greater than a threshold, which will
+ * likely be true for a single huge page. Thus a single THP flush will
+ * invalidate the entire TLB which is not desirable.
  * e.g. see arch/arc: flush_pmd_tlb_range
  */
 #define flush_pmd_tlb_range(vma, addr, end)    flush_tlb_range(vma, addr, end)
index a8e8763..c36fb41 100644 (file)
@@ -402,7 +402,8 @@ void pcs_get_state(struct phylink_pcs *pcs,
  * For most 10GBASE-R, there is no advertisement.
  */
 int pcs_config(struct phylink_pcs *pcs, unsigned int mode,
-              phy_interface_t interface, const unsigned long *advertising);
+              phy_interface_t interface, const unsigned long *advertising,
+              bool permit_pause_to_mac);
 
 /**
  * pcs_an_restart() - restart 802.3z BaseX autonegotiation
similarity index 51%
rename from include/linux/platform_data/clk-st.h
rename to include/linux/platform_data/clk-fch.h
index 7cdb6a4..b9f6824 100644 (file)
@@ -1,17 +1,18 @@
 /* SPDX-License-Identifier: MIT */
 /*
- * clock framework for AMD Stoney based clock
+ * clock framework for AMD misc clocks
  *
  * Copyright 2018 Advanced Micro Devices, Inc.
  */
 
-#ifndef __CLK_ST_H
-#define __CLK_ST_H
+#ifndef __CLK_FCH_H
+#define __CLK_FCH_H
 
 #include <linux/compiler.h>
 
-struct st_clk_data {
+struct fch_clk_data {
        void __iomem *base;
+       u32 is_rv;
 };
 
-#endif /* __CLK_ST_H */
+#endif /* __CLK_FCH_H */
index 91e77f5..1fcfe9e 100644 (file)
@@ -4917,15 +4917,26 @@ struct ec_response_usb_pd_control_v1 {
 #define USBC_PD_CC_UFP_ATTACHED        4 /* UFP attached to usbc */
 #define USBC_PD_CC_DFP_ATTACHED        5 /* DPF attached to usbc */
 
+/* Active/Passive Cable */
+#define USB_PD_CTRL_ACTIVE_CABLE        BIT(0)
+/* Optical/Non-optical cable */
+#define USB_PD_CTRL_OPTICAL_CABLE       BIT(1)
+/* 3rd Gen TBT device (or AMA)/2nd Gen TBT adapter */
+#define USB_PD_CTRL_TBT_LEGACY_ADAPTER  BIT(2)
+/* Active Link Uni-Direction */
+#define USB_PD_CTRL_ACTIVE_LINK_UNIDIR  BIT(3)
+
 struct ec_response_usb_pd_control_v2 {
        uint8_t enabled;
        uint8_t role;
        uint8_t polarity;
        char state[32];
-       uint8_t cc_state; /* USBC_PD_CC_*Encoded cc state */
-       uint8_t dp_mode;  /* Current DP pin mode (MODE_DP_PIN_[A-E]) */
-       /* CL:1500994 Current cable type */
-       uint8_t reserved_cable_type;
+       uint8_t cc_state;       /* enum pd_cc_states representing cc state */
+       uint8_t dp_mode;        /* Current DP pin mode (MODE_DP_PIN_[A-E]) */
+       uint8_t reserved;       /* Reserved for future use */
+       uint8_t control_flags;  /* USB_PD_CTRL_*flags */
+       uint8_t cable_speed;    /* TBT_SS_* cable speed */
+       uint8_t cable_gen;      /* TBT_GEN3_* cable rounded support */
 } __ec_align1;
 
 #define EC_CMD_USB_PD_PORTS 0x0102
@@ -5207,11 +5218,15 @@ struct ec_params_usb_pd_mux_info {
 } __ec_align1;
 
 /* Flags representing mux state */
-#define USB_PD_MUX_USB_ENABLED       BIT(0) /* USB connected */
-#define USB_PD_MUX_DP_ENABLED        BIT(1) /* DP connected */
-#define USB_PD_MUX_POLARITY_INVERTED BIT(2) /* CC line Polarity inverted */
-#define USB_PD_MUX_HPD_IRQ           BIT(3) /* HPD IRQ is asserted */
-#define USB_PD_MUX_HPD_LVL           BIT(4) /* HPD level is asserted */
+#define USB_PD_MUX_NONE               0      /* Open switch */
+#define USB_PD_MUX_USB_ENABLED        BIT(0) /* USB connected */
+#define USB_PD_MUX_DP_ENABLED         BIT(1) /* DP connected */
+#define USB_PD_MUX_POLARITY_INVERTED  BIT(2) /* CC line Polarity inverted */
+#define USB_PD_MUX_HPD_IRQ            BIT(3) /* HPD IRQ is asserted */
+#define USB_PD_MUX_HPD_LVL            BIT(4) /* HPD level is asserted */
+#define USB_PD_MUX_SAFE_MODE          BIT(5) /* DP is in safe mode */
+#define USB_PD_MUX_TBT_COMPAT_ENABLED BIT(6) /* TBT compat enabled */
+#define USB_PD_MUX_USB4_ENABLED       BIT(7) /* USB4 enabled */
 
 struct ec_response_usb_pd_mux_info {
        uint8_t flags; /* USB_PD_MUX_*-encoded USB mux state */
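The widened flag set turns the reported mux state into a small bitfield. A hedged sketch of decoding it (report_mux_state() is an invented helper, not part of the EC interface):

static void report_mux_state(const struct ec_response_usb_pd_mux_info *info)
{
	if (info->flags == USB_PD_MUX_NONE) {
		pr_info("mux: open switch\n");
		return;
	}
	if (info->flags & USB_PD_MUX_USB4_ENABLED)
		pr_info("mux: USB4 enabled\n");
	else if (info->flags & USB_PD_MUX_TBT_COMPAT_ENABLED)
		pr_info("mux: Thunderbolt compatibility mode\n");
	if (info->flags & USB_PD_MUX_DP_ENABLED)
		pr_info("mux: DP alt mode%s\n",
			(info->flags & USB_PD_MUX_HPD_LVL) ? ", HPD asserted" : "");
}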
index 3832433..4a415ae 100644 (file)
@@ -216,9 +216,6 @@ int cros_ec_prepare_tx(struct cros_ec_device *ec_dev,
 int cros_ec_check_result(struct cros_ec_device *ec_dev,
                         struct cros_ec_command *msg);
 
-int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev,
-                    struct cros_ec_command *msg);
-
 int cros_ec_cmd_xfer_status(struct cros_ec_device *ec_dev,
                            struct cros_ec_command *msg);
 
index 3fbf9f2..bc208c6 100644 (file)
@@ -2,7 +2,7 @@
 /*
  * TI DaVinci CPUFreq platform support.
  *
- * Copyright (C) 2009 Texas Instruments, Inc. http://www.ti.com/
+ * Copyright (C) 2009 Texas Instruments, Inc. https://www.ti.com/
  */
 
 #ifndef _MACH_DAVINCI_CPUFREQ_H
index 7fe80f1..5d1fb0d 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * TI DaVinci Audio Serial Port support
  *
- * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com/
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License as
index 0f491d8..3cc78f0 100644 (file)
@@ -2,7 +2,7 @@
 /*
  * BCH Error Location Module
  *
- * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com/
  */
 
 #ifndef __ELM_H
index a93841b..e182a46 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * DaVinci GPIO Platform Related Defines
  *
- * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2013 Texas Instruments Incorporated - https://www.ti.com/
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License as
index ef663e5..c9cc4e3 100644 (file)
@@ -2,7 +2,7 @@
 /*
  * OMAP GPMC Platform data
  *
- * Copyright (C) 2014 Texas Instruments, Inc. - http://www.ti.com
+ * Copyright (C) 2014 Texas Instruments, Inc. - https://www.ti.com
  *     Roger Quadros <rogerq@ti.com>
  */
 
index 9cffa9a..1af9c01 100644 (file)
  *
  * TYPE1 HW watchdog implementation exist in old systems.
  * All new systems have TYPE2 HW watchdog.
+ * TYPE3 HW watchdog can exist on all systems with new CPLD.
+ * TYPE3 is selected by WD capability bit.
  */
 enum mlxreg_wdt_type {
        MLX_WDT_TYPE1,
        MLX_WDT_TYPE2,
+       MLX_WDT_TYPE3,
 };
 
 /**
@@ -93,7 +96,7 @@ struct mlxreg_core_data {
        umode_t mode;
        struct device_node *np;
        struct mlxreg_hotplug_device hpdev;
-       u8 health_cntr;
+       u32 health_cntr;
        bool attached;
        u8 regnum;
 };
index a403dd5..a498262 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * TI DaVinci AEMIF support
  *
- * Copyright 2010 (C) Texas Instruments, Inc. http://www.ti.com/
+ * Copyright 2010 (C) Texas Instruments, Inc. https://www.ti.com/
  *
  * This file is licensed under the terms of the GNU General Public License
  * version 2. This program is licensed "as is" without any warranty of any
index 8419c8c..0dd851e 100644 (file)
@@ -3,7 +3,7 @@
  * omap-twl4030.h - ASoC machine driver for TI SoC based boards with twl4030
  *                 codec, header.
  *
- * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com
+ * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com
  * All rights reserved.
  *
  * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
diff --git a/include/linux/platform_data/sky81452-backlight.h b/include/linux/platform_data/sky81452-backlight.h
deleted file mode 100644 (file)
index 02653d9..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * sky81452.h  SKY81452 backlight driver
- *
- * Copyright 2014 Skyworks Solutions Inc.
- * Author : Gyungoh Yoo <jack.yoo@skyworksinc.com>
- */
-
-#ifndef _SKY81452_BACKLIGHT_H
-#define _SKY81452_BACKLIGHT_H
-
-/**
- * struct sky81452_platform_data
- * @name:      backlight driver name.
-               If it is not defined, default name is lcd-backlight.
- * @gpio_enable:GPIO number which control EN pin
- * @enable:    Enable mask for current sink channel 1, 2, 3, 4, 5 and 6.
- * @ignore_pwm:        true if DPWMI should be ignored.
- * @dpwm_mode: true is DPWM dimming mode, otherwise Analog dimming mode.
- * @phase_shift:true is phase shift mode.
- * @short_detecion_threshold:  It should be one of 4, 5, 6 and 7V.
- * @boost_current_limit:       It should be one of 2300, 2750mA.
- */
-struct sky81452_bl_platform_data {
-       const char *name;
-       int gpio_enable;
-       unsigned int enable;
-       bool ignore_pwm;
-       bool dpwm_mode;
-       bool phase_shift;
-       unsigned int short_detection_threshold;
-       unsigned int boost_current_limit;
-};
-
-#endif
index 3d47d21..31f2e22 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Platform data for uio_pruss driver
  *
- * Copyright (C) 2010-11 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2010-11 Texas Instruments Incorporated - https://www.ti.com/
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License as
index fa579b4..5e70d66 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * usb-omap.h - Platform data for the various OMAP USB IPs
  *
- * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com
+ * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com
  *
  * This software is distributed under the terms of the GNU General Public
  * License ("GPL") version 2, as published by the Free Software Foundation.
index df34330..dc8ae5d 100644 (file)
 #define LIST_POISON2  ((void *) 0x122 + POISON_POINTER_DELTA)
 
 /********** include/linux/timer.h **********/
-/*
- * Magic number "tsta" to indicate a static timer initializer
- * for the object debugging code.
- */
 #define TIMER_ENTRY_STATIC     ((void *) 0x300 + POISON_POINTER_DELTA)
 
 /********** mm/page_poison.c **********/
index e3f0f85..896c16d 100644 (file)
@@ -6,6 +6,7 @@
 #include <linux/list.h>
 #include <linux/alarmtimer.h>
 #include <linux/timerqueue.h>
+#include <linux/task_work.h>
 
 struct kernel_siginfo;
 struct task_struct;
@@ -125,6 +126,16 @@ struct posix_cputimers {
        unsigned int                    expiry_active;
 };
 
+/**
+ * posix_cputimers_work - Container for task work based posix CPU timer expiry
+ * @work:      The task work to be scheduled
+ * @scheduled:  @work has been scheduled already, no further processing
+ */
+struct posix_cputimers_work {
+       struct callback_head    work;
+       unsigned int            scheduled;
+};
+
 static inline void posix_cputimers_init(struct posix_cputimers *pct)
 {
        memset(pct, 0, sizeof(*pct));
@@ -165,6 +176,12 @@ static inline void posix_cputimers_group_init(struct posix_cputimers *pct,
                                              u64 cpu_limit) { }
 #endif
 
+#ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK
+void posix_cputimers_init_work(void);
+#else
+static inline void posix_cputimers_init_work(void) { }
+#endif
+
 #define REQUEUE_PENDING 1
 
 /**
index 2635b2a..a13ff38 100644 (file)
@@ -39,7 +39,7 @@ enum pwm_polarity {
  * current PWM hardware state.
  */
 struct pwm_args {
-       unsigned int period;
+       u64 period;
        enum pwm_polarity polarity;
 };
 
@@ -56,8 +56,8 @@ enum {
  * @enabled: PWM enabled status
  */
 struct pwm_state {
-       unsigned int period;
-       unsigned int duty_cycle;
+       u64 period;
+       u64 duty_cycle;
        enum pwm_polarity polarity;
        bool enabled;
 };
@@ -107,13 +107,13 @@ static inline bool pwm_is_enabled(const struct pwm_device *pwm)
        return state.enabled;
 }
 
-static inline void pwm_set_period(struct pwm_device *pwm, unsigned int period)
+static inline void pwm_set_period(struct pwm_device *pwm, u64 period)
 {
        if (pwm)
                pwm->state.period = period;
 }
 
-static inline unsigned int pwm_get_period(const struct pwm_device *pwm)
+static inline u64 pwm_get_period(const struct pwm_device *pwm)
 {
        struct pwm_state state;
 
@@ -128,7 +128,7 @@ static inline void pwm_set_duty_cycle(struct pwm_device *pwm, unsigned int duty)
                pwm->state.duty_cycle = duty;
 }
 
-static inline unsigned int pwm_get_duty_cycle(const struct pwm_device *pwm)
+static inline u64 pwm_get_duty_cycle(const struct pwm_device *pwm)
 {
        struct pwm_state state;
 
index e7b7bab..2fa68bf 100644 (file)
@@ -38,6 +38,7 @@
 #include <linux/types.h>
 #include <linux/mutex.h>
 #include <linux/virtio.h>
+#include <linux/cdev.h>
 #include <linux/completion.h>
 #include <linux/idr.h>
 #include <linux/of.h>
@@ -359,6 +360,7 @@ enum rsc_handling_status {
  * @unprepare: unprepare device after stop
  * @start:     power on the device and boot it
  * @stop:      power off the device
+ * @attach:    attach to a device that is already powered up
  * @kick:      kick a virtqueue (virtqueue id given as a parameter)
  * @da_to_va:  optional platform hook to perform address translations
  * @parse_fw:  parse firmware to extract information (e.g. resource table)
@@ -379,6 +381,7 @@ struct rproc_ops {
        int (*unprepare)(struct rproc *rproc);
        int (*start)(struct rproc *rproc);
        int (*stop)(struct rproc *rproc);
+       int (*attach)(struct rproc *rproc);
        void (*kick)(struct rproc *rproc, int vqid);
        void * (*da_to_va)(struct rproc *rproc, u64 da, size_t len);
        int (*parse_fw)(struct rproc *rproc, const struct firmware *fw);
@@ -400,6 +403,8 @@ struct rproc_ops {
  * @RPROC_RUNNING:     device is up and running
  * @RPROC_CRASHED:     device has crashed; need to start recovery
  * @RPROC_DELETED:     device is deleted
+ * @RPROC_DETACHED:    device has been booted by another entity and is waiting
+ *                     for the core to attach to it
  * @RPROC_LAST:                just keep this one at the end
  *
  * Please note that the values of these states are used as indices
@@ -414,7 +419,8 @@ enum rproc_state {
        RPROC_RUNNING   = 2,
        RPROC_CRASHED   = 3,
        RPROC_DELETED   = 4,
-       RPROC_LAST      = 5,
+       RPROC_DETACHED  = 5,
+       RPROC_LAST      = 6,
 };
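For a processor that falls under the new RPROC_DETACHED state, the driver supplies .attach instead of (or alongside) .start. A hedged sketch with invented example_* callbacks:

static int example_rproc_attach(struct rproc *rproc)
{
	/* The firmware is already running; just synchronize with it (stub). */
	return 0;
}

static void example_rproc_kick(struct rproc *rproc, int vqid)
{
	/* Ring a doorbell/mailbox for virtqueue @vqid (stub). */
}

static const struct rproc_ops example_rproc_ops = {
	.attach	= example_rproc_attach,
	.kick	= example_rproc_kick,
};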
 
 /**
@@ -435,6 +441,20 @@ enum rproc_crash_type {
 };
 
 /**
+ * enum rproc_dump_mechanism - Coredump options for core
+ * @RPROC_COREDUMP_DEFAULT:    Copy dump to separate buffer and carry on with
+ *                             recovery
+ * @RPROC_COREDUMP_INLINE:     Read segments directly from device memory. Stall
+ *                             recovery until all segments are read
+ * @RPROC_COREDUMP_DISABLED:   Don't perform any dump
+ */
+enum rproc_dump_mechanism {
+       RPROC_COREDUMP_DEFAULT,
+       RPROC_COREDUMP_INLINE,
+       RPROC_COREDUMP_DISABLED,
+};
+
+/**
  * struct rproc_dump_segment - segment info from ELF header
  * @node:      list node related to the rproc segment list
  * @da:                device address of the segment
@@ -451,7 +471,7 @@ struct rproc_dump_segment {
 
        void *priv;
        void (*dump)(struct rproc *rproc, struct rproc_dump_segment *segment,
-                    void *dest);
+                    void *dest, size_t offset, size_t size);
        loff_t offset;
 };
 
@@ -466,6 +486,7 @@ struct rproc_dump_segment {
  * @dev: virtual device for refcounting and common remoteproc behavior
  * @power: refcount of users who need this rproc powered up
  * @state: state of the device
+ * @dump_conf: Currently selected coredump configuration
  * @lock: lock which protects concurrent manipulations of the rproc
  * @dbg_dir: debugfs directory of this rproc device
  * @traces: list of trace buffers
@@ -486,8 +507,11 @@ struct rproc_dump_segment {
  * @table_sz: size of @cached_table
  * @has_iommu: flag to indicate if remote processor is behind an MMU
  * @auto_boot: flag to indicate if remote processor should be auto-started
+ * @autonomous: true if an external entity has booted the remote processor
  * @dump_segments: list of segments in the firmware
  * @nb_vdev: number of vdev currently handled by rproc
+ * @char_dev: character device of the rproc
+ * @cdev_put_on_release: flag to indicate if remoteproc should be shut down on @char_dev release
  */
 struct rproc {
        struct list_head node;
@@ -499,6 +523,7 @@ struct rproc {
        struct device dev;
        atomic_t power;
        unsigned int state;
+       enum rproc_dump_mechanism dump_conf;
        struct mutex lock;
        struct dentry *dbg_dir;
        struct list_head traces;
@@ -519,10 +544,13 @@ struct rproc {
        size_t table_sz;
        bool has_iommu;
        bool auto_boot;
+       bool autonomous;
        struct list_head dump_segments;
        int nb_vdev;
        u8 elf_class;
        u16 elf_machine;
+       struct cdev cdev;
+       bool cdev_put_on_release;
 };
 
 /**
@@ -603,6 +631,7 @@ void rproc_put(struct rproc *rproc);
 int rproc_add(struct rproc *rproc);
 int rproc_del(struct rproc *rproc);
 void rproc_free(struct rproc *rproc);
+void rproc_resource_cleanup(struct rproc *rproc);
 
 struct rproc *devm_rproc_alloc(struct device *dev, const char *name,
                               const struct rproc_ops *ops,
@@ -630,7 +659,8 @@ int rproc_coredump_add_custom_segment(struct rproc *rproc,
                                      dma_addr_t da, size_t size,
                                      void (*dumpfn)(struct rproc *rproc,
                                                     struct rproc_dump_segment *segment,
-                                                    void *dest),
+                                                    void *dest, size_t offset,
+                                                    size_t size),
                                      void *priv);
 int rproc_coredump_set_elf_info(struct rproc *rproc, u8 class, u16 machine);
 
diff --git a/include/linux/remoteproc/qcom_q6v5_ipa_notify.h b/include/linux/remoteproc/qcom_q6v5_ipa_notify.h
deleted file mode 100644 (file)
index 0820edc..0000000
+++ /dev/null
@@ -1,82 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-/* Copyright (C) 2019 Linaro Ltd. */
-
-#ifndef __QCOM_Q6V5_IPA_NOTIFY_H__
-#define __QCOM_Q6V5_IPA_NOTIFY_H__
-
-#if IS_ENABLED(CONFIG_QCOM_Q6V5_IPA_NOTIFY)
-
-#include <linux/remoteproc.h>
-
-enum qcom_rproc_event {
-       MODEM_STARTING  = 0,    /* Modem is about to be started */
-       MODEM_RUNNING   = 1,    /* Startup complete; modem is operational */
-       MODEM_STOPPING  = 2,    /* Modem is about to shut down */
-       MODEM_CRASHED   = 3,    /* Modem has crashed (implies stopping) */
-       MODEM_OFFLINE   = 4,    /* Modem is now offline */
-       MODEM_REMOVING  = 5,    /* Modem is about to be removed */
-};
-
-typedef void (*qcom_ipa_notify_t)(void *data, enum qcom_rproc_event event);
-
-struct qcom_rproc_ipa_notify {
-       struct rproc_subdev subdev;
-
-       qcom_ipa_notify_t notify;
-       void *data;
-};
-
-/**
- * qcom_add_ipa_notify_subdev() - Register IPA notification subdevice
- * @rproc:     rproc handle
- * @ipa_notify:        IPA notification subdevice handle
- *
- * Register the @ipa_notify subdevice with the @rproc so modem events
- * can be sent to IPA when they occur.
- *
- * This is defined in "qcom_q6v5_ipa_notify.c".
- */
-void qcom_add_ipa_notify_subdev(struct rproc *rproc,
-               struct qcom_rproc_ipa_notify *ipa_notify);
-
-/**
- * qcom_remove_ipa_notify_subdev() - Remove IPA SSR subdevice
- * @rproc:     rproc handle
- * @ipa_notify:        IPA notification subdevice handle
- *
- * This is defined in "qcom_q6v5_ipa_notify.c".
- */
-void qcom_remove_ipa_notify_subdev(struct rproc *rproc,
-               struct qcom_rproc_ipa_notify *ipa_notify);
-
-/**
- * qcom_register_ipa_notify() - Register IPA notification function
- * @rproc:     Remote processor handle
- * @notify:    Non-null IPA notification callback function pointer
- * @data:      Data supplied to IPA notification callback function
- *
- * @Return: 0 if successful, or a negative error code otherwise
- *
- * This is defined in "qcom_q6v5_mss.c".
- */
-int qcom_register_ipa_notify(struct rproc *rproc, qcom_ipa_notify_t notify,
-                            void *data);
-/**
- * qcom_deregister_ipa_notify() - Deregister IPA notification function
- * @rproc:     Remote processor handle
- *
- * This is defined in "qcom_q6v5_mss.c".
- */
-void qcom_deregister_ipa_notify(struct rproc *rproc);
-
-#else /* !IS_ENABLED(CONFIG_QCOM_Q6V5_IPA_NOTIFY) */
-
-struct qcom_rproc_ipa_notify { /* empty */ };
-
-#define qcom_add_ipa_notify_subdev(rproc, ipa_notify)          /* no-op */
-#define qcom_remove_ipa_notify_subdev(rproc, ipa_notify)       /* no-op */
-
-#endif /* !IS_ENABLED(CONFIG_QCOM_Q6V5_IPA_NOTIFY) */
-
-#endif /* !__QCOM_Q6V5_IPA_NOTIFY_H__ */
index fa8e386..6470516 100644 (file)
@@ -5,17 +5,43 @@ struct notifier_block;
 
 #if IS_ENABLED(CONFIG_QCOM_RPROC_COMMON)
 
-int qcom_register_ssr_notifier(struct notifier_block *nb);
-void qcom_unregister_ssr_notifier(struct notifier_block *nb);
+/**
+ * enum qcom_ssr_notify_type - Startup/Shutdown events related to a remoteproc
+ * processor.
+ *
+ * @QCOM_SSR_BEFORE_POWERUP:   Remoteproc about to start (prepare stage)
+ * @QCOM_SSR_AFTER_POWERUP:    Remoteproc is running (start stage)
+ * @QCOM_SSR_BEFORE_SHUTDOWN:  Remoteproc crashed or shutting down (stop stage)
+ * @QCOM_SSR_AFTER_SHUTDOWN:   Remoteproc is down (unprepare stage)
+ */
+enum qcom_ssr_notify_type {
+       QCOM_SSR_BEFORE_POWERUP,
+       QCOM_SSR_AFTER_POWERUP,
+       QCOM_SSR_BEFORE_SHUTDOWN,
+       QCOM_SSR_AFTER_SHUTDOWN,
+};
+
+struct qcom_ssr_notify_data {
+       const char *name;
+       bool crashed;
+};
+
+void *qcom_register_ssr_notifier(const char *name, struct notifier_block *nb);
+int qcom_unregister_ssr_notifier(void *notify, struct notifier_block *nb);
 
 #else
 
-static inline int qcom_register_ssr_notifier(struct notifier_block *nb)
+static inline void *qcom_register_ssr_notifier(const char *name,
+                                              struct notifier_block *nb)
 {
-       return 0;
+       return NULL;
 }
 
-static inline void qcom_unregister_ssr_notifier(struct notifier_block *nb) {}
+static inline int qcom_unregister_ssr_notifier(void *notify,
+                                              struct notifier_block *nb)
+{
+       return 0;
+}
 
 #endif
 
index 7b22366..8ed37f9 100644 (file)
@@ -206,7 +206,7 @@ struct rmi_device_platform_data_spi {
  *
  * @reset_delay_ms - after issuing a reset command to the touch sensor, the
  * driver waits a few milliseconds to give the firmware a chance to
- * to re-initialize.  You can override the default wait period here.
+ * re-initialize.  You can override the default wait period here.
  * @irq: irq associated with the attn gpio line, or negative
  */
 struct rmi_device_platform_data {
index bba3db3..22d1575 100644 (file)
@@ -55,10 +55,6 @@ extern struct class *rtc_class;
  *
  * The (current) exceptions are mostly filesystem hooks:
  *   - the proc() hook for procfs
- *   - non-ioctl() chardev hooks:  open(), release()
- *
- * REVISIT those periodic irq calls *do* have ops_lock when they're
- * issued through ioctl() ...
  */
 struct rtc_class_ops {
        int (*ioctl)(struct device *, unsigned int, unsigned long);
index 52bcc9f..93ecd93 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/task_io_accounting.h>
 #include <linux/posix-timers.h>
 #include <linux/rseq.h>
+#include <linux/seqlock.h>
 #include <linux/kcsan.h>
 
 /* task_struct member predeclarations (sorted alphabetically): */
@@ -889,6 +890,10 @@ struct task_struct {
        /* Empty if CONFIG_POSIX_CPUTIMERS=n */
        struct posix_cputimers          posix_cputimers;
 
+#ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK
+       struct posix_cputimers_work     posix_cputimers_work;
+#endif
+
        /* Process credentials: */
 
        /* Tracer's credentials at attach: */
@@ -1049,7 +1054,7 @@ struct task_struct {
        /* Protected by ->alloc_lock: */
        nodemask_t                      mems_allowed;
        /* Seqence number to catch updates: */
-       seqcount_t                      mems_allowed_seq;
+       seqcount_spinlock_t             mems_allowed_seq;
        int                             cpuset_mem_spread_rotor;
        int                             cpuset_slab_spread_rotor;
 #endif
index 85023dd..f889e33 100644 (file)
@@ -178,14 +178,16 @@ static inline bool in_vfork(struct task_struct *tsk)
  */
 static inline gfp_t current_gfp_context(gfp_t flags)
 {
-       if (unlikely(current->flags & (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS))) {
+       unsigned int pflags = READ_ONCE(current->flags);
+
+       if (unlikely(pflags & (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS))) {
                /*
                 * NOIO implies both NOIO and NOFS and it is a weaker context
                 * so always make sure it makes precedence
                 */
-               if (current->flags & PF_MEMALLOC_NOIO)
+               if (pflags & PF_MEMALLOC_NOIO)
                        flags &= ~(__GFP_IO | __GFP_FS);
-               else if (current->flags & PF_MEMALLOC_NOFS)
+               else if (pflags & PF_MEMALLOC_NOFS)
                        flags &= ~__GFP_FS;
        }
        return flags;
index ae3060f..a989650 100644 (file)
@@ -88,6 +88,7 @@ struct task_struct *fork_idle(int);
 struct mm_struct *copy_init_mm(void);
 extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
 extern long kernel_wait4(pid_t, int __user *, int, struct rusage *);
+int kernel_wait(pid_t pid, int *stat);
 
 extern void free_task(struct task_struct *tsk);
 
index 917d88e..a8ec3b6 100644 (file)
@@ -36,6 +36,9 @@ struct user_struct {
     defined(CONFIG_NET) || defined(CONFIG_IO_URING)
        atomic_long_t locked_vm;
 #endif
+#ifdef CONFIG_WATCH_QUEUE
+       atomic_t nr_watches;    /* The number of watches this user currently has */
+#endif
 
        /* Miscellaneous per-user rate limit */
        struct ratelimit_state ratelimit;
index 54bc204..962d976 100644 (file)
  *
  * Copyrights:
  * - Based on x86_64 vsyscall gettimeofday: Keith Owens, Andrea Arcangeli
+ * - Sequence counters with associated locks, (C) 2020 Linutronix GmbH
  */
 
-#include <linux/spinlock.h>
-#include <linux/preempt.h>
-#include <linux/lockdep.h>
 #include <linux/compiler.h>
 #include <linux/kcsan-checks.h>
+#include <linux/lockdep.h>
+#include <linux/mutex.h>
+#include <linux/preempt.h>
+#include <linux/spinlock.h>
+
 #include <asm/processor.h>
 
 /*
  * This mechanism can't be used if the protected data contains pointers,
  * as the writer can invalidate a pointer that a reader is following.
  *
+ * If the write serialization mechanism is one of the common kernel
+ * locking primitives, use a sequence counter with associated lock
+ * (seqcount_LOCKTYPE_t) instead.
+ *
  * If it's desired to automatically handle the sequence counter writer
  * serialization and non-preemptibility requirements, use a sequential
  * lock (seqlock_t) instead.
@@ -72,17 +79,18 @@ static inline void __seqcount_init(seqcount_t *s, const char *name,
 }
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define SEQCOUNT_DEP_MAP_INIT(lockname) \
-               .dep_map = { .name = #lockname } \
+
+# define SEQCOUNT_DEP_MAP_INIT(lockname)                               \
+               .dep_map = { .name = #lockname }
 
 /**
  * seqcount_init() - runtime initializer for seqcount_t
  * @s: Pointer to the seqcount_t instance
  */
-# define seqcount_init(s)                              \
-       do {                                            \
-               static struct lock_class_key __key;     \
-               __seqcount_init((s), #s, &__key);       \
+# define seqcount_init(s)                                              \
+       do {                                                            \
+               static struct lock_class_key __key;                     \
+               __seqcount_init((s), #s, &__key);                       \
        } while (0)
 
 static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
@@ -108,9 +116,143 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
  */
 #define SEQCNT_ZERO(name) { .sequence = 0, SEQCOUNT_DEP_MAP_INIT(name) }
 
+/*
+ * Sequence counters with associated locks (seqcount_LOCKTYPE_t)
+ *
+ * A sequence counter which associates the lock used for writer
+ * serialization at initialization time. This enables lockdep to validate
+ * that the write side critical section is properly serialized.
+ *
+ * For associated locks which do not implicitly disable preemption,
+ * preemption protection is enforced in the write side function.
+ *
+ * Lockdep is never used in any of the raw write variants.
+ *
+ * See Documentation/locking/seqlock.rst
+ */
+
+#ifdef CONFIG_LOCKDEP
+#define __SEQ_LOCK(expr)       expr
+#else
+#define __SEQ_LOCK(expr)
+#endif
+
+/**
+ * typedef seqcount_LOCKNAME_t - sequence counter with LOCKTYPE associated
+ * @seqcount:  The real sequence counter
+ * @lock:      Pointer to the associated spinlock
+ *
+ * A plain sequence counter with external writer synchronization by a
+ * spinlock. The spinlock is associated to the sequence count in the
+ * static initializer or init function. This enables lockdep to validate
+ * that the write side critical section is properly serialized.
+ */
+
+/**
+ * seqcount_LOCKNAME_init() - runtime initializer for seqcount_LOCKNAME_t
+ * @s:         Pointer to the seqcount_LOCKNAME_t instance
+ * @lock:      Pointer to the associated LOCKTYPE
+ */
+
+/*
+ * SEQCOUNT_LOCKTYPE() - Instantiate seqcount_LOCKNAME_t and helpers
+ * @locktype:          actual typename
+ * @lockname:          name
+ * @preemptible:       preemptibility of above locktype
+ * @lockmember:                argument for lockdep_assert_held()
+ */
+#define SEQCOUNT_LOCKTYPE(locktype, lockname, preemptible, lockmember) \
+typedef struct seqcount_##lockname {                                   \
+       seqcount_t              seqcount;                               \
+       __SEQ_LOCK(locktype     *lock);                                 \
+} seqcount_##lockname##_t;                                             \
+                                                                       \
+static __always_inline void                                            \
+seqcount_##lockname##_init(seqcount_##lockname##_t *s, locktype *lock) \
+{                                                                      \
+       seqcount_init(&s->seqcount);                                    \
+       __SEQ_LOCK(s->lock = lock);                                     \
+}                                                                      \
+                                                                       \
+static __always_inline seqcount_t *                                    \
+__seqcount_##lockname##_ptr(seqcount_##lockname##_t *s)                        \
+{                                                                      \
+       return &s->seqcount;                                            \
+}                                                                      \
+                                                                       \
+static __always_inline bool                                            \
+__seqcount_##lockname##_preemptible(seqcount_##lockname##_t *s)                \
+{                                                                      \
+       return preemptible;                                             \
+}                                                                      \
+                                                                       \
+static __always_inline void                                            \
+__seqcount_##lockname##_assert(seqcount_##lockname##_t *s)             \
+{                                                                      \
+       __SEQ_LOCK(lockdep_assert_held(lockmember));                    \
+}
+
+/*
+ * __seqprop() for seqcount_t
+ */
+
+static inline seqcount_t *__seqcount_ptr(seqcount_t *s)
+{
+       return s;
+}
+
+static inline bool __seqcount_preemptible(seqcount_t *s)
+{
+       return false;
+}
+
+static inline void __seqcount_assert(seqcount_t *s)
+{
+       lockdep_assert_preemption_disabled();
+}
+
+SEQCOUNT_LOCKTYPE(raw_spinlock_t,      raw_spinlock,   false,  s->lock)
+SEQCOUNT_LOCKTYPE(spinlock_t,          spinlock,       false,  s->lock)
+SEQCOUNT_LOCKTYPE(rwlock_t,            rwlock,         false,  s->lock)
+SEQCOUNT_LOCKTYPE(struct mutex,                mutex,          true,   s->lock)
+SEQCOUNT_LOCKTYPE(struct ww_mutex,     ww_mutex,       true,   &s->lock->base)
+
+/**
+ * SEQCNT_LOCKNAME_ZERO - static initializer for seqcount_LOCKNAME_t
+ * @name:      Name of the seqcount_LOCKNAME_t instance
+ * @lock:      Pointer to the associated LOCKTYPE
+ */
+
+#define SEQCOUNT_LOCKTYPE_ZERO(seq_name, assoc_lock) {                 \
+       .seqcount               = SEQCNT_ZERO(seq_name.seqcount),       \
+       __SEQ_LOCK(.lock        = (assoc_lock))                         \
+}
+
+#define SEQCNT_SPINLOCK_ZERO(name, lock)       SEQCOUNT_LOCKTYPE_ZERO(name, lock)
+#define SEQCNT_RAW_SPINLOCK_ZERO(name, lock)   SEQCOUNT_LOCKTYPE_ZERO(name, lock)
+#define SEQCNT_RWLOCK_ZERO(name, lock)         SEQCOUNT_LOCKTYPE_ZERO(name, lock)
+#define SEQCNT_MUTEX_ZERO(name, lock)          SEQCOUNT_LOCKTYPE_ZERO(name, lock)
+#define SEQCNT_WW_MUTEX_ZERO(name, lock)       SEQCOUNT_LOCKTYPE_ZERO(name, lock)
+
+
+#define __seqprop_case(s, lockname, prop)                              \
+       seqcount_##lockname##_t: __seqcount_##lockname##_##prop((void *)(s))
+
+#define __seqprop(s, prop) _Generic(*(s),                              \
+       seqcount_t:             __seqcount_##prop((void *)(s)),         \
+       __seqprop_case((s),     raw_spinlock,   prop),                  \
+       __seqprop_case((s),     spinlock,       prop),                  \
+       __seqprop_case((s),     rwlock,         prop),                  \
+       __seqprop_case((s),     mutex,          prop),                  \
+       __seqprop_case((s),     ww_mutex,       prop))
+
+#define __seqcount_ptr(s)              __seqprop(s, ptr)
+#define __seqcount_lock_preemptible(s) __seqprop(s, preemptible)
+#define __seqcount_assert_lock_held(s) __seqprop(s, assert)
+
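As a usage sketch (illustrative only), here is how a spinlock-associated sequence counter might be wired up with the helpers generated above. The raw write variants shown in this hunk are used on the write side; in practice the non-raw write_seqcount_begin()/end() pair, which adds the lockdep assertion, would usually be preferred but is not part of this excerpt:

struct example_cfg {
	spinlock_t		lock;
	seqcount_spinlock_t	seq;	/* associated with @lock at init time */
	u64			a, b;
};

static void example_cfg_init(struct example_cfg *c)
{
	spin_lock_init(&c->lock);
	seqcount_spinlock_init(&c->seq, &c->lock);
}

static void example_cfg_update(struct example_cfg *c, u64 a, u64 b)
{
	spin_lock(&c->lock);			/* writer serialization */
	raw_write_seqcount_begin(&c->seq);
	c->a = a;
	c->b = b;
	raw_write_seqcount_end(&c->seq);
	spin_unlock(&c->lock);
}

static u64 example_cfg_read_sum(struct example_cfg *c)
{
	unsigned int seq;
	u64 sum;

	do {
		seq = read_seqcount_begin(&c->seq);
		sum = c->a + c->b;
	} while (read_seqcount_retry(&c->seq, seq));

	return sum;
}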
 /**
  * __read_seqcount_begin() - begin a seqcount_t read section w/o barrier
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
  *
  * __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb()
  * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
@@ -122,7 +264,10 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
  *
  * Return: count to be passed to read_seqcount_retry()
  */
-static inline unsigned __read_seqcount_begin(const seqcount_t *s)
+#define __read_seqcount_begin(s)                                       \
+       __read_seqcount_t_begin(__seqcount_ptr(s))
+
+static inline unsigned __read_seqcount_t_begin(const seqcount_t *s)
 {
        unsigned ret;
 
@@ -138,32 +283,38 @@ repeat:
 
 /**
  * raw_read_seqcount_begin() - begin a seqcount_t read section w/o lockdep
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
  *
  * Return: count to be passed to read_seqcount_retry()
  */
-static inline unsigned raw_read_seqcount_begin(const seqcount_t *s)
+#define raw_read_seqcount_begin(s)                                     \
+       raw_read_seqcount_t_begin(__seqcount_ptr(s))
+
+static inline unsigned raw_read_seqcount_t_begin(const seqcount_t *s)
 {
-       unsigned ret = __read_seqcount_begin(s);
+       unsigned ret = __read_seqcount_t_begin(s);
        smp_rmb();
        return ret;
 }
 
 /**
  * read_seqcount_begin() - begin a seqcount_t read critical section
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
  *
  * Return: count to be passed to read_seqcount_retry()
  */
-static inline unsigned read_seqcount_begin(const seqcount_t *s)
+#define read_seqcount_begin(s)                                         \
+       read_seqcount_t_begin(__seqcount_ptr(s))
+
+static inline unsigned read_seqcount_t_begin(const seqcount_t *s)
 {
        seqcount_lockdep_reader_access(s);
-       return raw_read_seqcount_begin(s);
+       return raw_read_seqcount_t_begin(s);
 }
 
 /**
  * raw_read_seqcount() - read the raw seqcount_t counter value
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
  *
  * raw_read_seqcount opens a read critical section of the given
  * seqcount_t, without any lockdep checking, and without checking or
@@ -172,7 +323,10 @@ static inline unsigned read_seqcount_begin(const seqcount_t *s)
  *
  * Return: count to be passed to read_seqcount_retry()
  */
-static inline unsigned raw_read_seqcount(const seqcount_t *s)
+#define raw_read_seqcount(s)                                           \
+       raw_read_seqcount_t(__seqcount_ptr(s))
+
+static inline unsigned raw_read_seqcount_t(const seqcount_t *s)
 {
        unsigned ret = READ_ONCE(s->sequence);
        smp_rmb();
@@ -183,7 +337,7 @@ static inline unsigned raw_read_seqcount(const seqcount_t *s)
 /**
  * raw_seqcount_begin() - begin a seqcount_t read critical section w/o
  *                        lockdep and w/o counter stabilization
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
  *
  * raw_seqcount_begin opens a read critical section of the given
  * seqcount_t. Unlike read_seqcount_begin(), this function will not wait
@@ -197,18 +351,21 @@ static inline unsigned raw_read_seqcount(const seqcount_t *s)
  *
  * Return: count to be passed to read_seqcount_retry()
  */
-static inline unsigned raw_seqcount_begin(const seqcount_t *s)
+#define raw_seqcount_begin(s)                                          \
+       raw_seqcount_t_begin(__seqcount_ptr(s))
+
+static inline unsigned raw_seqcount_t_begin(const seqcount_t *s)
 {
        /*
         * If the counter is odd, let read_seqcount_retry() fail
         * by decrementing the counter.
         */
-       return raw_read_seqcount(s) & ~1;
+       return raw_read_seqcount_t(s) & ~1;
 }
 
 /**
  * __read_seqcount_retry() - end a seqcount_t read section w/o barrier
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
  * @start: count, from read_seqcount_begin()
  *
  * __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb()
@@ -221,7 +378,10 @@ static inline unsigned raw_seqcount_begin(const seqcount_t *s)
  *
  * Return: true if a read section retry is required, else false
  */
-static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
+#define __read_seqcount_retry(s, start)                                        \
+       __read_seqcount_t_retry(__seqcount_ptr(s), start)
+
+static inline int __read_seqcount_t_retry(const seqcount_t *s, unsigned start)
 {
        kcsan_atomic_next(0);
        return unlikely(READ_ONCE(s->sequence) != start);
@@ -229,7 +389,7 @@ static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
 
 /**
  * read_seqcount_retry() - end a seqcount_t read critical section
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
  * @start: count, from read_seqcount_begin()
  *
  * read_seqcount_retry closes the read critical section of given
@@ -238,17 +398,28 @@ static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
  *
  * Return: true if a read section retry is required, else false
  */
-static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
+#define read_seqcount_retry(s, start)                                  \
+       read_seqcount_t_retry(__seqcount_ptr(s), start)
+
+static inline int read_seqcount_t_retry(const seqcount_t *s, unsigned start)
 {
        smp_rmb();
-       return __read_seqcount_retry(s, start);
+       return __read_seqcount_t_retry(s, start);
 }
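
For illustration, a minimal reader-side sketch (not from this patch) of how the begin/retry pair is normally used; the protected data and all names are hypothetical:

    /* Illustrative only, not part of the patch. */
    static seqcount_t foo_seq = SEQCNT_ZERO(foo_seq);
    static u64 foo_a, foo_b;

    static void foo_read(u64 *a, u64 *b)
    {
            unsigned int seq;

            do {
                    seq = read_seqcount_begin(&foo_seq);
                    *a = foo_a;     /* speculative reads, may be retried */
                    *b = foo_b;
            } while (read_seqcount_retry(&foo_seq, seq));
    }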
 
 /**
  * raw_write_seqcount_begin() - start a seqcount_t write section w/o lockdep
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
  */
-static inline void raw_write_seqcount_begin(seqcount_t *s)
+#define raw_write_seqcount_begin(s)                                    \
+do {                                                                   \
+       if (__seqcount_lock_preemptible(s))                             \
+               preempt_disable();                                      \
+                                                                       \
+       raw_write_seqcount_t_begin(__seqcount_ptr(s));                  \
+} while (0)
+
+static inline void raw_write_seqcount_t_begin(seqcount_t *s)
 {
        kcsan_nestable_atomic_begin();
        s->sequence++;
@@ -257,49 +428,50 @@ static inline void raw_write_seqcount_begin(seqcount_t *s)
 
 /**
  * raw_write_seqcount_end() - end a seqcount_t write section w/o lockdep
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
  */
-static inline void raw_write_seqcount_end(seqcount_t *s)
+#define raw_write_seqcount_end(s)                                      \
+do {                                                                   \
+       raw_write_seqcount_t_end(__seqcount_ptr(s));                    \
+                                                                       \
+       if (__seqcount_lock_preemptible(s))                             \
+               preempt_enable();                                       \
+} while (0)
+
+static inline void raw_write_seqcount_t_end(seqcount_t *s)
 {
        smp_wmb();
        s->sequence++;
        kcsan_nestable_atomic_end();
 }
 
-static inline void __write_seqcount_begin_nested(seqcount_t *s, int subclass)
-{
-       raw_write_seqcount_begin(s);
-       seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
-}
-
 /**
  * write_seqcount_begin_nested() - start a seqcount_t write section with
  *                                 custom lockdep nesting level
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
  * @subclass: lockdep nesting level
  *
  * See Documentation/locking/lockdep-design.rst
  */
-static inline void write_seqcount_begin_nested(seqcount_t *s, int subclass)
+#define write_seqcount_begin_nested(s, subclass)                       \
+do {                                                                   \
+       __seqcount_assert_lock_held(s);                                 \
+                                                                       \
+       if (__seqcount_lock_preemptible(s))                             \
+               preempt_disable();                                      \
+                                                                       \
+       write_seqcount_t_begin_nested(__seqcount_ptr(s), subclass);     \
+} while (0)
+
+static inline void write_seqcount_t_begin_nested(seqcount_t *s, int subclass)
 {
-       lockdep_assert_preemption_disabled();
-       __write_seqcount_begin_nested(s, subclass);
-}
-
-/*
- * A write_seqcount_begin() variant w/o lockdep non-preemptibility checks.
- *
- * Use for internal seqlock.h code where it's known that preemption is
- * already disabled. For example, seqlock_t write side functions.
- */
-static inline void __write_seqcount_begin(seqcount_t *s)
-{
-       __write_seqcount_begin_nested(s, 0);
+       raw_write_seqcount_t_begin(s);
+       seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
 }
 
 /**
  * write_seqcount_begin() - start a seqcount_t write side critical section
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
  *
  * write_seqcount_begin opens a write side critical section of the given
  * seqcount_t.
@@ -308,26 +480,44 @@ static inline void __write_seqcount_begin(seqcount_t *s)
  * non-preemptible. If readers can be invoked from hardirq or softirq
  * context, interrupts or bottom halves must be respectively disabled.
  */
-static inline void write_seqcount_begin(seqcount_t *s)
+#define write_seqcount_begin(s)                                                \
+do {                                                                   \
+       __seqcount_assert_lock_held(s);                                 \
+                                                                       \
+       if (__seqcount_lock_preemptible(s))                             \
+               preempt_disable();                                      \
+                                                                       \
+       write_seqcount_t_begin(__seqcount_ptr(s));                      \
+} while (0)
+
+static inline void write_seqcount_t_begin(seqcount_t *s)
 {
-       write_seqcount_begin_nested(s, 0);
+       write_seqcount_t_begin_nested(s, 0);
 }
 
 /**
  * write_seqcount_end() - end a seqcount_t write side critical section
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
  *
  * The write section must've been opened with write_seqcount_begin().
  */
-static inline void write_seqcount_end(seqcount_t *s)
+#define write_seqcount_end(s)                                          \
+do {                                                                   \
+       write_seqcount_t_end(__seqcount_ptr(s));                        \
+                                                                       \
+       if (__seqcount_lock_preemptible(s))                             \
+               preempt_enable();                                       \
+} while (0)
+
+static inline void write_seqcount_t_end(seqcount_t *s)
 {
        seqcount_release(&s->dep_map, _RET_IP_);
-       raw_write_seqcount_end(s);
+       raw_write_seqcount_t_end(s);
 }
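
For illustration, a matching write-side sketch (not from this patch) using the seqcount_spinlock_t variant introduced by this series, so lockdep can verify the associated lock is held; all names are hypothetical:

    /* Illustrative only, not part of the patch. */
    static DEFINE_SPINLOCK(foo_lock);
    static seqcount_spinlock_t foo_seqcount;
    static u64 foo_a, foo_b;

    static void foo_init(void)
    {
            seqcount_spinlock_init(&foo_seqcount, &foo_lock);
    }

    static void foo_update(u64 a, u64 b)
    {
            spin_lock(&foo_lock);
            write_seqcount_begin(&foo_seqcount);    /* asserts foo_lock is held */
            foo_a = a;
            foo_b = b;
            write_seqcount_end(&foo_seqcount);
            spin_unlock(&foo_lock);
    }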
 
 /**
  * raw_write_seqcount_barrier() - do a seqcount_t write barrier
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
  *
  * This can be used to provide an ordering guarantee instead of the usual
  * consistency guarantee. It is one wmb cheaper, because it can collapse
@@ -366,7 +556,10 @@ static inline void write_seqcount_end(seqcount_t *s)
  *             WRITE_ONCE(X, false);
  *      }
  */
-static inline void raw_write_seqcount_barrier(seqcount_t *s)
+#define raw_write_seqcount_barrier(s)                                  \
+       raw_write_seqcount_t_barrier(__seqcount_ptr(s))
+
+static inline void raw_write_seqcount_t_barrier(seqcount_t *s)
 {
        kcsan_nestable_atomic_begin();
        s->sequence++;
@@ -378,12 +571,15 @@ static inline void raw_write_seqcount_barrier(seqcount_t *s)
 /**
  * write_seqcount_invalidate() - invalidate in-progress seqcount_t read
  *                               side operations
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
  *
  * After write_seqcount_invalidate, no seqcount_t read side operations
  * will complete successfully and see data older than this.
  */
-static inline void write_seqcount_invalidate(seqcount_t *s)
+#define write_seqcount_invalidate(s)                                   \
+       write_seqcount_t_invalidate(__seqcount_ptr(s))
+
+static inline void write_seqcount_t_invalidate(seqcount_t *s)
 {
        smp_wmb();
        kcsan_nestable_atomic_begin();
@@ -393,7 +589,7 @@ static inline void write_seqcount_invalidate(seqcount_t *s)
 
 /**
  * raw_read_seqcount_latch() - pick even/odd seqcount_t latch data copy
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
  *
  * Use seqcount_t latching to switch between two storage places protected
  * by a sequence counter. Doing so allows having interruptible, preemptible,
@@ -406,7 +602,10 @@ static inline void write_seqcount_invalidate(seqcount_t *s)
  * picking which data copy to read. The full counter value must then be
  * checked with read_seqcount_retry().
  */
-static inline int raw_read_seqcount_latch(seqcount_t *s)
+#define raw_read_seqcount_latch(s)                                     \
+       raw_read_seqcount_t_latch(__seqcount_ptr(s))
+
+static inline int raw_read_seqcount_t_latch(seqcount_t *s)
 {
        /* Pairs with the first smp_wmb() in raw_write_seqcount_latch() */
        int seq = READ_ONCE(s->sequence); /* ^^^ */
@@ -415,7 +614,7 @@ static inline int raw_read_seqcount_latch(seqcount_t *s)
 
 /**
  * raw_write_seqcount_latch() - redirect readers to even/odd copy
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
  *
  * The latch technique is a multiversion concurrency control method that allows
  * queries during non-atomic modifications. If you can guarantee queries never
@@ -494,7 +693,10 @@ static inline int raw_read_seqcount_latch(seqcount_t *s)
  *     When data is a dynamic data structure; one should use regular RCU
  *     patterns to manage the lifetimes of the objects within.
  */
-static inline void raw_write_seqcount_latch(seqcount_t *s)
+#define raw_write_seqcount_latch(s)                                    \
+       raw_write_seqcount_t_latch(__seqcount_ptr(s))
+
+static inline void raw_write_seqcount_t_latch(seqcount_t *s)
 {
        smp_wmb();      /* prior stores before incrementing "sequence" */
        s->sequence++;
@@ -516,20 +718,20 @@ typedef struct {
        spinlock_t lock;
 } seqlock_t;
 
-#define __SEQLOCK_UNLOCKED(lockname)                   \
-       {                                               \
-               .seqcount = SEQCNT_ZERO(lockname),      \
-               .lock = __SPIN_LOCK_UNLOCKED(lockname)  \
+#define __SEQLOCK_UNLOCKED(lockname)                                   \
+       {                                                               \
+               .seqcount = SEQCNT_ZERO(lockname),                      \
+               .lock = __SPIN_LOCK_UNLOCKED(lockname)                  \
        }
 
 /**
  * seqlock_init() - dynamic initializer for seqlock_t
  * @sl: Pointer to the seqlock_t instance
  */
-#define seqlock_init(sl)                               \
-       do {                                            \
-               seqcount_init(&(sl)->seqcount);         \
-               spin_lock_init(&(sl)->lock);            \
+#define seqlock_init(sl)                                               \
+       do {                                                            \
+               seqcount_init(&(sl)->seqcount);                         \
+               spin_lock_init(&(sl)->lock);                            \
        } while (0)
 
 /**
@@ -592,7 +794,7 @@ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
 static inline void write_seqlock(seqlock_t *sl)
 {
        spin_lock(&sl->lock);
-       __write_seqcount_begin(&sl->seqcount);
+       write_seqcount_t_begin(&sl->seqcount);
 }
 
 /**
@@ -604,7 +806,7 @@ static inline void write_seqlock(seqlock_t *sl)
  */
 static inline void write_sequnlock(seqlock_t *sl)
 {
-       write_seqcount_end(&sl->seqcount);
+       write_seqcount_t_end(&sl->seqcount);
        spin_unlock(&sl->lock);
 }
 
@@ -618,7 +820,7 @@ static inline void write_sequnlock(seqlock_t *sl)
 static inline void write_seqlock_bh(seqlock_t *sl)
 {
        spin_lock_bh(&sl->lock);
-       __write_seqcount_begin(&sl->seqcount);
+       write_seqcount_t_begin(&sl->seqcount);
 }
 
 /**
@@ -631,7 +833,7 @@ static inline void write_seqlock_bh(seqlock_t *sl)
  */
 static inline void write_sequnlock_bh(seqlock_t *sl)
 {
-       write_seqcount_end(&sl->seqcount);
+       write_seqcount_t_end(&sl->seqcount);
        spin_unlock_bh(&sl->lock);
 }
 
@@ -645,7 +847,7 @@ static inline void write_sequnlock_bh(seqlock_t *sl)
 static inline void write_seqlock_irq(seqlock_t *sl)
 {
        spin_lock_irq(&sl->lock);
-       __write_seqcount_begin(&sl->seqcount);
+       write_seqcount_t_begin(&sl->seqcount);
 }
 
 /**
@@ -657,7 +859,7 @@ static inline void write_seqlock_irq(seqlock_t *sl)
  */
 static inline void write_sequnlock_irq(seqlock_t *sl)
 {
-       write_seqcount_end(&sl->seqcount);
+       write_seqcount_t_end(&sl->seqcount);
        spin_unlock_irq(&sl->lock);
 }
 
@@ -666,7 +868,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
        unsigned long flags;
 
        spin_lock_irqsave(&sl->lock, flags);
-       __write_seqcount_begin(&sl->seqcount);
+       write_seqcount_t_begin(&sl->seqcount);
        return flags;
 }
 
@@ -695,13 +897,13 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
 static inline void
 write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
 {
-       write_seqcount_end(&sl->seqcount);
+       write_seqcount_t_end(&sl->seqcount);
        spin_unlock_irqrestore(&sl->lock, flags);
 }
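
For illustration, a short seqlock_t sketch (not from this patch) combining the write functions above with the classic read_seqbegin()/read_seqretry() loop; names are hypothetical:

    /* Illustrative only, not part of the patch. */
    static DEFINE_SEQLOCK(bar_lock);
    static u64 bar_val;

    static void bar_set(u64 v)
    {
            write_seqlock(&bar_lock);       /* takes the spinlock, bumps the count */
            bar_val = v;
            write_sequnlock(&bar_lock);
    }

    static u64 bar_get(void)
    {
            unsigned int seq;
            u64 v;

            do {
                    seq = read_seqbegin(&bar_lock);
                    v = bar_val;
            } while (read_seqretry(&bar_lock, seq));

            return v;
    }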
 
 /**
  * read_seqlock_excl() - begin a seqlock_t locking reader section
- * @sl: Pointer to seqlock_t
+ * @sl:        Pointer to seqlock_t
  *
  * read_seqlock_excl opens a seqlock_t locking reader critical section.  A
  * locking reader exclusively locks out *both* other writers *and* other
index 7ac1154..5a472ec 100644 (file)
@@ -2,7 +2,7 @@
 /*
  * K3 Ring Accelerator (RA) subsystem interface
  *
- * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ * Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com
  */
 
 #ifndef __SOC_TI_K3_RINGACC_API_H_
index 9745df6..c75ef99 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Keystone Navigator Queue Management Sub-System header
  *
- * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
+ * Copyright (C) 2014 Texas Instruments Incorporated - https://www.ti.com
  * Author:     Sandeep Nair <sandeep_n@ti.com>
  *             Cyril Chemparathy <cyril@ti.com>
  *             Santosh Shilimkar <santosh.shilimkar@ti.com>
index eac8e0c..1f6e76d 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Texas Instruments' Message Manager
  *
- * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2015-2016 Texas Instruments Incorporated - https://www.ti.com/
  *     Nishanth Menon
  *
  * This program is free software; you can redistribute it and/or modify
index 96840de..ea19341 100644 (file)
@@ -8,26 +8,9 @@
 #ifndef _LINUX_SOCKPTR_H
 #define _LINUX_SOCKPTR_H
 
-#include <linux/compiler.h>
 #include <linux/slab.h>
 #include <linux/uaccess.h>
 
-#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
-typedef union {
-       void            *kernel;
-       void __user     *user;
-} sockptr_t;
-
-static inline bool sockptr_is_kernel(sockptr_t sockptr)
-{
-       return (unsigned long)sockptr.kernel >= TASK_SIZE;
-}
-
-static inline sockptr_t KERNEL_SOCKPTR(void *p)
-{
-       return (sockptr_t) { .kernel = p };
-}
-#else /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */
 typedef struct {
        union {
                void            *kernel;
@@ -45,15 +28,10 @@ static inline sockptr_t KERNEL_SOCKPTR(void *p)
 {
        return (sockptr_t) { .kernel = p, .is_kernel = true };
 }
-#endif /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */
 
-static inline int __must_check init_user_sockptr(sockptr_t *sp, void __user *p,
-               size_t size)
+static inline sockptr_t USER_SOCKPTR(void __user *p)
 {
-       if (!access_ok(p, size))
-               return -EFAULT;
-       *sp = (sockptr_t) { .user = p };
-       return 0;
+       return (sockptr_t) { .user = p };
 }
 
 static inline bool sockptr_is_null(sockptr_t sockptr)
index 320c672..4af31bb 100644 (file)
@@ -124,4 +124,78 @@ rpcrdma_decode_buffer_size(u8 val)
        return ((unsigned int)val + 1) << 10;
 }
 
+/**
+ * xdr_encode_rdma_segment - Encode contents of an RDMA segment
+ * @p: Pointer into a send buffer
+ * @handle: The RDMA handle to encode
+ * @length: The RDMA length to encode
+ * @offset: The RDMA offset to encode
+ *
+ * Return value:
+ *   Pointer to the XDR position that follows the encoded RDMA segment
+ */
+static inline __be32 *xdr_encode_rdma_segment(__be32 *p, u32 handle,
+                                             u32 length, u64 offset)
+{
+       *p++ = cpu_to_be32(handle);
+       *p++ = cpu_to_be32(length);
+       return xdr_encode_hyper(p, offset);
+}
+
+/**
+ * xdr_encode_read_segment - Encode contents of a Read segment
+ * @p: Pointer into a send buffer
+ * @position: The position to encode
+ * @handle: The RDMA handle to encode
+ * @length: The RDMA length to encode
+ * @offset: The RDMA offset to encode
+ *
+ * Return value:
+ *   Pointer to the XDR position that follows the encoded Read segment
+ */
+static inline __be32 *xdr_encode_read_segment(__be32 *p, u32 position,
+                                             u32 handle, u32 length,
+                                             u64 offset)
+{
+       *p++ = cpu_to_be32(position);
+       return xdr_encode_rdma_segment(p, handle, length, offset);
+}
+
+/**
+ * xdr_decode_rdma_segment - Decode contents of an RDMA segment
+ * @p: Pointer to the undecoded RDMA segment
+ * @handle: Upon return, the RDMA handle
+ * @length: Upon return, the RDMA length
+ * @offset: Upon return, the RDMA offset
+ *
+ * Return value:
+ *   Pointer to the XDR item that follows the RDMA segment
+ */
+static inline __be32 *xdr_decode_rdma_segment(__be32 *p, u32 *handle,
+                                             u32 *length, u64 *offset)
+{
+       *handle = be32_to_cpup(p++);
+       *length = be32_to_cpup(p++);
+       return xdr_decode_hyper(p, offset);
+}
+
+/**
+ * xdr_decode_read_segment - Decode contents of a Read segment
+ * @p: Pointer to the undecoded Read segment
+ * @position: Upon return, the segment's position
+ * @handle: Upon return, the RDMA handle
+ * @length: Upon return, the RDMA length
+ * @offset: Upon return, the RDMA offset
+ *
+ * Return value:
+ *   Pointer to the XDR item that follows the Read segment
+ */
+static inline __be32 *xdr_decode_read_segment(__be32 *p, u32 *position,
+                                             u32 *handle, u32 *length,
+                                             u64 *offset)
+{
+       *position = be32_to_cpup(p++);
+       return xdr_decode_rdma_segment(p, handle, length, offset);
+}
+
 #endif                         /* _LINUX_SUNRPC_RPC_RDMA_H */
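
For illustration, a round trip through the segment helpers above (not from this patch); the values are arbitrary:

    /* Illustrative only, not part of the patch. */
    static void example_rdma_segment_roundtrip(void)
    {
            __be32 buf[4], *next;
            u32 handle, length;
            u64 offset;

            next = xdr_encode_rdma_segment(buf, 7, 4096, 0x1000);
            /* next points just past the four encoded XDR words */

            next = xdr_decode_rdma_segment(buf, &handle, &length, &offset);
            /* handle == 7, length == 4096, offset == 0x1000 */
    }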
diff --git a/include/linux/sunrpc/rpc_rdma_cid.h b/include/linux/sunrpc/rpc_rdma_cid.h
new file mode 100644 (file)
index 0000000..be24ab2
--- /dev/null
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020, Oracle and/or its affiliates.
+ */
+
+#ifndef RPC_RDMA_CID_H
+#define RPC_RDMA_CID_H
+
+/*
+ * The rpc_rdma_cid struct records completion ID information. A
+ * completion ID matches an incoming Send or Receive completion
+ * to a Completion Queue and to a previous ib_post_*(). The ID
+ * can then be displayed in an error message or recorded in a
+ * trace record.
+ *
+ * This struct is shared between the server and client RPC/RDMA
+ * transport implementations.
+ */
+struct rpc_rdma_cid {
+       u32                     ci_queue_id;
+       int                     ci_completion_id;
+};
+
+#endif /* RPC_RDMA_CID_H */
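
For illustration, a sketch (not from this patch) of filling in a completion ID before posting a work request; the CQ id and counter sources are assumptions for illustration only:

    /* Illustrative only, not part of the patch. */
    static void example_cid_init(struct rpc_rdma_cid *cid,
                                 u32 cq_id, atomic_t *id_counter)
    {
            cid->ci_queue_id = cq_id;                               /* which CQ */
            cid->ci_completion_id = atomic_inc_return(id_counter);  /* which post */
    }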
index 7ed8262..9dc3a3b 100644 (file)
@@ -46,6 +46,7 @@
 #include <linux/sunrpc/xdr.h>
 #include <linux/sunrpc/svcsock.h>
 #include <linux/sunrpc/rpc_rdma.h>
+#include <linux/sunrpc/rpc_rdma_cid.h>
 #include <rdma/ib_verbs.h>
 #include <rdma/rdma_cm.h>
 
@@ -109,6 +110,8 @@ struct svcxprt_rdma {
        struct work_struct   sc_work;
 
        struct llist_head    sc_recv_ctxts;
+
+       atomic_t             sc_completion_ids;
 };
 /* sc_flags */
 #define RDMAXPRT_CONN_PENDING  3
@@ -129,6 +132,7 @@ struct svc_rdma_recv_ctxt {
        struct list_head        rc_list;
        struct ib_recv_wr       rc_recv_wr;
        struct ib_cqe           rc_cqe;
+       struct rpc_rdma_cid     rc_cid;
        struct ib_sge           rc_recv_sge;
        void                    *rc_recv_buf;
        struct xdr_buf          rc_arg;
@@ -147,6 +151,8 @@ struct svc_rdma_recv_ctxt {
 
 struct svc_rdma_send_ctxt {
        struct list_head        sc_list;
+       struct rpc_rdma_cid     sc_cid;
+
        struct ib_send_wr       sc_send_wr;
        struct ib_cqe           sc_cqe;
        struct xdr_buf          sc_hdrbuf;
@@ -190,20 +196,21 @@ extern struct svc_rdma_send_ctxt *
                svc_rdma_send_ctxt_get(struct svcxprt_rdma *rdma);
 extern void svc_rdma_send_ctxt_put(struct svcxprt_rdma *rdma,
                                   struct svc_rdma_send_ctxt *ctxt);
-extern int svc_rdma_send(struct svcxprt_rdma *rdma, struct ib_send_wr *wr);
+extern int svc_rdma_send(struct svcxprt_rdma *rdma,
+                        struct svc_rdma_send_ctxt *ctxt);
 extern int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
                                  struct svc_rdma_send_ctxt *sctxt,
                                  const struct svc_rdma_recv_ctxt *rctxt,
                                  struct xdr_buf *xdr);
+extern void svc_rdma_send_error_msg(struct svcxprt_rdma *rdma,
+                                   struct svc_rdma_send_ctxt *sctxt,
+                                   struct svc_rdma_recv_ctxt *rctxt,
+                                   int status);
 extern int svc_rdma_sendto(struct svc_rqst *);
 extern int svc_rdma_read_payload(struct svc_rqst *rqstp, unsigned int offset,
                                 unsigned int length);
 
 /* svc_rdma_transport.c */
-extern int svc_rdma_create_listen(struct svc_serv *, int, struct sockaddr *);
-extern void svc_sq_reap(struct svcxprt_rdma *);
-extern void svc_rq_reap(struct svcxprt_rdma *);
-
 extern struct svc_xprt_class svc_rdma_class;
 #ifdef CONFIG_SUNRPC_BACKCHANNEL
 extern struct svc_xprt_class svc_rdma_bc_class;
index 22c207b..5a6a81b 100644 (file)
@@ -475,6 +475,32 @@ xdr_stream_encode_uint32_array(struct xdr_stream *xdr,
 }
 
 /**
+ * xdr_item_is_absent - symbolically handle XDR discriminators
+ * @p: pointer to undecoded discriminator
+ *
+ * Return values:
+ *   %true if the following XDR item is absent
+ *   %false if the following XDR item is present
+ */
+static inline bool xdr_item_is_absent(const __be32 *p)
+{
+       return *p == xdr_zero;
+}
+
+/**
+ * xdr_item_is_present - symbolically handle XDR discriminators
+ * @p: pointer to undecoded discriminator
+ *
+ * Return values:
+ *   %true if the following XDR item is present
+ *   %false if the following XDR item is absent
+ */
+static inline bool xdr_item_is_present(const __be32 *p)
+{
+       return *p != xdr_zero;
+}
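
For illustration, a sketch (not from this patch) of decoding an XDR optional item with these helpers; the field layout is hypothetical:

    /* Illustrative only, not part of the patch. */
    static __be32 *example_decode_optional_u32(__be32 *p, u32 *val, bool *present)
    {
            *present = xdr_item_is_present(p++);
            if (*present)
                    *val = be32_to_cpup(p++);
            return p;
    }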
+
+/**
  * xdr_stream_decode_u32 - Decode a 32-bit integer
  * @xdr: pointer to xdr_stream
  * @ptr: location to store integer
index e64bd82..a603d48 100644 (file)
@@ -101,6 +101,7 @@ struct rpc_rqst {
                                                         * used in the softirq.
                                                         */
        unsigned long           rq_majortimeo;  /* major timeout alarm */
+       unsigned long           rq_minortimeo;  /* minor timeout alarm */
        unsigned long           rq_timeout;     /* Current timeout value */
        ktime_t                 rq_rtt;         /* round-trip time */
        unsigned int            rq_retries;     /* # of retries */
index b960098..cb9afad 100644 (file)
@@ -453,6 +453,8 @@ extern bool hibernation_available(void);
 asmlinkage int swsusp_save(void);
 extern struct pbe *restore_pblist;
 int pfn_is_nosave(unsigned long pfn);
+
+int hibernate_quiet_exec(int (*func)(void *data), void *data);
 #else /* CONFIG_HIBERNATION */
 static inline void register_nosave_region(unsigned long b, unsigned long e) {}
 static inline void register_nosave_region_late(unsigned long b, unsigned long e) {}
@@ -464,6 +466,10 @@ static inline void hibernation_set_ops(const struct platform_hibernation_ops *op
 static inline int hibernate(void) { return -ENOSYS; }
 static inline bool system_entering_hibernation(void) { return false; }
 static inline bool hibernation_available(void) { return false; }
+
+static inline int hibernate_quiet_exec(int (*func)(void *data), void *data)
+{
+       return -ENOTSUPP;
+}
 #endif /* CONFIG_HIBERNATION */
 
 #ifdef CONFIG_HIBERNATION_SNAPSHOT_DEV
index 7eb59bc..6610469 100644 (file)
@@ -352,7 +352,7 @@ extern void deactivate_page(struct page *page);
 extern void mark_page_lazyfree(struct page *page);
 extern void swap_setup(void);
 
-extern void lru_cache_add_active_or_unevictable(struct page *page,
+extern void lru_cache_add_inactive_or_unevictable(struct page *page,
                                                struct vm_area_struct *vma);
 
 /* linux/mm/vmscan.c */
@@ -414,9 +414,14 @@ extern struct address_space *swapper_spaces[];
 extern unsigned long total_swapcache_pages(void);
 extern void show_swap_cache_info(void);
 extern int add_to_swap(struct page *page);
-extern int add_to_swap_cache(struct page *, swp_entry_t, gfp_t);
-extern void __delete_from_swap_cache(struct page *, swp_entry_t entry);
+extern void *get_shadow_from_swap_cache(swp_entry_t entry);
+extern int add_to_swap_cache(struct page *page, swp_entry_t entry,
+                       gfp_t gfp, void **shadowp);
+extern void __delete_from_swap_cache(struct page *page,
+                       swp_entry_t entry, void *shadow);
 extern void delete_from_swap_cache(struct page *);
+extern void clear_shadow_from_swap_cache(int type, unsigned long begin,
+                               unsigned long end);
 extern void free_page_and_swap_cache(struct page *);
 extern void free_pages_and_swap_cache(struct page **, int);
 extern struct page *lookup_swap_cache(swp_entry_t entry,
@@ -569,14 +574,19 @@ static inline int add_to_swap(struct page *page)
        return 0;
 }
 
+static inline void *get_shadow_from_swap_cache(swp_entry_t entry)
+{
+       return NULL;
+}
+
 static inline int add_to_swap_cache(struct page *page, swp_entry_t entry,
-                                                       gfp_t gfp_mask)
+                                       gfp_t gfp_mask, void **shadowp)
 {
        return -1;
 }
 
 static inline void __delete_from_swap_cache(struct page *page,
-                                                       swp_entry_t entry)
+                                       swp_entry_t entry, void *shadow)
 {
 }
 
@@ -584,6 +594,11 @@ static inline void delete_from_swap_cache(struct page *page)
 {
 }
 
+static inline void clear_shadow_from_swap_cache(int type, unsigned long begin,
+                               unsigned long end)
+{
+}
+
 static inline int page_swapcount(struct page *page)
 {
        return 0;
index a2429d3..75ac7f8 100644 (file)
@@ -47,7 +47,6 @@ struct stat64;
 struct statfs;
 struct statfs64;
 struct statx;
-struct __sysctl_args;
 struct sysinfo;
 struct timespec;
 struct __kernel_old_timeval;
@@ -263,7 +262,7 @@ static inline void addr_limit_user_check(void)
                return;
 #endif
 
-       if (CHECK_DATA_CORRUPTION(!segment_eq(get_fs(), USER_DS),
+       if (CHECK_DATA_CORRUPTION(uaccess_kernel(),
                                  "Invalid address limit on user-mode return"))
                force_sig(SIGKILL);
 
@@ -1117,7 +1116,6 @@ asmlinkage long sys_send(int, void __user *, size_t, unsigned);
 asmlinkage long sys_bdflush(int func, long data);
 asmlinkage long sys_oldumount(char __user *name);
 asmlinkage long sys_uselib(const char __user *library);
-asmlinkage long sys_sysctl(struct __sysctl_args __user *args);
 asmlinkage long sys_sysfs(int option,
                                unsigned long arg1, unsigned long arg2);
 asmlinkage long sys_fork(void);
index 50bb7f3..51298a4 100644 (file)
@@ -74,15 +74,13 @@ int proc_do_static_key(struct ctl_table *table, int write, void *buffer,
  * sysctl names can be mirrored automatically under /proc/sys.  The
  * procname supplied controls /proc naming.
  *
- * The table's mode will be honoured both for sys_sysctl(2) and
- * proc-fs access.
+ * The table's mode will be honoured for proc-fs access.
  *
  * Leaf nodes in the sysctl tree will be represented by a single file
  * under /proc; non-leaf nodes will be represented by directories.  A
  * null procname disables /proc mirroring at this node.
  *
- * sysctl(2) can automatically manage read and write requests through
- * the sysctl table.  The data and maxlen fields of the ctl_table
+ * The data and maxlen fields of the ctl_table
  * struct enable minimal validation of the values being written to be
  * performed, and the mode field allows minimal authentication.
  * 
index 4c325bf..b142cb5 100644 (file)
@@ -3,7 +3,6 @@
 #define _LINUX_TIME_H
 
 # include <linux/cache.h>
-# include <linux/seqlock.h>
 # include <linux/math64.h>
 # include <linux/time64.h>
 
index 0a76ddc..94b2854 100644 (file)
@@ -6,11 +6,27 @@
 #include <linux/sched.h>
 #include <linux/thread_info.h>
 
-#define uaccess_kernel() segment_eq(get_fs(), KERNEL_DS)
-
 #include <asm/uaccess.h>
 
 /*
+ * Force the uaccess routines to be wired up for actual userspace access,
+ * overriding any possible set_fs(KERNEL_DS) still lingering around.  Undone
+ * using force_uaccess_end below.
+ */
+static inline mm_segment_t force_uaccess_begin(void)
+{
+       mm_segment_t fs = get_fs();
+
+       set_fs(USER_DS);
+       return fs;
+}
+
+static inline void force_uaccess_end(mm_segment_t oldfs)
+{
+       set_fs(oldfs);
+}
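
For illustration, a sketch (not from this patch) of bracketing a user-space copy with the new helpers; everything apart from the two helpers is hypothetical:

    /* Illustrative only, not part of the patch. */
    static int example_copy(void __user *ubuf, const void *kbuf, size_t len)
    {
            mm_segment_t old_fs = force_uaccess_begin();
            int ret = 0;

            if (copy_to_user(ubuf, kbuf, len))
                    ret = -EFAULT;

            force_uaccess_end(old_fs);
            return ret;
    }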
+
+/*
  * Architectures should provide two primitives (raw_copy_{to,from}_user())
  * and get rid of their private instances of copy_{to,from}_user() and
  * __copy_{to,from}_user{,_inatomic}().
index 239db79..eae0bfd 100644 (file)
@@ -28,17 +28,28 @@ struct vdpa_notification_area {
 };
 
 /**
+ * vDPA vq_state definition
+ * @avail_index: available index
+ */
+struct vdpa_vq_state {
+       u16     avail_index;
+};
+
+/**
  * vDPA device - representation of a vDPA device
  * @dev: underlying device
  * @dma_dev: the actual device that is performing DMA
  * @config: the configuration ops for this device.
  * @index: device index
+ * @features_valid: were the features initialized? (for legacy guests)
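+ * @nvqs: the number of virtqueues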
  */
 struct vdpa_device {
        struct device dev;
        struct device *dma_dev;
        const struct vdpa_config_ops *config;
        unsigned int index;
+       bool features_valid;
+       int nvqs;
 };
 
 /**
@@ -77,16 +88,22 @@ struct vdpa_device {
  * @set_vq_state:              Set the state for a virtqueue
  *                             @vdev: vdpa device
  *                             @idx: virtqueue index
- *                             @state: virtqueue state (last_avail_idx)
+ *                             @state: pointer to set virtqueue state (last_avail_idx)
  *                             Returns integer: success (0) or error (< 0)
  * @get_vq_state:              Get the state for a virtqueue
  *                             @vdev: vdpa device
  *                             @idx: virtqueue index
- *                             Returns virtqueue state (last_avail_idx)
+ *                             @state: pointer to returned state (last_avail_idx)
 * @get_vq_notification:       Get the notification area for a virtqueue
 *                             @vdev: vdpa device
 *                             @idx: virtqueue index
 *                             Returns the notification area
+ * @get_vq_irq:                        Get the irq number of a virtqueue (optional,
+ *                             but must be implemented if vq irq offloading
+ *                             is required)
+ *                             @vdev: vdpa device
+ *                             @idx: virtqueue index
+ *                             Returns int: the irq number of a virtqueue,
+ *                             or a negative number if no irq is assigned.
  * @get_vq_align:              Get the virtqueue align requirement
  *                             for the device
  *                             @vdev: vdpa device
@@ -174,10 +191,14 @@ struct vdpa_config_ops {
                          struct vdpa_callback *cb);
        void (*set_vq_ready)(struct vdpa_device *vdev, u16 idx, bool ready);
        bool (*get_vq_ready)(struct vdpa_device *vdev, u16 idx);
-       int (*set_vq_state)(struct vdpa_device *vdev, u16 idx, u64 state);
-       u64 (*get_vq_state)(struct vdpa_device *vdev, u16 idx);
+       int (*set_vq_state)(struct vdpa_device *vdev, u16 idx,
+                           const struct vdpa_vq_state *state);
+       int (*get_vq_state)(struct vdpa_device *vdev, u16 idx,
+                           struct vdpa_vq_state *state);
        struct vdpa_notification_area
        (*get_vq_notification)(struct vdpa_device *vdev, u16 idx);
+       /* vq irq is not expected to be changed once DRIVER_OK is set */
+       int (*get_vq_irq)(struct vdpa_device *vdev, u16 idx);
 
        /* Device ops */
        u32 (*get_vq_align)(struct vdpa_device *vdev);
@@ -208,11 +229,12 @@ struct vdpa_config_ops {
 
 struct vdpa_device *__vdpa_alloc_device(struct device *parent,
                                        const struct vdpa_config_ops *config,
+                                       int nvqs,
                                        size_t size);
 
-#define vdpa_alloc_device(dev_struct, member, parent, config)   \
+#define vdpa_alloc_device(dev_struct, member, parent, config, nvqs)   \
                          container_of(__vdpa_alloc_device( \
-                                      parent, config, \
+                                      parent, config, nvqs, \
                                       sizeof(dev_struct) + \
                                       BUILD_BUG_ON_ZERO(offsetof( \
                                       dev_struct, member))), \
@@ -266,4 +288,36 @@ static inline struct device *vdpa_get_dma_dev(struct vdpa_device *vdev)
 {
        return vdev->dma_dev;
 }
+
+static inline void vdpa_reset(struct vdpa_device *vdev)
+{
+       const struct vdpa_config_ops *ops = vdev->config;
+
+       vdev->features_valid = false;
+       ops->set_status(vdev, 0);
+}
+
+static inline int vdpa_set_features(struct vdpa_device *vdev, u64 features)
+{
+       const struct vdpa_config_ops *ops = vdev->config;
+
+       vdev->features_valid = true;
+       return ops->set_features(vdev, features);
+}
+
+static inline void vdpa_get_config(struct vdpa_device *vdev, unsigned offset,
+                                  void *buf, unsigned int len)
+{
+       const struct vdpa_config_ops *ops = vdev->config;
+
+       /*
+        * Config accesses aren't supposed to trigger before features are set.
+        * If they do happen, assume a legacy guest.
+        */
+       if (!vdev->features_valid)
+               vdpa_set_features(vdev, 0);
+       ops->get_config(vdev, offset, buf, len);
+}
+
 #endif /* _LINUX_VDPA_H */
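
For illustration, a sketch (not from this patch) of a parent/bus driver using the new helpers; the config offset and field are hypothetical (a virtio-net-style MAC at offset 0):

    /* Illustrative only, not part of the patch. */
    static void example_negotiate_and_read_config(struct vdpa_device *vdev)
    {
            u64 features = vdev->config->get_features(vdev);
            u8 mac[6];

            vdpa_reset(vdev);                       /* status 0, features_valid cleared */
            vdpa_set_features(vdev, features);      /* marks features_valid */
            vdpa_get_config(vdev, 0, mac, sizeof(mac));
    }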
index 553b34c..977caf9 100644 (file)
@@ -110,12 +110,6 @@ static inline int vga_get_uninterruptible(struct pci_dev *pdev,
 }
 
 #if defined(CONFIG_VGA_ARB)
-extern int vga_tryget(struct pci_dev *pdev, unsigned int rsrc);
-#else
-static inline int vga_tryget(struct pci_dev *pdev, unsigned int rsrc) { return 0; }
-#endif
-
-#if defined(CONFIG_VGA_ARB)
 extern void vga_put(struct pci_dev *pdev, unsigned int rsrc);
 #else
 #define vga_put(pdev, rsrc)
index 16c0ed6..219037f 100644 (file)
@@ -57,6 +57,7 @@
 #define __LINUX_VIDEODEV2_H
 
 #include <linux/time.h>     /* need struct timeval */
+#include <linux/kernel.h>
 #include <uapi/linux/videodev2.h>
 
 #endif /* __LINUX_VIDEODEV2_H */
index 5d2d312..ea72247 100644 (file)
@@ -11,9 +11,9 @@
 
 #include <linux/types.h>
 struct virtio_caif_transf_config {
-       u16 headroom;
-       u16 tailroom;
-       u32 mtu;
+       __virtio16 headroom;
+       __virtio16 tailroom;
+       __virtio32 mtu;
        u8 reserved[4];
 };
 
index bb4cc49..8fe857e 100644 (file)
@@ -6,6 +6,7 @@
 #include <linux/bug.h>
 #include <linux/virtio.h>
 #include <linux/virtio_byteorder.h>
+#include <linux/compiler_types.h>
 #include <uapi/linux/virtio_config.h>
 
 struct irq_affinity;
@@ -162,16 +163,16 @@ static inline bool virtio_has_feature(const struct virtio_device *vdev,
 }
 
 /**
- * virtio_has_iommu_quirk - determine whether this device has the iommu quirk
+ * virtio_has_dma_quirk - determine whether this device has the DMA quirk
  * @vdev: the device
  */
-static inline bool virtio_has_iommu_quirk(const struct virtio_device *vdev)
+static inline bool virtio_has_dma_quirk(const struct virtio_device *vdev)
 {
        /*
         * Note the reverse polarity of the quirk feature (compared to most
         * other features), this is for compatibility with legacy systems.
         */
-       return !virtio_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM);
+       return !virtio_has_feature(vdev, VIRTIO_F_ACCESS_PLATFORM);
 }
 
 static inline
@@ -287,70 +288,133 @@ static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val)
        return __cpu_to_virtio64(virtio_is_little_endian(vdev), val);
 }
 
+#define virtio_to_cpu(vdev, x) \
+       _Generic((x), \
+               __u8: (x), \
+               __virtio16: virtio16_to_cpu((vdev), (x)), \
+               __virtio32: virtio32_to_cpu((vdev), (x)), \
+               __virtio64: virtio64_to_cpu((vdev), (x)) \
+               )
+
+#define cpu_to_virtio(vdev, x, m) \
+       _Generic((m), \
+               __u8: (x), \
+               __virtio16: cpu_to_virtio16((vdev), (x)), \
+               __virtio32: cpu_to_virtio32((vdev), (x)), \
+               __virtio64: cpu_to_virtio64((vdev), (x)) \
+               )
+
+#define __virtio_native_type(structname, member) \
+       typeof(virtio_to_cpu(NULL, ((structname*)0)->member))
+
 /* Config space accessors. */
 #define virtio_cread(vdev, structname, member, ptr)                    \
        do {                                                            \
+               typeof(((structname*)0)->member) virtio_cread_v;        \
+                                                                       \
                might_sleep();                                          \
-               /* Must match the member's type, and be integer */      \
-               if (!typecheck(typeof((((structname*)0)->member)), *(ptr))) \
-                       (*ptr) = 1;                                     \
+               /* Sanity check: must match the member's type */        \
+               typecheck(typeof(virtio_to_cpu((vdev), virtio_cread_v)), *(ptr)); \
                                                                        \
-               switch (sizeof(*ptr)) {                                 \
+               switch (sizeof(virtio_cread_v)) {                       \
                case 1:                                                 \
-                       *(ptr) = virtio_cread8(vdev,                    \
-                                              offsetof(structname, member)); \
-                       break;                                          \
                case 2:                                                 \
-                       *(ptr) = virtio_cread16(vdev,                   \
-                                               offsetof(structname, member)); \
-                       break;                                          \
                case 4:                                                 \
-                       *(ptr) = virtio_cread32(vdev,                   \
-                                               offsetof(structname, member)); \
-                       break;                                          \
-               case 8:                                                 \
-                       *(ptr) = virtio_cread64(vdev,                   \
-                                               offsetof(structname, member)); \
+                       vdev->config->get((vdev),                       \
+                                         offsetof(structname, member), \
+                                         &virtio_cread_v,              \
+                                         sizeof(virtio_cread_v));      \
                        break;                                          \
                default:                                                \
-                       BUG();                                          \
+                       __virtio_cread_many((vdev),                     \
+                                         offsetof(structname, member), \
+                                         &virtio_cread_v,              \
+                                         1,                            \
+                                         sizeof(virtio_cread_v));      \
+                       break;                                          \
                }                                                       \
+               *(ptr) = virtio_to_cpu(vdev, virtio_cread_v);           \
        } while(0)
 
 /* Config space accessors. */
 #define virtio_cwrite(vdev, structname, member, ptr)                   \
        do {                                                            \
+               typeof(((structname*)0)->member) virtio_cwrite_v =      \
+                       cpu_to_virtio(vdev, *(ptr), ((structname*)0)->member); \
+                                                                       \
+               might_sleep();                                          \
+               /* Sanity check: must match the member's type */        \
+               typecheck(typeof(virtio_to_cpu((vdev), virtio_cwrite_v)), *(ptr)); \
+                                                                       \
+               vdev->config->set((vdev), offsetof(structname, member), \
+                                 &virtio_cwrite_v,                     \
+                                 sizeof(virtio_cwrite_v));             \
+       } while(0)
+
+/*
+ * Nothing virtio-specific about these, but let's worry about generalizing
+ * these later.
+ */
+#define virtio_le_to_cpu(x) \
+       _Generic((x), \
+               __u8: (u8)(x), \
+                __le16: (u16)le16_to_cpu(x), \
+                __le32: (u32)le32_to_cpu(x), \
+                __le64: (u64)le64_to_cpu(x) \
+               )
+
+#define virtio_cpu_to_le(x, m) \
+       _Generic((m), \
+                __u8: (x), \
+                __le16: cpu_to_le16(x), \
+                __le32: cpu_to_le32(x), \
+                __le64: cpu_to_le64(x) \
+               )
+
+/* LE (e.g. modern) Config space accessors. */
+#define virtio_cread_le(vdev, structname, member, ptr)                 \
+       do {                                                            \
+               typeof(((structname*)0)->member) virtio_cread_v;        \
+                                                                       \
                might_sleep();                                          \
-               /* Must match the member's type, and be integer */      \
-               if (!typecheck(typeof((((structname*)0)->member)), *(ptr))) \
-                       BUG_ON((*ptr) == 1);                            \
+               /* Sanity check: must match the member's type */        \
+               typecheck(typeof(virtio_le_to_cpu(virtio_cread_v)), *(ptr)); \
                                                                        \
-               switch (sizeof(*ptr)) {                                 \
+               switch (sizeof(virtio_cread_v)) {                       \
                case 1:                                                 \
-                       virtio_cwrite8(vdev,                            \
-                                      offsetof(structname, member),    \
-                                      *(ptr));                         \
-                       break;                                          \
                case 2:                                                 \
-                       virtio_cwrite16(vdev,                           \
-                                       offsetof(structname, member),   \
-                                       *(ptr));                        \
-                       break;                                          \
                case 4:                                                 \
-                       virtio_cwrite32(vdev,                           \
-                                       offsetof(structname, member),   \
-                                       *(ptr));                        \
-                       break;                                          \
-               case 8:                                                 \
-                       virtio_cwrite64(vdev,                           \
-                                       offsetof(structname, member),   \
-                                       *(ptr));                        \
+                       vdev->config->get((vdev),                       \
+                                         offsetof(structname, member), \
+                                         &virtio_cread_v,              \
+                                         sizeof(virtio_cread_v));      \
                        break;                                          \
                default:                                                \
-                       BUG();                                          \
+                       __virtio_cread_many((vdev),                     \
+                                         offsetof(structname, member), \
+                                         &virtio_cread_v,              \
+                                         1,                            \
+                                         sizeof(virtio_cread_v));      \
+                       break;                                          \
                }                                                       \
+               *(ptr) = virtio_le_to_cpu(virtio_cread_v);              \
+       } while(0)
+
+#define virtio_cwrite_le(vdev, structname, member, ptr)                        \
+       do {                                                            \
+               typeof(((structname*)0)->member) virtio_cwrite_v =      \
+                       virtio_cpu_to_le(*(ptr), ((structname*)0)->member); \
+                                                                       \
+               might_sleep();                                          \
+               /* Sanity check: must match the member's type */        \
+               typecheck(typeof(virtio_le_to_cpu(virtio_cwrite_v)), *(ptr)); \
+                                                                       \
+               vdev->config->set((vdev), offsetof(structname, member), \
+                                 &virtio_cwrite_v,                     \
+                                 sizeof(virtio_cwrite_v));             \
        } while(0)
 
 /* Read @count fields, @bytes each. */
 static inline void __virtio_cread_many(struct virtio_device *vdev,
                                       unsigned int offset,
@@ -399,53 +463,60 @@ static inline void virtio_cwrite8(struct virtio_device *vdev,
 static inline u16 virtio_cread16(struct virtio_device *vdev,
                                 unsigned int offset)
 {
-       u16 ret;
+       __virtio16 ret;
 
        might_sleep();
        vdev->config->get(vdev, offset, &ret, sizeof(ret));
-       return virtio16_to_cpu(vdev, (__force __virtio16)ret);
+       return virtio16_to_cpu(vdev, ret);
 }
 
 static inline void virtio_cwrite16(struct virtio_device *vdev,
                                   unsigned int offset, u16 val)
 {
+       __virtio16 v;
+
        might_sleep();
-       val = (__force u16)cpu_to_virtio16(vdev, val);
-       vdev->config->set(vdev, offset, &val, sizeof(val));
+       v = cpu_to_virtio16(vdev, val);
+       vdev->config->set(vdev, offset, &v, sizeof(v));
 }
 
 static inline u32 virtio_cread32(struct virtio_device *vdev,
                                 unsigned int offset)
 {
-       u32 ret;
+       __virtio32 ret;
 
        might_sleep();
        vdev->config->get(vdev, offset, &ret, sizeof(ret));
-       return virtio32_to_cpu(vdev, (__force __virtio32)ret);
+       return virtio32_to_cpu(vdev, ret);
 }
 
 static inline void virtio_cwrite32(struct virtio_device *vdev,
                                   unsigned int offset, u32 val)
 {
+       __virtio32 v;
+
        might_sleep();
-       val = (__force u32)cpu_to_virtio32(vdev, val);
-       vdev->config->set(vdev, offset, &val, sizeof(val));
+       v = cpu_to_virtio32(vdev, val);
+       vdev->config->set(vdev, offset, &v, sizeof(v));
 }
 
 static inline u64 virtio_cread64(struct virtio_device *vdev,
                                 unsigned int offset)
 {
-       u64 ret;
+       __virtio64 ret;
+
        __virtio_cread_many(vdev, offset, &ret, 1, sizeof(ret));
-       return virtio64_to_cpu(vdev, (__force __virtio64)ret);
+       return virtio64_to_cpu(vdev, ret);
 }
 
 static inline void virtio_cwrite64(struct virtio_device *vdev,
                                   unsigned int offset, u64 val)
 {
+       __virtio64 v;
+
        might_sleep();
-       val = (__force u64)cpu_to_virtio64(vdev, val);
-       vdev->config->set(vdev, offset, &val, sizeof(val));
+       v = cpu_to_virtio64(vdev, val);
+       vdev->config->set(vdev, offset, &v, sizeof(v));
 }
 
 /* Conditional config space accessors. */
@@ -459,4 +530,14 @@ static inline void virtio_cwrite64(struct virtio_device *vdev,
                _r;                                                     \
        })
 
+/* Conditional config space accessors. */
+#define virtio_cread_le_feature(vdev, fbit, structname, member, ptr)   \
+       ({                                                              \
+               int _r = 0;                                             \
+               if (!virtio_has_feature(vdev, fbit))                    \
+                       _r = -ENOENT;                                   \
+               else                                                    \
+                       virtio_cread_le((vdev), structname, member, ptr); \
+               _r;                                                     \
+       })
 #endif /* _LINUX_VIRTIO_CONFIG_H */
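
For illustration, a sketch (not from this patch) of reading a little-endian ("modern") config field with the new accessor; the config struct here is hypothetical:

    /* Illustrative only, not part of the patch. */
    struct example_config {
            __le32 queue_size;
            __le16 flags;
    };

    static u32 example_read_queue_size(struct virtio_device *vdev)
    {
            u32 qsize;

            virtio_cread_le(vdev, struct example_config, queue_size, &qsize);
            return qsize;
    }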
index 3dc70ad..b485b13 100644 (file)
@@ -46,16 +46,15 @@ static inline void virtio_wmb(bool weak_barriers)
                dma_wmb();
 }
 
-static inline void virtio_store_mb(bool weak_barriers,
-                                  __virtio16 *p, __virtio16 v)
-{
-       if (weak_barriers) {
-               virt_store_mb(*p, v);
-       } else {
-               WRITE_ONCE(*p, v);
-               mb();
-       }
-}
+#define virtio_store_mb(weak_barriers, p, v) \
+do { \
+       if (weak_barriers) { \
+               virt_store_mb(*p, v); \
+       } else { \
+               WRITE_ONCE(*p, v); \
+               mb(); \
+       } \
+} while (0)
 
 struct virtio_device;
 struct virtqueue;
index 24fc7c3..2e6ca53 100644 (file)
@@ -56,6 +56,9 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
 #endif
 #ifdef CONFIG_MIGRATION
                PGMIGRATE_SUCCESS, PGMIGRATE_FAIL,
+               THP_MIGRATION_SUCCESS,
+               THP_MIGRATION_FAIL,
+               THP_MIGRATION_SPLIT,
 #endif
 #ifdef CONFIG_COMPACTION
                COMPACTMIGRATE_SCANNED, COMPACTFREE_SCANNED,
index 1464ce6..9b19e6b 100644 (file)
@@ -210,6 +210,8 @@ extern int watchdog_init_timeout(struct watchdog_device *wdd,
 extern int watchdog_register_device(struct watchdog_device *);
 extern void watchdog_unregister_device(struct watchdog_device *);
 
+int watchdog_set_last_hw_keepalive(struct watchdog_device *, unsigned int);
+
 /* devres register variant */
 int devm_watchdog_register_device(struct device *dev, struct watchdog_device *);
 
index e497e62..3f49696 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * TI Wakeup M3 for AMx3 SoCs Power Management Routines
  *
- * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2015 Texas Instruments Incorporated - https://www.ti.com/
  * Dave Gerlach <d-gerlach@ti.com>
  *
  * This program is free software; you can redistribute it and/or
index d755425..850424e 100644 (file)
@@ -48,14 +48,6 @@ struct ww_acquire_ctx {
 #endif
 };
 
-struct ww_mutex {
-       struct mutex base;
-       struct ww_acquire_ctx *ctx;
-#ifdef CONFIG_DEBUG_MUTEXES
-       struct ww_class *ww_class;
-#endif
-};
-
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # define __WW_CLASS_MUTEX_INITIALIZER(lockname, class) \
                , .ww_class = class
index c5afaf8..10b4dc2 100644 (file)
@@ -52,14 +52,18 @@ ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t);
 ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size);
 int __vfs_setxattr(struct dentry *, struct inode *, const char *, const void *, size_t, int);
 int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int);
+int __vfs_setxattr_locked(struct dentry *, const char *, const void *, size_t, int, struct inode **);
 int vfs_setxattr(struct dentry *, const char *, const void *, size_t, int);
 int __vfs_removexattr(struct dentry *, const char *);
+int __vfs_removexattr_locked(struct dentry *, const char *, struct inode **);
 int vfs_removexattr(struct dentry *, const char *);
 
 ssize_t generic_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size);
 ssize_t vfs_getxattr_alloc(struct dentry *dentry, const char *name,
                           char **xattr_value, size_t size, gfp_t flags);
 
+int xattr_supported_namespace(struct inode *inode, const char *prefix);
+
 static inline const char *xattr_prefix(const struct xattr_handler *handler)
 {
        return handler->prefix ?: handler->name;
index 52b073f..df42511 100644 (file)
@@ -34,7 +34,7 @@
  * ("BSD").
  *
  * You can contact the author at:
- * - xxHash homepage: http://cyan4973.github.io/xxHash/
+ * - xxHash homepage: https://cyan4973.github.io/xxHash/
  * - xxHash source repository: https://github.com/Cyan4973/xxHash
  */
 
index 64cffa6..9884c84 100644 (file)
@@ -2,7 +2,7 @@
  * XZ decompressor
  *
  * Authors: Lasse Collin <lasse.collin@tukaani.org>
- *          Igor Pavlov <http://7-zip.org/>
+ *          Igor Pavlov <https://7-zip.org/>
  *
  * This file has been put into the public domain.
  * You can do whatever you want with this file.
@@ -28,7 +28,7 @@
  * enum xz_mode - Operation mode
  *
  * @XZ_SINGLE:              Single-call mode. This uses less RAM than
- *                          than multi-call modes, because the LZMA2
+ *                          multi-call modes, because the LZMA2
  *                          dictionary doesn't need to be allocated as
  *                          part of the decoder state. All required data
  *                          structures are allocated at initialization,
index c757d84..78ede94 100644 (file)
@@ -23,7 +23,7 @@
 
 
   The data format used by the zlib library is described by RFCs (Request for
-  Comments) 1950 to 1952 in the files http://www.ietf.org/rfc/rfc1950.txt
+  Comments) 1950 to 1952 in the files https://www.ietf.org/rfc/rfc1950.txt
   (zlib format), rfc1951.txt (deflate format) and rfc1952.txt (gzip format).
 */
 
index 1e209ce..aa8893c 100644 (file)
@@ -304,6 +304,10 @@ void inet_csk_listen_stop(struct sock *sk);
 
 void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr);
 
+/* update the fast reuse flag when adding a socket */
+void inet_csk_update_fastreuse(struct inet_bind_bucket *tb,
+                              struct sock *sk);
+
 struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu);
 
 #define TCP_PINGPONG_THRESH    3
index c7bfddf..439379c 100644 (file)
@@ -298,7 +298,7 @@ int nf_conntrack_hash_resize(unsigned int hashsize);
 
 extern struct hlist_nulls_head *nf_conntrack_hash;
 extern unsigned int nf_conntrack_htable_size;
-extern seqcount_t nf_conntrack_generation;
+extern seqcount_spinlock_t nf_conntrack_generation;
 extern unsigned int nf_conntrack_max;
 
 /* must be called with rcu read lock held */
index dbf5c79..eab6c75 100644 (file)
@@ -1672,6 +1672,8 @@ void tcp_fastopen_destroy_cipher(struct sock *sk);
 void tcp_fastopen_ctx_destroy(struct net *net);
 int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk,
                              void *primary_key, void *backup_key);
+int tcp_fastopen_get_cipher(struct net *net, struct inet_connection_sock *icsk,
+                           u64 *key);
 void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb);
 struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
                              struct request_sock *req,
index e223c4f..9c2eff6 100644 (file)
@@ -22,7 +22,7 @@ static inline int read_aux_reg(u32 r)
 
 /*
  * function helps elide unused variable warning
- * see: http://lists.infradead.org/pipermail/linux-snps-arc/2016-November/001748.html
+ * see: https://lists.infradead.org/pipermail/linux-snps-arc/2016-November/001748.html
  */
 static inline void write_aux_reg(u32 r, u32 v)
 {
index 75bee29..b8feba7 100644 (file)
@@ -43,7 +43,7 @@ static inline void *iscsit_priv_cmd(struct iscsi_cmd *cmd)
  * From iscsi_target_transport.c
  */
 
-extern int iscsit_register_transport(struct iscsit_transport *);
+extern void iscsit_register_transport(struct iscsit_transport *);
 extern void iscsit_unregister_transport(struct iscsit_transport *);
 extern struct iscsit_transport *iscsit_get_transport(int);
 extern void iscsit_put_transport(struct iscsit_transport *);
index cc41d69..4c8b99e 100644 (file)
@@ -746,24 +746,29 @@ TRACE_EVENT(ext4_mb_release_group_pa,
 );
 
 TRACE_EVENT(ext4_discard_preallocations,
-       TP_PROTO(struct inode *inode),
+       TP_PROTO(struct inode *inode, unsigned int len, unsigned int needed),
 
-       TP_ARGS(inode),
+       TP_ARGS(inode, len, needed),
 
        TP_STRUCT__entry(
-               __field(        dev_t,  dev                     )
-               __field(        ino_t,  ino                     )
+               __field(        dev_t,          dev             )
+               __field(        ino_t,          ino             )
+               __field(        unsigned int,   len             )
+               __field(        unsigned int,   needed          )
 
        ),
 
        TP_fast_assign(
                __entry->dev    = inode->i_sb->s_dev;
                __entry->ino    = inode->i_ino;
+               __entry->len    = len;
+               __entry->needed = needed;
        ),
 
-       TP_printk("dev %d,%d ino %lu",
+       TP_printk("dev %d,%d ino %lu len: %u needed %u",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
-                 (unsigned long) __entry->ino)
+                 (unsigned long) __entry->ino, __entry->len,
+                 __entry->needed)
 );
 
 TRACE_EVENT(ext4_mb_discard_preallocations,
@@ -1312,18 +1317,34 @@ DEFINE_EVENT(ext4__bitmap_load, ext4_mb_buddy_bitmap_load,
        TP_ARGS(sb, group)
 );
 
-DEFINE_EVENT(ext4__bitmap_load, ext4_read_block_bitmap_load,
+DEFINE_EVENT(ext4__bitmap_load, ext4_load_inode_bitmap,
 
        TP_PROTO(struct super_block *sb, unsigned long group),
 
        TP_ARGS(sb, group)
 );
 
-DEFINE_EVENT(ext4__bitmap_load, ext4_load_inode_bitmap,
+TRACE_EVENT(ext4_read_block_bitmap_load,
+       TP_PROTO(struct super_block *sb, unsigned long group, bool prefetch),
 
-       TP_PROTO(struct super_block *sb, unsigned long group),
+       TP_ARGS(sb, group, prefetch),
 
-       TP_ARGS(sb, group)
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        __u32,  group                   )
+               __field(        bool,   prefetch                )
+
+       ),
+
+       TP_fast_assign(
+               __entry->dev    = sb->s_dev;
+               __entry->group  = group;
+               __entry->prefetch = prefetch;
+       ),
+
+       TP_printk("dev %d,%d group %u prefetch %d",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 __entry->group, __entry->prefetch)
 );
 
 TRACE_EVENT(ext4_direct_IO_enter,
@@ -2726,6 +2747,50 @@ TRACE_EVENT(ext4_error,
                  __entry->function, __entry->line)
 );
 
+TRACE_EVENT(ext4_prefetch_bitmaps,
+           TP_PROTO(struct super_block *sb, ext4_group_t group,
+                    ext4_group_t next, unsigned int prefetch_ios),
+
+       TP_ARGS(sb, group, next, prefetch_ios),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        __u32,  group                   )
+               __field(        __u32,  next                    )
+               __field(        __u32,  ios                     )
+       ),
+
+       TP_fast_assign(
+               __entry->dev    = sb->s_dev;
+               __entry->group  = group;
+               __entry->next   = next;
+               __entry->ios    = prefetch_ios;
+       ),
+
+       TP_printk("dev %d,%d group %u next %u ios %u",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 __entry->group, __entry->next, __entry->ios)
+);
+
+TRACE_EVENT(ext4_lazy_itable_init,
+           TP_PROTO(struct super_block *sb, ext4_group_t group),
+
+       TP_ARGS(sb, group),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        __u32,  group                   )
+       ),
+
+       TP_fast_assign(
+               __entry->dev    = sb->s_dev;
+               __entry->group  = group;
+       ),
+
+       TP_printk("dev %d,%d group %u",
+                 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->group)
+);
+
 #endif /* _TRACE_EXT4_H */
 
 /* This part must be outside protection */
index 8639ab9..8a1c131 100644 (file)
@@ -1891,6 +1891,69 @@ TRACE_EVENT(f2fs_iostat,
                __entry->fs_cdrio, __entry->fs_nrio, __entry->fs_mrio)
 );
 
+TRACE_EVENT(f2fs_bmap,
+
+       TP_PROTO(struct inode *inode, sector_t lblock, sector_t pblock),
+
+       TP_ARGS(inode, lblock, pblock),
+
+       TP_STRUCT__entry(
+               __field(dev_t, dev)
+               __field(ino_t, ino)
+               __field(sector_t, lblock)
+               __field(sector_t, pblock)
+       ),
+
+       TP_fast_assign(
+               __entry->dev            = inode->i_sb->s_dev;
+               __entry->ino            = inode->i_ino;
+               __entry->lblock         = lblock;
+               __entry->pblock         = pblock;
+       ),
+
+       TP_printk("dev = (%d,%d), ino = %lu, lblock:%lld, pblock:%lld",
+               show_dev_ino(__entry),
+               (unsigned long long)__entry->lblock,
+               (unsigned long long)__entry->pblock)
+);
+
+TRACE_EVENT(f2fs_fiemap,
+
+       TP_PROTO(struct inode *inode, sector_t lblock, sector_t pblock,
+               unsigned long long len, unsigned int flags, int ret),
+
+       TP_ARGS(inode, lblock, pblock, len, flags, ret),
+
+       TP_STRUCT__entry(
+               __field(dev_t, dev)
+               __field(ino_t, ino)
+               __field(sector_t, lblock)
+               __field(sector_t, pblock)
+               __field(unsigned long long, len)
+               __field(unsigned int, flags)
+               __field(int, ret)
+       ),
+
+       TP_fast_assign(
+               __entry->dev            = inode->i_sb->s_dev;
+               __entry->ino            = inode->i_ino;
+               __entry->lblock         = lblock;
+               __entry->pblock         = pblock;
+               __entry->len            = len;
+               __entry->flags          = flags;
+               __entry->ret            = ret;
+       ),
+
+       TP_printk("dev = (%d,%d), ino = %lu, lblock:%lld, pblock:%lld, "
+               "len:%llu, flags:%u, ret:%d",
+               show_dev_ino(__entry),
+               (unsigned long long)__entry->lblock,
+               (unsigned long long)__entry->pblock,
+               __entry->len,
+               __entry->flags,
+               __entry->ret)
+);
+
 #endif /* _TRACE_F2FS_H */
 
  /* This part must be outside protection */
index 705b33d..4d43439 100644 (file)
@@ -46,13 +46,18 @@ MIGRATE_REASON
 TRACE_EVENT(mm_migrate_pages,
 
        TP_PROTO(unsigned long succeeded, unsigned long failed,
-                enum migrate_mode mode, int reason),
+                unsigned long thp_succeeded, unsigned long thp_failed,
+                unsigned long thp_split, enum migrate_mode mode, int reason),
 
-       TP_ARGS(succeeded, failed, mode, reason),
+       TP_ARGS(succeeded, failed, thp_succeeded, thp_failed,
+               thp_split, mode, reason),
 
        TP_STRUCT__entry(
                __field(        unsigned long,          succeeded)
                __field(        unsigned long,          failed)
+               __field(        unsigned long,          thp_succeeded)
+               __field(        unsigned long,          thp_failed)
+               __field(        unsigned long,          thp_split)
                __field(        enum migrate_mode,      mode)
                __field(        int,                    reason)
        ),
@@ -60,13 +65,19 @@ TRACE_EVENT(mm_migrate_pages,
        TP_fast_assign(
                __entry->succeeded      = succeeded;
                __entry->failed         = failed;
+               __entry->thp_succeeded  = thp_succeeded;
+               __entry->thp_failed     = thp_failed;
+               __entry->thp_split      = thp_split;
                __entry->mode           = mode;
                __entry->reason         = reason;
        ),
 
-       TP_printk("nr_succeeded=%lu nr_failed=%lu mode=%s reason=%s",
+       TP_printk("nr_succeeded=%lu nr_failed=%lu nr_thp_succeeded=%lu nr_thp_failed=%lu nr_thp_split=%lu mode=%s reason=%s",
                __entry->succeeded,
                __entry->failed,
+               __entry->thp_succeeded,
+               __entry->thp_failed,
+               __entry->thp_split,
                __print_symbolic(__entry->mode, MIGRATE_MODE),
                __print_symbolic(__entry->reason, MIGRATE_REASON))
 );
index 32c10a5..9570a10 100644 (file)
@@ -307,6 +307,23 @@ TRACE_EVENT(urandom_read,
                  __entry->pool_left, __entry->input_left)
 );
 
+TRACE_EVENT(prandom_u32,
+
+       TP_PROTO(unsigned int ret),
+
+       TP_ARGS(ret),
+
+       TP_STRUCT__entry(
+               __field(   unsigned int, ret)
+       ),
+
+       TP_fast_assign(
+               __entry->ret = ret;
+       ),
+
+       TP_printk("ret=%u" , __entry->ret)
+);
+
 #endif /* _TRACE_RANDOM_H */
 
 /* This part must be outside protection */
index b9b51a4..ffdbe6f 100644 (file)
@@ -170,55 +170,144 @@ DECLARE_EVENT_CLASS(rpcgss_ctx_class,
 DEFINE_CTX_EVENT(init);
 DEFINE_CTX_EVENT(destroy);
 
+DECLARE_EVENT_CLASS(rpcgss_svc_gssapi_class,
+       TP_PROTO(
+               const struct svc_rqst *rqstp,
+               u32 maj_stat
+       ),
+
+       TP_ARGS(rqstp, maj_stat),
+
+       TP_STRUCT__entry(
+               __field(u32, xid)
+               __field(u32, maj_stat)
+               __string(addr, rqstp->rq_xprt->xpt_remotebuf)
+       ),
+
+       TP_fast_assign(
+               __entry->xid = __be32_to_cpu(rqstp->rq_xid);
+               __entry->maj_stat = maj_stat;
+               __assign_str(addr, rqstp->rq_xprt->xpt_remotebuf);
+       ),
+
+       TP_printk("addr=%s xid=0x%08x maj_stat=%s",
+               __get_str(addr), __entry->xid,
+               __entry->maj_stat == 0 ?
+               "GSS_S_COMPLETE" : show_gss_status(__entry->maj_stat))
+);
+
+#define DEFINE_SVC_GSSAPI_EVENT(name)                                  \
+       DEFINE_EVENT(rpcgss_svc_gssapi_class, rpcgss_svc_##name,        \
+                       TP_PROTO(                                       \
+                               const struct svc_rqst *rqstp,           \
+                               u32 maj_stat                            \
+                       ),                                              \
+                       TP_ARGS(rqstp, maj_stat))
+
+DEFINE_SVC_GSSAPI_EVENT(unwrap);
+DEFINE_SVC_GSSAPI_EVENT(mic);
+
+TRACE_EVENT(rpcgss_svc_unwrap_failed,
+       TP_PROTO(
+               const struct svc_rqst *rqstp
+       ),
+
+       TP_ARGS(rqstp),
+
+       TP_STRUCT__entry(
+               __field(u32, xid)
+               __string(addr, rqstp->rq_xprt->xpt_remotebuf)
+       ),
+
+       TP_fast_assign(
+               __entry->xid = be32_to_cpu(rqstp->rq_xid);
+               __assign_str(addr, rqstp->rq_xprt->xpt_remotebuf);
+       ),
+
+       TP_printk("addr=%s xid=0x%08x", __get_str(addr), __entry->xid)
+);
+
+TRACE_EVENT(rpcgss_svc_seqno_bad,
+       TP_PROTO(
+               const struct svc_rqst *rqstp,
+               u32 expected,
+               u32 received
+       ),
+
+       TP_ARGS(rqstp, expected, received),
+
+       TP_STRUCT__entry(
+               __field(u32, expected)
+               __field(u32, received)
+               __field(u32, xid)
+               __string(addr, rqstp->rq_xprt->xpt_remotebuf)
+       ),
+
+       TP_fast_assign(
+               __entry->expected = expected;
+               __entry->received = received;
+               __entry->xid = __be32_to_cpu(rqstp->rq_xid);
+               __assign_str(addr, rqstp->rq_xprt->xpt_remotebuf);
+       ),
+
+       TP_printk("addr=%s xid=0x%08x expected seqno %u, received seqno %u",
+               __get_str(addr), __entry->xid,
+               __entry->expected, __entry->received)
+);
+
 TRACE_EVENT(rpcgss_svc_accept_upcall,
        TP_PROTO(
-               __be32 xid,
+               const struct svc_rqst *rqstp,
                u32 major_status,
                u32 minor_status
        ),
 
-       TP_ARGS(xid, major_status, minor_status),
+       TP_ARGS(rqstp, major_status, minor_status),
 
        TP_STRUCT__entry(
-               __field(u32, xid)
                __field(u32, minor_status)
                __field(unsigned long, major_status)
+               __field(u32, xid)
+               __string(addr, rqstp->rq_xprt->xpt_remotebuf)
        ),
 
        TP_fast_assign(
-               __entry->xid = be32_to_cpu(xid);
                __entry->minor_status = minor_status;
                __entry->major_status = major_status;
+               __entry->xid = be32_to_cpu(rqstp->rq_xid);
+               __assign_str(addr, rqstp->rq_xprt->xpt_remotebuf);
        ),
 
-       TP_printk("xid=0x%08x major_status=%s (0x%08lx) minor_status=%u",
-               __entry->xid, __entry->major_status == 0 ? "GSS_S_COMPLETE" :
-                               show_gss_status(__entry->major_status),
+       TP_printk("addr=%s xid=0x%08x major_status=%s (0x%08lx) minor_status=%u",
+               __get_str(addr), __entry->xid,
+               (__entry->major_status == 0) ? "GSS_S_COMPLETE" :
+                       show_gss_status(__entry->major_status),
                __entry->major_status, __entry->minor_status
        )
 );
 
-TRACE_EVENT(rpcgss_svc_accept,
+TRACE_EVENT(rpcgss_svc_authenticate,
        TP_PROTO(
-               __be32 xid,
-               size_t len
+               const struct svc_rqst *rqstp,
+               const struct rpc_gss_wire_cred *gc
        ),
 
-       TP_ARGS(xid, len),
+       TP_ARGS(rqstp, gc),
 
        TP_STRUCT__entry(
+               __field(u32, seqno)
                __field(u32, xid)
-               __field(size_t, len)
+               __string(addr, rqstp->rq_xprt->xpt_remotebuf)
        ),
 
        TP_fast_assign(
-               __entry->xid = be32_to_cpu(xid);
-               __entry->len = len;
+               __entry->xid = be32_to_cpu(rqstp->rq_xid);
+               __entry->seqno = gc->gc_seq;
+               __assign_str(addr, rqstp->rq_xprt->xpt_remotebuf);
        ),
 
-       TP_printk("xid=0x%08x len=%zu",
-               __entry->xid, __entry->len
-       )
+       TP_printk("addr=%s xid=0x%08x seqno=%u", __get_str(addr),
+               __entry->xid, __entry->seqno)
 );
 
 
@@ -371,11 +460,11 @@ TRACE_EVENT(rpcgss_update_slack,
 
 DECLARE_EVENT_CLASS(rpcgss_svc_seqno_class,
        TP_PROTO(
-               __be32 xid,
+               const struct svc_rqst *rqstp,
                u32 seqno
        ),
 
-       TP_ARGS(xid, seqno),
+       TP_ARGS(rqstp, seqno),
 
        TP_STRUCT__entry(
                __field(u32, xid)
@@ -383,25 +472,52 @@ DECLARE_EVENT_CLASS(rpcgss_svc_seqno_class,
        ),
 
        TP_fast_assign(
-               __entry->xid = be32_to_cpu(xid);
+               __entry->xid = be32_to_cpu(rqstp->rq_xid);
                __entry->seqno = seqno;
        ),
 
-       TP_printk("xid=0x%08x seqno=%u, request discarded",
+       TP_printk("xid=0x%08x seqno=%u",
                __entry->xid, __entry->seqno)
 );
 
 #define DEFINE_SVC_SEQNO_EVENT(name)                                   \
-       DEFINE_EVENT(rpcgss_svc_seqno_class, rpcgss_svc_##name,         \
+       DEFINE_EVENT(rpcgss_svc_seqno_class, rpcgss_svc_seqno_##name,   \
                        TP_PROTO(                                       \
-                               __be32 xid,                             \
+                               const struct svc_rqst *rqstp,           \
                                u32 seqno                               \
                        ),                                              \
-                       TP_ARGS(xid, seqno))
+                       TP_ARGS(rqstp, seqno))
 
-DEFINE_SVC_SEQNO_EVENT(large_seqno);
-DEFINE_SVC_SEQNO_EVENT(old_seqno);
+DEFINE_SVC_SEQNO_EVENT(large);
+DEFINE_SVC_SEQNO_EVENT(seen);
 
+TRACE_EVENT(rpcgss_svc_seqno_low,
+       TP_PROTO(
+               const struct svc_rqst *rqstp,
+               u32 seqno,
+               u32 min,
+               u32 max
+       ),
+
+       TP_ARGS(rqstp, seqno, min, max),
+
+       TP_STRUCT__entry(
+               __field(u32, xid)
+               __field(u32, seqno)
+               __field(u32, min)
+               __field(u32, max)
+       ),
+
+       TP_fast_assign(
+               __entry->xid = be32_to_cpu(rqstp->rq_xid);
+               __entry->seqno = seqno;
+               __entry->min = min;
+               __entry->max = max;
+       ),
+
+       TP_printk("xid=0x%08x seqno=%u window=[%u..%u]",
+               __entry->xid, __entry->seqno, __entry->min, __entry->max)
+);
 
 /**
  ** gssd upcall related trace events
index 0f05a6e..abe9422 100644 (file)
@@ -11,6 +11,7 @@
 #define _TRACE_RPCRDMA_H
 
 #include <linux/scatterlist.h>
+#include <linux/sunrpc/rpc_rdma_cid.h>
 #include <linux/tracepoint.h>
 #include <trace/events/rdma.h>
 
  ** Event classes
  **/
 
+DECLARE_EVENT_CLASS(rpcrdma_completion_class,
+       TP_PROTO(
+               const struct ib_wc *wc,
+               const struct rpc_rdma_cid *cid
+       ),
+
+       TP_ARGS(wc, cid),
+
+       TP_STRUCT__entry(
+               __field(u32, cq_id)
+               __field(int, completion_id)
+               __field(unsigned long, status)
+               __field(unsigned int, vendor_err)
+       ),
+
+       TP_fast_assign(
+               __entry->cq_id = cid->ci_queue_id;
+               __entry->completion_id = cid->ci_completion_id;
+               __entry->status = wc->status;
+               if (wc->status)
+                       __entry->vendor_err = wc->vendor_err;
+               else
+                       __entry->vendor_err = 0;
+       ),
+
+       TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x)",
+               __entry->cq_id, __entry->completion_id,
+               rdma_show_wc_status(__entry->status),
+               __entry->status, __entry->vendor_err
+       )
+);
+
+#define DEFINE_COMPLETION_EVENT(name)                                  \
+               DEFINE_EVENT(rpcrdma_completion_class, name,            \
+                               TP_PROTO(                               \
+                                       const struct ib_wc *wc,         \
+                                       const struct rpc_rdma_cid *cid  \
+                               ),                                      \
+                               TP_ARGS(wc, cid))
+
 DECLARE_EVENT_CLASS(xprtrdma_reply_event,
        TP_PROTO(
                const struct rpcrdma_rep *rep
@@ -1328,13 +1369,16 @@ TRACE_DEFINE_ENUM(RDMA_ERROR);
 
 TRACE_EVENT(svcrdma_decode_rqst,
        TP_PROTO(
+               const struct svc_rdma_recv_ctxt *ctxt,
                __be32 *p,
                unsigned int hdrlen
        ),
 
-       TP_ARGS(p, hdrlen),
+       TP_ARGS(ctxt, p, hdrlen),
 
        TP_STRUCT__entry(
+               __field(u32, cq_id)
+               __field(int, completion_id)
                __field(u32, xid)
                __field(u32, vers)
                __field(u32, proc)
@@ -1343,6 +1387,8 @@ TRACE_EVENT(svcrdma_decode_rqst,
        ),
 
        TP_fast_assign(
+               __entry->cq_id = ctxt->rc_cid.ci_queue_id;
+               __entry->completion_id = ctxt->rc_cid.ci_completion_id;
                __entry->xid = be32_to_cpup(p++);
                __entry->vers = be32_to_cpup(p++);
                __entry->credits = be32_to_cpup(p++);
@@ -1350,37 +1396,48 @@ TRACE_EVENT(svcrdma_decode_rqst,
                __entry->hdrlen = hdrlen;
        ),
 
-       TP_printk("xid=0x%08x vers=%u credits=%u proc=%s hdrlen=%u",
+       TP_printk("cq.id=%u cid=%d xid=0x%08x vers=%u credits=%u proc=%s hdrlen=%u",
+               __entry->cq_id, __entry->completion_id,
                __entry->xid, __entry->vers, __entry->credits,
                show_rpcrdma_proc(__entry->proc), __entry->hdrlen)
 );
 
 TRACE_EVENT(svcrdma_decode_short_err,
        TP_PROTO(
+               const struct svc_rdma_recv_ctxt *ctxt,
                unsigned int hdrlen
        ),
 
-       TP_ARGS(hdrlen),
+       TP_ARGS(ctxt, hdrlen),
 
        TP_STRUCT__entry(
+               __field(u32, cq_id)
+               __field(int, completion_id)
                __field(unsigned int, hdrlen)
        ),
 
        TP_fast_assign(
+               __entry->cq_id = ctxt->rc_cid.ci_queue_id;
+               __entry->completion_id = ctxt->rc_cid.ci_completion_id;
                __entry->hdrlen = hdrlen;
        ),
 
-       TP_printk("hdrlen=%u", __entry->hdrlen)
+       TP_printk("cq.id=%u cid=%d hdrlen=%u",
+               __entry->cq_id, __entry->completion_id,
+               __entry->hdrlen)
 );
 
 DECLARE_EVENT_CLASS(svcrdma_badreq_event,
        TP_PROTO(
+               const struct svc_rdma_recv_ctxt *ctxt,
                __be32 *p
        ),
 
-       TP_ARGS(p),
+       TP_ARGS(ctxt, p),
 
        TP_STRUCT__entry(
+               __field(u32, cq_id)
+               __field(int, completion_id)
                __field(u32, xid)
                __field(u32, vers)
                __field(u32, proc)
@@ -1388,13 +1445,16 @@ DECLARE_EVENT_CLASS(svcrdma_badreq_event,
        ),
 
        TP_fast_assign(
+               __entry->cq_id = ctxt->rc_cid.ci_queue_id;
+               __entry->completion_id = ctxt->rc_cid.ci_completion_id;
                __entry->xid = be32_to_cpup(p++);
                __entry->vers = be32_to_cpup(p++);
                __entry->credits = be32_to_cpup(p++);
                __entry->proc = be32_to_cpup(p);
        ),
 
-       TP_printk("xid=0x%08x vers=%u credits=%u proc=%u",
+       TP_printk("cq.id=%u cid=%d xid=0x%08x vers=%u credits=%u proc=%u",
+               __entry->cq_id, __entry->completion_id,
                __entry->xid, __entry->vers, __entry->credits, __entry->proc)
 );
 
@@ -1402,9 +1462,10 @@ DECLARE_EVENT_CLASS(svcrdma_badreq_event,
                DEFINE_EVENT(svcrdma_badreq_event,                      \
                             svcrdma_decode_##name##_err,               \
                                TP_PROTO(                               \
+                                       const struct svc_rdma_recv_ctxt *ctxt,  \
                                        __be32 *p                       \
                                ),                                      \
-                               TP_ARGS(p))
+                               TP_ARGS(ctxt, p))
 
 DEFINE_BADREQ_EVENT(badvers);
 DEFINE_BADREQ_EVENT(drop);
@@ -1716,7 +1777,7 @@ TRACE_EVENT(svcrdma_send_pullup,
        TP_printk("len=%u", __entry->len)
 );
 
-TRACE_EVENT(svcrdma_send_failed,
+TRACE_EVENT(svcrdma_send_err,
        TP_PROTO(
                const struct svc_rqst *rqst,
                int status
@@ -1727,167 +1788,127 @@ TRACE_EVENT(svcrdma_send_failed,
        TP_STRUCT__entry(
                __field(int, status)
                __field(u32, xid)
-               __field(const void *, xprt)
                __string(addr, rqst->rq_xprt->xpt_remotebuf)
        ),
 
        TP_fast_assign(
                __entry->status = status;
                __entry->xid = __be32_to_cpu(rqst->rq_xid);
-               __entry->xprt = rqst->rq_xprt;
                __assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
        ),
 
-       TP_printk("xprt=%p addr=%s xid=0x%08x status=%d",
-               __entry->xprt, __get_str(addr),
+       TP_printk("addr=%s xid=0x%08x status=%d", __get_str(addr),
                __entry->xid, __entry->status
        )
 );
 
-DECLARE_EVENT_CLASS(svcrdma_sendcomp_event,
-       TP_PROTO(
-               const struct ib_wc *wc
-       ),
-
-       TP_ARGS(wc),
-
-       TP_STRUCT__entry(
-               __field(const void *, cqe)
-               __field(unsigned int, status)
-               __field(unsigned int, vendor_err)
-       ),
-
-       TP_fast_assign(
-               __entry->cqe = wc->wr_cqe;
-               __entry->status = wc->status;
-               if (wc->status)
-                       __entry->vendor_err = wc->vendor_err;
-               else
-                       __entry->vendor_err = 0;
-       ),
-
-       TP_printk("cqe=%p status=%s (%u/0x%x)",
-               __entry->cqe, rdma_show_wc_status(__entry->status),
-               __entry->status, __entry->vendor_err
-       )
-);
-
-#define DEFINE_SENDCOMP_EVENT(name)                                    \
-               DEFINE_EVENT(svcrdma_sendcomp_event, svcrdma_wc_##name, \
-                               TP_PROTO(                               \
-                                       const struct ib_wc *wc          \
-                               ),                                      \
-                               TP_ARGS(wc))
-
 TRACE_EVENT(svcrdma_post_send,
        TP_PROTO(
-               const struct ib_send_wr *wr
+               const struct svc_rdma_send_ctxt *ctxt
        ),
 
-       TP_ARGS(wr),
+       TP_ARGS(ctxt),
 
        TP_STRUCT__entry(
-               __field(const void *, cqe)
+               __field(u32, cq_id)
+               __field(int, completion_id)
                __field(unsigned int, num_sge)
                __field(u32, inv_rkey)
        ),
 
        TP_fast_assign(
-               __entry->cqe = wr->wr_cqe;
+               const struct ib_send_wr *wr = &ctxt->sc_send_wr;
+
+               __entry->cq_id = ctxt->sc_cid.ci_queue_id;
+               __entry->completion_id = ctxt->sc_cid.ci_completion_id;
                __entry->num_sge = wr->num_sge;
                __entry->inv_rkey = (wr->opcode == IB_WR_SEND_WITH_INV) ?
                                        wr->ex.invalidate_rkey : 0;
        ),
 
-       TP_printk("cqe=%p num_sge=%u inv_rkey=0x%08x",
-               __entry->cqe, __entry->num_sge,
-               __entry->inv_rkey
+       TP_printk("cq_id=%u cid=%d num_sge=%u inv_rkey=0x%08x",
+               __entry->cq_id, __entry->completion_id,
+               __entry->num_sge, __entry->inv_rkey
        )
 );
 
-DEFINE_SENDCOMP_EVENT(send);
+DEFINE_COMPLETION_EVENT(svcrdma_wc_send);
 
 TRACE_EVENT(svcrdma_post_recv,
        TP_PROTO(
-               const struct ib_recv_wr *wr,
-               int status
+               const struct svc_rdma_recv_ctxt *ctxt
        ),
 
-       TP_ARGS(wr, status),
+       TP_ARGS(ctxt),
 
        TP_STRUCT__entry(
-               __field(const void *, cqe)
-               __field(int, status)
+               __field(u32, cq_id)
+               __field(int, completion_id)
        ),
 
        TP_fast_assign(
-               __entry->cqe = wr->wr_cqe;
-               __entry->status = status;
+               __entry->cq_id = ctxt->rc_cid.ci_queue_id;
+               __entry->completion_id = ctxt->rc_cid.ci_completion_id;
        ),
 
-       TP_printk("cqe=%p status=%d",
-               __entry->cqe, __entry->status
+       TP_printk("cq.id=%d cid=%d",
+               __entry->cq_id, __entry->completion_id
        )
 );
 
-TRACE_EVENT(svcrdma_wc_receive,
+DEFINE_COMPLETION_EVENT(svcrdma_wc_receive);
+
+TRACE_EVENT(svcrdma_rq_post_err,
        TP_PROTO(
-               const struct ib_wc *wc
+               const struct svcxprt_rdma *rdma,
+               int status
        ),
 
-       TP_ARGS(wc),
+       TP_ARGS(rdma, status),
 
        TP_STRUCT__entry(
-               __field(const void *, cqe)
-               __field(u32, byte_len)
-               __field(unsigned int, status)
-               __field(u32, vendor_err)
+               __field(int, status)
+               __string(addr, rdma->sc_xprt.xpt_remotebuf)
        ),
 
        TP_fast_assign(
-               __entry->cqe = wc->wr_cqe;
-               __entry->status = wc->status;
-               if (wc->status) {
-                       __entry->byte_len = 0;
-                       __entry->vendor_err = wc->vendor_err;
-               } else {
-                       __entry->byte_len = wc->byte_len;
-                       __entry->vendor_err = 0;
-               }
+               __entry->status = status;
+               __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
        ),
 
-       TP_printk("cqe=%p byte_len=%u status=%s (%u/0x%x)",
-               __entry->cqe, __entry->byte_len,
-               rdma_show_wc_status(__entry->status),
-               __entry->status, __entry->vendor_err
+       TP_printk("addr=%s status=%d",
+               __get_str(addr), __entry->status
        )
 );
 
-TRACE_EVENT(svcrdma_post_rw,
+TRACE_EVENT(svcrdma_post_chunk,
        TP_PROTO(
-               const void *cqe,
+               const struct rpc_rdma_cid *cid,
                int sqecount
        ),
 
-       TP_ARGS(cqe, sqecount),
+       TP_ARGS(cid, sqecount),
 
        TP_STRUCT__entry(
-               __field(const void *, cqe)
+               __field(u32, cq_id)
+               __field(int, completion_id)
                __field(int, sqecount)
        ),
 
        TP_fast_assign(
-               __entry->cqe = cqe;
+               __entry->cq_id = cid->ci_queue_id;
+               __entry->completion_id = cid->ci_completion_id;
                __entry->sqecount = sqecount;
        ),
 
-       TP_printk("cqe=%p sqecount=%d",
-               __entry->cqe, __entry->sqecount
+       TP_printk("cq.id=%u cid=%d sqecount=%d",
+               __entry->cq_id, __entry->completion_id,
+               __entry->sqecount
        )
 );
 
-DEFINE_SENDCOMP_EVENT(read);
-DEFINE_SENDCOMP_EVENT(write);
+DEFINE_COMPLETION_EVENT(svcrdma_wc_read);
+DEFINE_COMPLETION_EVENT(svcrdma_wc_write);
 
 TRACE_EVENT(svcrdma_qp_error,
        TP_PROTO(
index 6a12935..65d7dfb 100644 (file)
@@ -1250,15 +1250,34 @@ DECLARE_EVENT_CLASS(svc_xdr_buf_class,
 DEFINE_SVCXDRBUF_EVENT(recvfrom);
 DEFINE_SVCXDRBUF_EVENT(sendto);
 
+/*
+ * from include/linux/sunrpc/svc.h
+ */
+#define SVC_RQST_FLAG_LIST                                             \
+       svc_rqst_flag(SECURE)                                           \
+       svc_rqst_flag(LOCAL)                                            \
+       svc_rqst_flag(USEDEFERRAL)                                      \
+       svc_rqst_flag(DROPME)                                           \
+       svc_rqst_flag(SPLICE_OK)                                        \
+       svc_rqst_flag(VICTIM)                                           \
+       svc_rqst_flag(BUSY)                                             \
+       svc_rqst_flag(DATA)                                             \
+       svc_rqst_flag_end(AUTHERR)
+
+#undef svc_rqst_flag
+#undef svc_rqst_flag_end
+#define svc_rqst_flag(x)       TRACE_DEFINE_ENUM(RQ_##x);
+#define svc_rqst_flag_end(x)   TRACE_DEFINE_ENUM(RQ_##x);
+
+SVC_RQST_FLAG_LIST
+
+#undef svc_rqst_flag
+#undef svc_rqst_flag_end
+#define svc_rqst_flag(x)       { BIT(RQ_##x), #x },
+#define svc_rqst_flag_end(x)   { BIT(RQ_##x), #x }
+
 #define show_rqstp_flags(flags)                                                \
-       __print_flags(flags, "|",                                       \
-               { (1UL << RQ_SECURE),           "RQ_SECURE"},           \
-               { (1UL << RQ_LOCAL),            "RQ_LOCAL"},            \
-               { (1UL << RQ_USEDEFERRAL),      "RQ_USEDEFERRAL"},      \
-               { (1UL << RQ_DROPME),           "RQ_DROPME"},           \
-               { (1UL << RQ_SPLICE_OK),        "RQ_SPLICE_OK"},        \
-               { (1UL << RQ_VICTIM),           "RQ_VICTIM"},           \
-               { (1UL << RQ_BUSY),             "RQ_BUSY"})
+               __print_flags(flags, "|", SVC_RQST_FLAG_LIST)
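[Editor's note] The rewrite above is the classic X-macro pattern: a single authoritative flag list is expanded twice with different definitions of svc_rqst_flag()/svc_rqst_flag_end(). Roughly, the preprocessor produces the following (abridged to three flags; one assumption here is only the elision, everything else comes from the hunk). Note the printed names lose their RQ_ prefix compared with the old hand-written table:

/* First expansion: register the enum values with the tracing core. */
TRACE_DEFINE_ENUM(RQ_SECURE);
TRACE_DEFINE_ENUM(RQ_LOCAL);
/* ...one line per flag... */
TRACE_DEFINE_ENUM(RQ_AUTHERR);

/* Second expansion: show_rqstp_flags(flags) becomes roughly */
__print_flags(flags, "|",
	{ BIT(RQ_SECURE),  "SECURE"  },
	{ BIT(RQ_LOCAL),   "LOCAL"   },
	/* ...one entry per flag... */
	{ BIT(RQ_AUTHERR), "AUTHERR" })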
 
 TRACE_EVENT(svc_recv,
        TP_PROTO(struct svc_rqst *rqst, int len),
index 4bee7de..82f3278 100644 (file)
@@ -1004,7 +1004,7 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier)
 #define DRM_FORMAT_MOD_AMLOGIC_FBC(__layout, __options) \
        fourcc_mod_code(AMLOGIC, \
                        ((__layout) & __fourcc_mod_amlogic_layout_mask) | \
-                       ((__options) & __fourcc_mod_amlogic_options_mask \
+                       (((__options) & __fourcc_mod_amlogic_options_mask) \
                         << __fourcc_mod_amlogic_options_shift))
 
 /* Amlogic FBC Layouts */
index 3747426..62e6253 100644 (file)
@@ -82,7 +82,7 @@ struct args_ismountpoint {
 /*
  * All the ioctls use this structure.
  * When sending a path size must account for the total length
- * of the chunk of memory otherwise is is the size of the
+ * of the chunk of memory otherwise it is the size of the
  * structure.
  */
 
index b134e67..0480f89 100644 (file)
@@ -81,6 +81,12 @@ struct bpf_cgroup_storage_key {
        __u32   attach_type;            /* program attach type */
 };
 
+union bpf_iter_link_info {
+       struct {
+               __u32   map_fd;
+       } map;
+};
+
 /* BPF syscall commands, see bpf(2) man-page for details. */
 enum bpf_cmd {
        BPF_MAP_CREATE,
@@ -249,13 +255,6 @@ enum bpf_link_type {
        MAX_BPF_LINK_TYPE,
 };
 
-enum bpf_iter_link_info {
-       BPF_ITER_LINK_UNSPEC = 0,
-       BPF_ITER_LINK_MAP_FD = 1,
-
-       MAX_BPF_ITER_LINK_INFO,
-};
-
 /* cgroup-bpf attach flags used in BPF_PROG_ATTACH command
  *
  * NONE(default): No further bpf programs allowed in the subtree.
@@ -623,6 +622,8 @@ union bpf_attr {
                };
                __u32           attach_type;    /* attach type */
                __u32           flags;          /* extra flags */
+               __aligned_u64   iter_info;      /* extra bpf_iter_link_info */
+               __u32           iter_info_len;  /* iter_info length */
        } link_create;
 
        struct { /* struct used by BPF_LINK_UPDATE command */
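[Editor's note] A hedged sketch of how userspace might use the new iter_info/iter_info_len fields to pin an iterator program to one map via the raw bpf(2) syscall. The BPF_TRACE_ITER attach type and the caller-supplied prog/map fds are assumptions about the surrounding API, not shown in this hunk:

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int attach_map_iter(int prog_fd, int map_fd)
{
	union bpf_iter_link_info linfo;
	union bpf_attr attr;

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;	/* iterate only this map's elements */

	memset(&attr, 0, sizeof(attr));
	attr.link_create.prog_fd = prog_fd;
	attr.link_create.attach_type = BPF_TRACE_ITER;
	attr.link_create.iter_info = (__u64)(unsigned long)&linfo;
	attr.link_create.iter_info_len = sizeof(linfo);

	/* Returns a link fd on success, -1 with errno set on failure. */
	return syscall(__NR_bpf, BPF_LINK_CREATE, &attr, sizeof(attr));
}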
index c6dd021..2222094 100644 (file)
@@ -53,7 +53,7 @@ typedef __s64 Elf64_Sxword;
  *
  * - Oracle: Linker and Libraries.
  *   Part No: 817–1984–19, August 2011.
- *   http://docs.oracle.com/cd/E18752_01/pdf/817-1984.pdf
+ *   https://docs.oracle.com/cd/E18752_01/pdf/817-1984.pdf
  *
  * - System V ABI AMD64 Architecture Processor Supplement
  *   Draft Version 0.99.4,
index e907b70..c2b2caf 100644 (file)
@@ -81,7 +81,10 @@ struct iommu_fault_unrecoverable {
 /**
  * struct iommu_fault_page_request - Page Request data
  * @flags: encodes whether the corresponding fields are valid and whether this
- *         is the last page in group (IOMMU_FAULT_PAGE_REQUEST_* values)
+ *         is the last page in group (IOMMU_FAULT_PAGE_REQUEST_* values).
+ *         When IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID is set, the page response
+ *         must have the same PASID value as the page request. When it is clear,
+ *         the page response should not have a PASID.
  * @pasid: Process Address Space ID
  * @grpid: Page Request Group Index
  * @perm: requested page permissions (IOMMU_FAULT_PERM_* values)
@@ -92,6 +95,7 @@ struct iommu_fault_page_request {
 #define IOMMU_FAULT_PAGE_REQUEST_PASID_VALID   (1 << 0)
 #define IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE     (1 << 1)
 #define IOMMU_FAULT_PAGE_REQUEST_PRIV_DATA     (1 << 2)
+#define IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID  (1 << 3)
        __u32   flags;
        __u32   pasid;
        __u32   grpid;
index f9ed181..13a06e5 100644 (file)
@@ -24,7 +24,7 @@
  * of (ASCII) characters to a 7-segments notation.
  *
  * The 7 segment's wikipedia notation below is used as standard.
- * See: http://en.wikipedia.org/wiki/Seven_segment_display
+ * See: https://en.wikipedia.org/wiki/Seven_segment_display
  *
  * Notation:   +-a-+
  *             f   b
index 0e09dc5..8cf1e48 100644 (file)
@@ -245,6 +245,11 @@ struct nd_cmd_pkg {
 #define NVDIMM_FAMILY_MSFT 3
 #define NVDIMM_FAMILY_HYPERV 4
 #define NVDIMM_FAMILY_PAPR 5
+#define NVDIMM_FAMILY_MAX NVDIMM_FAMILY_PAPR
+
+#define NVDIMM_BUS_FAMILY_NFIT 0
+#define NVDIMM_BUS_FAMILY_INTEL 1
+#define NVDIMM_BUS_FAMILY_MAX NVDIMM_BUS_FAMILY_INTEL
 
 #define ND_IOCTL_CALL                  _IOWR(ND_IOCTL, ND_CMD_CALL,\
                                        struct nd_cmd_pkg)
index 8572930..bf197e9 100644 (file)
@@ -33,6 +33,9 @@
 #define NFS4_ACCESS_EXTEND      0x0008
 #define NFS4_ACCESS_DELETE      0x0010
 #define NFS4_ACCESS_EXECUTE     0x0020
+#define NFS4_ACCESS_XAREAD      0x0040
+#define NFS4_ACCESS_XAWRITE     0x0080
+#define NFS4_ACCESS_XALIST      0x0100
 
 #define NFS4_FH_PERSISTENT             0x0000
 #define NFS4_FH_NOEXPIRE_WITH_OPEN     0x0001
index 7bcc8cd..3afe376 100644 (file)
@@ -56,6 +56,7 @@
 #define NFSDBG_PNFS            0x1000
 #define NFSDBG_PNFS_LD         0x2000
 #define NFSDBG_STATE           0x4000
+#define NFSDBG_XATTRCACHE      0x8000
 #define NFSDBG_ALL             0xFFFF
 
 
diff --git a/include/uapi/linux/remoteproc_cdev.h b/include/uapi/linux/remoteproc_cdev.h
new file mode 100644 (file)
index 0000000..c43768e
--- /dev/null
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * IOCTLs for Remoteproc's character device interface.
+ *
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _UAPI_REMOTEPROC_CDEV_H_
+#define _UAPI_REMOTEPROC_CDEV_H_
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+#define RPROC_MAGIC    0xB7
+
+/*
+ * The RPROC_SET_SHUTDOWN_ON_RELEASE ioctl enables or disables the automatic shutdown of a remote
+ * processor when the controlling userspace closes the char device interface.
+ *
+ * input parameter: integer
+ *   0         : disable automatic shutdown
+ *   other     : enable automatic shutdown
+ */
+#define RPROC_SET_SHUTDOWN_ON_RELEASE _IOW(RPROC_MAGIC, 1, __s32)
+
+/*
+ * The RPROC_GET_SHUTDOWN_ON_RELEASE ioctl gets information about whether the automatic shutdown of
+ * a remote processor is enabled or disabled when the controlling userspace closes the char device
+ * interface.
+ *
+ * output parameter: integer
+ *   0         : automatic shutdown disabled
+ *   other     : automatic shutdown enabled
+ */
+#define RPROC_GET_SHUTDOWN_ON_RELEASE _IOR(RPROC_MAGIC, 2, __s32)
+
+#endif
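[Editor's note] For illustration, a small userspace snippet that opts a remote processor out of automatic shutdown on close. The /dev/remoteproc0 node name is an assumption about how the character device is exposed, not something defined in this header:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/remoteproc_cdev.h>

int main(void)
{
	__s32 enable = 0;	/* 0: keep the remote processor running on close */
	int fd = open("/dev/remoteproc0", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, RPROC_SET_SHUTDOWN_ON_RELEASE, &enable) < 0)
		perror("RPROC_SET_SHUTDOWN_ON_RELEASE");
	close(fd);
	return 0;
}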
index 2fce8b6..f6d2f83 100644 (file)
@@ -7,7 +7,7 @@
 #ifndef __ASSEMBLY__
 #ifndef        __KERNEL__
 #ifndef __EXPORTED_HEADERS__
-#warning "Attempt to use kernel headers from user space, see http://kernelnewbies.org/KernelHeaders"
+#warning "Attempt to use kernel headers from user space, see https://kernelnewbies.org/KernelHeaders"
 #endif /* __EXPORTED_HEADERS__ */
 #endif
 
index 48766fd..0f865ae 100644 (file)
@@ -1229,7 +1229,7 @@ struct usb_set_sel_req {
  * As per USB compliance update, a device that is actively drawing
  * more than 100mA from USB must report itself as bus-powered in
  * the GetStatus(DEVICE) call.
- * http://compliance.usb.org/index.asp?UpdateFile=Electrical&Format=Standard#34
+ * https://compliance.usb.org/index.asp?UpdateFile=Electrical&Format=Standard#34
  */
 #define USB_SELF_POWER_VBUS_MAX_DRAW           100
 
index 0c23496..7523218 100644 (file)
@@ -91,6 +91,8 @@
 
 /* Use message type V2 */
 #define VHOST_BACKEND_F_IOTLB_MSG_V2 0x1
+/* IOTLB can accept batching hints */
+#define VHOST_BACKEND_F_IOTLB_BATCH  0x2
 
 #define VHOST_SET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x25, __u64)
 #define VHOST_GET_BACKEND_FEATURES _IOR(VHOST_VIRTIO, 0x26, __u64)
index 669457c..9a269a8 100644 (file)
@@ -60,6 +60,17 @@ struct vhost_iotlb_msg {
 #define VHOST_IOTLB_UPDATE         2
 #define VHOST_IOTLB_INVALIDATE     3
 #define VHOST_IOTLB_ACCESS_FAIL    4
+/*
+ * VHOST_IOTLB_BATCH_BEGIN and VHOST_IOTLB_BATCH_END allow modifying
+ * multiple mappings in one go: beginning with
+ * VHOST_IOTLB_BATCH_BEGIN, followed by any number of
+ * VHOST_IOTLB_UPDATE messages, and ending with VHOST_IOTLB_BATCH_END.
+ * When one of these two values is used as the message type, the rest
+ * of the fields in the message are ignored. There's no guarantee that
+ * these changes take place atomically in the device.
+ */
+#define VHOST_IOTLB_BATCH_BEGIN    5
+#define VHOST_IOTLB_BATCH_END      6
        __u8 type;
 };
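[Editor's note] A rough sketch of the message sequence a backend such as vhost-vdpa would see once batching is negotiated. The write()-based transport and struct vhost_msg_v2 layout are taken from the existing vhost UAPI rather than this hunk, and the helper below is an assumption for illustration:

#include <stdio.h>
#include <unistd.h>
#include <linux/vhost_types.h>

/* Hypothetical helper: send one IOTLB message to a vhost device fd. */
static void send_iotlb(int fd, __u8 type, __u64 iova, __u64 size,
		       __u64 uaddr, __u8 perm)
{
	struct vhost_msg_v2 msg = {
		.type = VHOST_IOTLB_MSG_V2,
		.iotlb = {
			.iova = iova, .size = size, .uaddr = uaddr,
			.perm = perm, .type = type,
		},
	};

	if (write(fd, &msg, sizeof(msg)) != sizeof(msg))
		perror("vhost iotlb msg");
}

/* One batched update: BEGIN, any number of UPDATEs, then END. */
static void map_two_ranges(int fd, __u64 iova, __u64 uaddr, __u64 sz)
{
	send_iotlb(fd, VHOST_IOTLB_BATCH_BEGIN, 0, 0, 0, 0);
	send_iotlb(fd, VHOST_IOTLB_UPDATE, iova, sz, uaddr, VHOST_ACCESS_RW);
	send_iotlb(fd, VHOST_IOTLB_UPDATE, iova + sz, sz, uaddr + sz,
		   VHOST_ACCESS_RO);
	send_iotlb(fd, VHOST_IOTLB_BATCH_END, 0, 0, 0, 0);
}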
 
index 277c4ad..4410474 100644 (file)
@@ -25,7 +25,7 @@
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE. */
-#include <linux/types.h>
+#include <linux/virtio_types.h>
 #include <linux/virtio_ids.h>
 #include <linux/virtio_config.h>
 
@@ -36,7 +36,7 @@
 
 struct virtio_9p_config {
        /* length of the tag name */
-       __u16 tag_len;
+       __virtio16 tag_len;
        /* non-NULL terminated tag name */
        __u8 tag[0];
 } __attribute__((packed));
index dc3e656..ddaa45e 100644 (file)
 #define VIRTIO_BALLOON_CMD_ID_DONE     1
 struct virtio_balloon_config {
        /* Number of pages host wants Guest to give up. */
-       __u32 num_pages;
+       __le32 num_pages;
        /* Number of pages we've actually got in balloon. */
-       __u32 actual;
+       __le32 actual;
        /*
         * Free page hint command id, readonly by guest.
         * Was previously named free_page_report_cmd_id so we
         * need to carry that name for legacy support.
         */
        union {
-               __u32 free_page_hint_cmd_id;
-               __u32 free_page_report_cmd_id;  /* deprecated */
+               __le32 free_page_hint_cmd_id;
+               __le32 free_page_report_cmd_id; /* deprecated */
        };
        /* Stores PAGE_POISON if page poisoning is in use */
-       __u32 poison_val;
+       __le32 poison_val;
 };
 
 #define VIRTIO_BALLOON_S_SWAP_IN  0   /* Amount of memory swapped in */
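[Editor's note] The __u32 → __le32 churn across these virtio config structures makes the declared types match VIRTIO 1.0 semantics, where config space is little-endian. A small illustrative sketch of touching such fields from kernel-style code (the helper names are hypothetical):

#include <asm/byteorder.h>
#include <linux/virtio_balloon.h>

/* Hypothetical host/test-harness helper filling a balloon config snapshot. */
static void fill_balloon_config(struct virtio_balloon_config *cfg,
				u32 num_pages, u32 actual)
{
	cfg->num_pages = cpu_to_le32(num_pages);
	cfg->actual    = cpu_to_le32(actual);
}

static u32 read_actual(const struct virtio_balloon_config *cfg)
{
	return le32_to_cpu(cfg->actual);
}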
index 0f99d7b..d888f01 100644 (file)
 
 struct virtio_blk_config {
        /* The capacity (in 512-byte sectors). */
-       __u64 capacity;
+       __virtio64 capacity;
        /* The maximum segment size (if VIRTIO_BLK_F_SIZE_MAX) */
-       __u32 size_max;
+       __virtio32 size_max;
        /* The maximum number of segments (if VIRTIO_BLK_F_SEG_MAX) */
-       __u32 seg_max;
+       __virtio32 seg_max;
        /* geometry of the device (if VIRTIO_BLK_F_GEOMETRY) */
        struct virtio_blk_geometry {
-               __u16 cylinders;
+               __virtio16 cylinders;
                __u8 heads;
                __u8 sectors;
        } geometry;
 
        /* block size of device (if VIRTIO_BLK_F_BLK_SIZE) */
-       __u32 blk_size;
+       __virtio32 blk_size;
 
        /* the next 4 entries are guarded by VIRTIO_BLK_F_TOPOLOGY  */
        /* exponent for physical block per logical block. */
@@ -78,42 +78,42 @@ struct virtio_blk_config {
        /* alignment offset in logical blocks. */
        __u8 alignment_offset;
        /* minimum I/O size without performance penalty in logical blocks. */
-       __u16 min_io_size;
+       __virtio16 min_io_size;
        /* optimal sustained I/O size in logical blocks. */
-       __u32 opt_io_size;
+       __virtio32 opt_io_size;
 
        /* writeback mode (if VIRTIO_BLK_F_CONFIG_WCE) */
        __u8 wce;
        __u8 unused;
 
        /* number of vqs, only available when VIRTIO_BLK_F_MQ is set */
-       __u16 num_queues;
+       __virtio16 num_queues;
 
        /* the next 3 entries are guarded by VIRTIO_BLK_F_DISCARD */
        /*
         * The maximum discard sectors (in 512-byte sectors) for
         * one segment.
         */
-       __u32 max_discard_sectors;
+       __virtio32 max_discard_sectors;
        /*
         * The maximum number of discard segments in a
         * discard command.
         */
-       __u32 max_discard_seg;
+       __virtio32 max_discard_seg;
        /* Discard commands must be aligned to this number of sectors. */
-       __u32 discard_sector_alignment;
+       __virtio32 discard_sector_alignment;
 
        /* the next 3 entries are guarded by VIRTIO_BLK_F_WRITE_ZEROES */
        /*
         * The maximum number of write zeroes sectors (in 512-byte sectors) in
         * one segment.
         */
-       __u32 max_write_zeroes_sectors;
+       __virtio32 max_write_zeroes_sectors;
        /*
         * The maximum number of segments in a write zeroes
         * command.
         */
-       __u32 max_write_zeroes_seg;
+       __virtio32 max_write_zeroes_seg;
        /*
         * Set if a VIRTIO_BLK_T_WRITE_ZEROES request may result in the
         * deallocation of one or more of the sectors.
index ff8e7dc..b5eda06 100644 (file)
 #define VIRTIO_F_VERSION_1             32
 
 /*
- * If clear - device has the IOMMU bypass quirk feature.
- * If set - use platform tools to detect the IOMMU.
+ * If clear - device has the platform DMA (e.g. IOMMU) bypass quirk feature.
+ * If set - use platform DMA tools to access the memory.
  *
  * Note the reverse polarity (compared to most other features),
  * this is for compatibility with legacy systems.
  */
-#define VIRTIO_F_IOMMU_PLATFORM                33
+#define VIRTIO_F_ACCESS_PLATFORM       33
+#ifndef __KERNEL__
+/* Legacy name for VIRTIO_F_ACCESS_PLATFORM (for compatibility with old userspace) */
+#define VIRTIO_F_IOMMU_PLATFORM                VIRTIO_F_ACCESS_PLATFORM
+#endif /* __KERNEL__ */
 
 /* This feature indicates support for the packed virtqueue layout. */
 #define VIRTIO_F_RING_PACKED           34
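[Editor's note] A one-line illustration of the driver-side consequence of this rename, assuming the standard virtio_has_feature() helper (which lives in linux/virtio_config.h, not in this UAPI header):

#include <linux/virtio.h>
#include <linux/virtio_config.h>

/* Sketch: decide whether DMA addresses must go through the platform's
 * DMA/IOMMU machinery or may be plain guest-physical addresses. */
static bool use_platform_dma(struct virtio_device *vdev)
{
	return virtio_has_feature(vdev, VIRTIO_F_ACCESS_PLATFORM);
}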
index b7fb108..7e6ec2f 100644 (file)
 
 struct virtio_console_config {
        /* columns of the screens */
-       __u16 cols;
+       __virtio16 cols;
        /* rows of the screens */
-       __u16 rows;
+       __virtio16 rows;
        /* max. number of ports this device can hold */
-       __u32 max_nr_ports;
+       __virtio32 max_nr_ports;
        /* emergency write register */
-       __u32 emerg_wr;
+       __virtio32 emerg_wr;
 } __attribute__((packed));
 
 /*
index 50cdc8a..a03932f 100644 (file)
@@ -414,33 +414,33 @@ struct virtio_crypto_op_data_req {
 
 struct virtio_crypto_config {
        /* See VIRTIO_CRYPTO_OP_* above */
-       __u32  status;
+       __le32  status;
 
        /*
         * Maximum number of data queue
         */
-       __u32  max_dataqueues;
+       __le32  max_dataqueues;
 
        /*
         * Specifies the services mask which the device support,
         * see VIRTIO_CRYPTO_SERVICE_* above
         */
-       __u32 crypto_services;
+       __le32 crypto_services;
 
        /* Detailed algorithms mask */
-       __u32 cipher_algo_l;
-       __u32 cipher_algo_h;
-       __u32 hash_algo;
-       __u32 mac_algo_l;
-       __u32 mac_algo_h;
-       __u32 aead_algo;
+       __le32 cipher_algo_l;
+       __le32 cipher_algo_h;
+       __le32 hash_algo;
+       __le32 mac_algo_l;
+       __le32 mac_algo_h;
+       __le32 aead_algo;
        /* Maximum length of cipher key */
-       __u32 max_cipher_key_len;
+       __le32 max_cipher_key_len;
        /* Maximum length of authenticated key */
-       __u32 max_auth_key_len;
-       __u32 reserve;
+       __le32 max_auth_key_len;
+       __le32 reserve;
        /* Maximum size of each crypto request's content */
-       __u64 max_size;
+       __le64 max_size;
 };
 
 struct virtio_crypto_inhdr {
index b02eb2a..3056b6e 100644 (file)
@@ -13,7 +13,7 @@ struct virtio_fs_config {
        __u8 tag[36];
 
        /* Number of request queues */
-       __u32 num_request_queues;
+       __le32 num_request_queues;
 } __attribute__((packed));
 
 #endif /* _UAPI_LINUX_VIRTIO_FS_H */
index 0c85914..ccbd174 100644 (file)
@@ -320,10 +320,10 @@ struct virtio_gpu_resp_edid {
 #define VIRTIO_GPU_EVENT_DISPLAY (1 << 0)
 
 struct virtio_gpu_config {
-       __u32 events_read;
-       __u32 events_clear;
-       __u32 num_scanouts;
-       __u32 num_capsets;
+       __le32 events_read;
+       __le32 events_clear;
+       __le32 num_scanouts;
+       __le32 num_capsets;
 };
 
 /* simple formats for fbcon/X use */
index a7fe5c8..52084b1 100644 (file)
@@ -40,18 +40,18 @@ enum virtio_input_config_select {
 };
 
 struct virtio_input_absinfo {
-       __u32 min;
-       __u32 max;
-       __u32 fuzz;
-       __u32 flat;
-       __u32 res;
+       __le32 min;
+       __le32 max;
+       __le32 fuzz;
+       __le32 flat;
+       __le32 res;
 };
 
 struct virtio_input_devids {
-       __u16 bustype;
-       __u16 vendor;
-       __u16 product;
-       __u16 version;
+       __le16 bustype;
+       __le16 vendor;
+       __le16 product;
+       __le16 version;
 };
 
 struct virtio_input_config {
index 48e3c29..237e36a 100644 (file)
 #define VIRTIO_IOMMU_F_MMIO                    5
 
 struct virtio_iommu_range_64 {
-       __u64                                   start;
-       __u64                                   end;
+       __le64                                  start;
+       __le64                                  end;
 };
 
 struct virtio_iommu_range_32 {
-       __u32                                   start;
-       __u32                                   end;
+       __le32                                  start;
+       __le32                                  end;
 };
 
 struct virtio_iommu_config {
        /* Supported page sizes */
-       __u64                                   page_size_mask;
+       __le64                                  page_size_mask;
        /* Supported IOVA range */
        struct virtio_iommu_range_64            input_range;
        /* Max domain ID size */
        struct virtio_iommu_range_32            domain_range;
        /* Probe buffer size */
-       __u32                                   probe_size;
+       __le32                                  probe_size;
 };
 
 /* Request types */
index a9ffe04..70e01c6 100644 (file)
@@ -185,27 +185,27 @@ struct virtio_mem_resp {
 
 struct virtio_mem_config {
        /* Block size and alignment. Cannot change. */
-       __u64 block_size;
+       __le64 block_size;
        /* Valid with VIRTIO_MEM_F_ACPI_PXM. Cannot change. */
-       __u16 node_id;
+       __le16 node_id;
        __u8 padding[6];
        /* Start address of the memory region. Cannot change. */
-       __u64 addr;
+       __le64 addr;
        /* Region size (maximum). Cannot change. */
-       __u64 region_size;
+       __le64 region_size;
        /*
         * Currently usable region size. Can grow up to region_size. Can
         * shrink due to VIRTIO_MEM_REQ_UNPLUG_ALL (in which case no config
         * update will be sent).
         */
-       __u64 usable_region_size;
+       __le64 usable_region_size;
        /*
         * Currently used size. Changes due to plug/unplug requests, but no
         * config updates will be sent.
         */
-       __u64 plugged_size;
+       __le64 plugged_size;
        /* Requested size. New plug requests cannot exceed it. Can change. */
-       __u64 requested_size;
+       __le64 requested_size;
 };
 
 #endif /* _LINUX_VIRTIO_MEM_H */
index 19d23e5..3f55a42 100644 (file)
@@ -87,19 +87,19 @@ struct virtio_net_config {
        /* The config defining mac address (if VIRTIO_NET_F_MAC) */
        __u8 mac[ETH_ALEN];
        /* See VIRTIO_NET_F_STATUS and VIRTIO_NET_S_* above */
-       __u16 status;
+       __virtio16 status;
        /* Maximum number of each of transmit and receive queues;
         * see VIRTIO_NET_F_MQ and VIRTIO_NET_CTRL_MQ.
         * Legal values are between 1 and 0x8000
         */
-       __u16 max_virtqueue_pairs;
+       __virtio16 max_virtqueue_pairs;
        /* Default maximum transmit unit advice */
-       __u16 mtu;
+       __virtio16 mtu;
        /*
         * speed, in units of 1Mb. All values 0 to INT_MAX are legal.
         * Any other value stands for unknown.
         */
-       __u32 speed;
+       __le32 speed;
        /*
         * 0x00 - half duplex
         * 0x01 - full duplex
index b022787..d676b36 100644 (file)
@@ -15,8 +15,8 @@
 #include <linux/virtio_config.h>
 
 struct virtio_pmem_config {
-       __u64 start;
-       __u64 size;
+       __le64 start;
+       __le64 size;
 };
 
 #define VIRTIO_PMEM_REQ_TYPE_FLUSH      0
index cc18ef8..0abaae4 100644 (file)
@@ -103,16 +103,16 @@ struct virtio_scsi_event {
 } __attribute__((packed));
 
 struct virtio_scsi_config {
-       __u32 num_queues;
-       __u32 seg_max;
-       __u32 max_sectors;
-       __u32 cmd_per_lun;
-       __u32 event_info_size;
-       __u32 sense_size;
-       __u32 cdb_size;
-       __u16 max_channel;
-       __u16 max_target;
-       __u32 max_lun;
+       __virtio32 num_queues;
+       __virtio32 seg_max;
+       __virtio32 max_sectors;
+       __virtio32 cmd_per_lun;
+       __virtio32 event_info_size;
+       __virtio32 sense_size;
+       __virtio32 cdb_size;
+       __virtio16 max_channel;
+       __virtio16 max_target;
+       __virtio32 max_lun;
 } __attribute__((packed));
 
 /* Feature Bits */
index 4b48fbf..65b9db9 100644 (file)
@@ -262,7 +262,7 @@ struct mtd_ecc_stats {
  * @MTD_FILE_MODE_OTP_USER:    OTP enabled in user mode
  * @MTD_FILE_MODE_RAW:         OTP disabled, ECC disabled
  *
- * These modes can be set via ioctl(MTDFILEMODE). The mode mode will be retained
+ * These modes can be set via ioctl(MTDFILEMODE). The mode will be retained
  * separately for each open file descriptor.
  *
  * Note: %MTD_FILE_MODE_RAW provides the same functionality as %MTD_OPS_RAW -
index ee810ca..73eb622 100644 (file)
 #include <vdso/time32.h>
 #include <vdso/time64.h>
 
+#ifdef CONFIG_ARCH_HAS_VDSO_DATA
+#include <asm/vdso/data.h>
+#else
+struct arch_vdso_data {};
+#endif
+
 #define VDSO_BASES     (CLOCK_TAI + 1)
 #define VDSO_HRES      (BIT(CLOCK_REALTIME)            | \
                         BIT(CLOCK_MONOTONIC)           | \
@@ -64,6 +70,8 @@ struct vdso_timestamp {
  * @tz_dsttime:                type of DST correction
  * @hrtimer_res:       hrtimer resolution
  * @__unused:          unused
+ * @arch_data:         architecture specific data (optional, defaults
+ *                     to an empty struct)
  *
  * vdso_data will be accessed by 64 bit and compat code at the same time
  * so we should be careful before modifying this structure.
@@ -97,6 +105,8 @@ struct vdso_data {
        s32                     tz_dsttime;
        u32                     hrtimer_res;
        u32                     __unused;
+
+       struct arch_vdso_data   arch_data;
 };
 
 /*
index 2c6134e..b0fdc9c 100644 (file)
@@ -6,6 +6,9 @@
 
 #include <asm/vdso/vsyscall.h>
 
+unsigned long vdso_update_begin(void);
+void vdso_update_end(unsigned long flags);
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* __VDSO_VSYSCALL_H */
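These two helpers give the timekeeping core and architectures a single way to serialize vDSO data updates. An illustrative caller (the actual update done between the calls is arch-specific):

    #include <vdso/vsyscall.h>

    /* Sketch only: bracket a vdso_data update with the new helpers. */
    static void example_vdso_update(void)
    {
            unsigned long flags;

            flags = vdso_update_begin();
            /* ... modify the vDSO data pages here ... */
            vdso_update_end(flags);
    }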
index fdc279d..d43ca03 100644 (file)
@@ -38,7 +38,8 @@
  *                           Protocol version
  ******************************************************************************
  */
-#define XENDISPL_PROTOCOL_VERSION      "1"
+#define XENDISPL_PROTOCOL_VERSION      "2"
+#define XENDISPL_PROTOCOL_VERSION_INT   2
 
 /*
  ******************************************************************************
  *      Width and height of the connector in pixels separated by
  *      XENDISPL_RESOLUTION_SEPARATOR. This defines visible area of the
  *      display.
+ *      If the backend provides extended display identification data (EDID)
+ *      via the XENDISPL_OP_GET_EDID request, then the EDID values must take
+ *      precedence over the resolutions defined here.
  *
  *------------------ Connector Request Transport Parameters -------------------
  *
 #define XENDISPL_OP_FB_DETACH          0x13
 #define XENDISPL_OP_SET_CONFIG         0x14
 #define XENDISPL_OP_PG_FLIP            0x15
+/* The below command is available in protocol version 2 and above. */
+#define XENDISPL_OP_GET_EDID           0x16
 
 /*
  ******************************************************************************
 #define XENDISPL_FIELD_BE_ALLOC                "be-alloc"
 #define XENDISPL_FIELD_UNIQUE_ID       "unique-id"
 
+#define XENDISPL_EDID_BLOCK_SIZE       128
+#define XENDISPL_EDID_BLOCK_COUNT      256
+#define XENDISPL_EDID_MAX_SIZE         (XENDISPL_EDID_BLOCK_SIZE * XENDISPL_EDID_BLOCK_COUNT)
+
 /*
  ******************************************************************************
  *                          STATUS RETURN CODES
  * +----------------+----------------+----------------+----------------+
  * |                           gref_directory                          | 40
  * +----------------+----------------+----------------+----------------+
- * |                             reserved                              | 44
+ * |                             data_ofs                              | 44
+ * +----------------+----------------+----------------+----------------+
+ * |                             reserved                              | 48
  * +----------------+----------------+----------------+----------------+
  * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
  * +----------------+----------------+----------------+----------------+
  *   buffer size (buffer_sz) exceeds what can be addressed by this single page,
  *   then reference to the next page must be supplied (see gref_dir_next_page
  *   below)
+ * data_ofs - uint32_t, offset of the data in the buffer, octets
  */
 
 #define XENDISPL_DBUF_FLG_REQ_ALLOC    (1 << 0)
@@ -506,6 +519,7 @@ struct xendispl_dbuf_create_req {
        uint32_t buffer_sz;
        uint32_t flags;
        grant_ref_t gref_directory;
+       uint32_t data_ofs;
 };
 
 /*
@@ -732,6 +746,44 @@ struct xendispl_page_flip_req {
 };
 
 /*
+ * Request EDID - request EDID describing current connector:
+ *         0                1                 2               3        octet
+ * +----------------+----------------+----------------+----------------+
+ * |               id                | _OP_GET_EDID   |   reserved     | 4
+ * +----------------+----------------+----------------+----------------+
+ * |                             buffer_sz                             | 8
+ * +----------------+----------------+----------------+----------------+
+ * |                          gref_directory                           | 12
+ * +----------------+----------------+----------------+----------------+
+ * |                             reserved                              | 16
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * |                             reserved                              | 64
+ * +----------------+----------------+----------------+----------------+
+ *
+ * Notes:
+ *   - This command is not available in protocol version 1 and should be
+ *     ignored.
+ *   - This request is optional and if not supported then visible area
+ *     is defined by the relevant XenStore's "resolution" property.
+ *   - Shared buffer, allocated for EDID storage, must not be less than
+ *     XENDISPL_EDID_MAX_SIZE octets.
+ *
+ * buffer_sz - uint32_t, buffer size to be allocated, octets
+ * gref_directory - grant_ref_t, a reference to the first shared page
+ *   describing EDID buffer references. See XENDISPL_OP_DBUF_CREATE for
+ *   grant page directory structure (struct xendispl_page_directory).
+ *
+ * See response format for this request.
+ */
+
+struct xendispl_get_edid_req {
+       uint32_t buffer_sz;
+       grant_ref_t gref_directory;
+};
+
+/*
  *---------------------------------- Responses --------------------------------
  *
  * All response packets have the same length (64 octets)
@@ -753,6 +805,35 @@ struct xendispl_page_flip_req {
  * id - uint16_t, private guest value, echoed from request
  * status - int32_t, response status, zero on success and -XEN_EXX on failure
  *
+ *
+ * Get EDID response - response for XENDISPL_OP_GET_EDID:
+ *         0                1                 2               3        octet
+ * +----------------+----------------+----------------+----------------+
+ * |               id                |    operation   |    reserved    | 4
+ * +----------------+----------------+----------------+----------------+
+ * |                              status                               | 8
+ * +----------------+----------------+----------------+----------------+
+ * |                             edid_sz                               | 12
+ * +----------------+----------------+----------------+----------------+
+ * |                             reserved                              | 16
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * |                             reserved                              | 64
+ * +----------------+----------------+----------------+----------------+
+ *
+ * Notes:
+ *   - This response is not available in protocol version 1 and should be
+ *     ignored.
+ *
+ * edid_sz - uint32_t, size of the EDID, octets
+ */
+
+struct xendispl_get_edid_resp {
+       uint32_t edid_sz;
+};
+
+/*
  *----------------------------------- Events ----------------------------------
  *
  * Events are sent via a shared page allocated by the front and propagated by
@@ -804,6 +885,7 @@ struct xendispl_req {
                struct xendispl_fb_detach_req fb_detach;
                struct xendispl_set_config_req set_config;
                struct xendispl_page_flip_req pg_flip;
+               struct xendispl_get_edid_req get_edid;
                uint8_t reserved[56];
        } op;
 };
@@ -813,7 +895,10 @@ struct xendispl_resp {
        uint8_t operation;
        uint8_t reserved;
        int32_t status;
-       uint8_t reserved1[56];
+       union {
+               struct xendispl_get_edid_resp get_edid;
+               uint8_t reserved1[56];
+       } op;
 };
 
 struct xendispl_evt {
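A version-2 frontend requests EDID by filling the new union member, having first granted a buffer of at least XENDISPL_EDID_MAX_SIZE octets. A rough sketch (field names follow the request layouts documented above; gref_dir and the surrounding ring handling are assumed to exist in the caller):

    #include <linux/string.h>
    #include <xen/interface/grant_table.h>
    #include <xen/interface/io/displif.h>

    /* Sketch only: build a XENDISPL_OP_GET_EDID request. */
    static void example_fill_get_edid(struct xendispl_req *req, uint16_t id,
                                      grant_ref_t gref_dir)
    {
            memset(req, 0, sizeof(*req));
            req->id = id;
            req->operation = XENDISPL_OP_GET_EDID;
            req->op.get_edid.buffer_sz = XENDISPL_EDID_MAX_SIZE;
            req->op.get_edid.gref_directory = gref_dir;
    }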
index a3eb384..f6889fc 100644 (file)
@@ -65,6 +65,7 @@ struct task_struct init_task
 #ifdef CONFIG_ARCH_TASK_STRUCT_ON_STACK
        __init_task_data
 #endif
+       __aligned(L1_CACHE_BYTES)
 = {
 #ifdef CONFIG_THREAD_INFO_IN_TASK
        .thread_info    = INIT_THREAD_INFO(init_task),
@@ -154,7 +155,8 @@ struct task_struct init_task
        .trc_holdout_list = LIST_HEAD_INIT(init_task.trc_holdout_list),
 #endif
 #ifdef CONFIG_CPUSETS
-       .mems_allowed_seq = SEQCNT_ZERO(init_task.mems_allowed_seq),
+       .mems_allowed_seq = SEQCNT_SPINLOCK_ZERO(init_task.mems_allowed_seq,
+                                                &init_task.alloc_lock),
 #endif
 #ifdef CONFIG_RT_MUTEXES
        .pi_waiters     = RB_ROOT_CACHED,
index 3687b71..8c0244e 100644 (file)
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -585,8 +585,7 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
 /*
  * Called with sem_ids.rwsem and ipcp locked.
  */
-static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
-                               struct ipc_params *params)
+static int sem_more_checks(struct kern_ipc_perm *ipcp, struct ipc_params *params)
 {
        struct sem_array *sma;
 
index bf38d7e..f1ed36e 100644 (file)
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -711,8 +711,7 @@ no_file:
 /*
  * Called with shm_ids.rwsem and ipcp locked.
  */
-static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
-                               struct ipc_params *params)
+static int shm_more_checks(struct kern_ipc_perm *ipcp, struct ipc_params *params)
 {
        struct shmid_kernel *shp;
 
@@ -1381,7 +1380,6 @@ static long compat_ksys_shmctl(int shmid, int cmd, void __user *uptr, int versio
        case SHM_LOCK:
        case SHM_UNLOCK:
                return shmctl_do_lock(ns, shmid, cmd);
-               break;
        default:
                return -EINVAL;
        }
index 5350fd2..9a20016 100644 (file)
@@ -5,7 +5,7 @@
 
 obj-y     = fork.o exec_domain.o panic.o \
            cpu.o exit.o softirq.o resource.o \
-           sysctl.o sysctl_binary.o capability.o ptrace.o user.o \
+           sysctl.o capability.o ptrace.o user.o \
            signal.o sys.o umh.o workqueue.o pid.o task_work.o \
            extable.o params.o \
            kthread.o sys_ni.o nsproxy.o \
@@ -36,7 +36,7 @@ KCOV_INSTRUMENT_stacktrace.o := n
 KCOV_INSTRUMENT_kcov.o := n
 KASAN_SANITIZE_kcov.o := n
 KCSAN_SANITIZE_kcov.o := n
-CFLAGS_kcov.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)
+CFLAGS_kcov.o := $(call cc-option, -fno-conserve-stack) -fno-stack-protector
 
 # cond_syscall is currently not LTO compatible
 CFLAGS_sys_ni.o = $(DISABLE_LTO)
index 363b9ca..b671596 100644 (file)
@@ -338,8 +338,8 @@ static void bpf_iter_link_release(struct bpf_link *link)
        struct bpf_iter_link *iter_link =
                container_of(link, struct bpf_iter_link, link);
 
-       if (iter_link->aux.map)
-               bpf_map_put_with_uref(iter_link->aux.map);
+       if (iter_link->tinfo->reg_info->detach_target)
+               iter_link->tinfo->reg_info->detach_target(&iter_link->aux);
 }
 
 static void bpf_iter_link_dealloc(struct bpf_link *link)
@@ -390,15 +390,35 @@ bool bpf_link_is_iter(struct bpf_link *link)
 
 int bpf_iter_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
 {
+       union bpf_iter_link_info __user *ulinfo;
        struct bpf_link_primer link_primer;
        struct bpf_iter_target_info *tinfo;
-       struct bpf_iter_aux_info aux = {};
+       union bpf_iter_link_info linfo;
        struct bpf_iter_link *link;
-       u32 prog_btf_id, target_fd;
+       u32 prog_btf_id, linfo_len;
        bool existed = false;
-       struct bpf_map *map;
        int err;
 
+       if (attr->link_create.target_fd || attr->link_create.flags)
+               return -EINVAL;
+
+       memset(&linfo, 0, sizeof(union bpf_iter_link_info));
+
+       ulinfo = u64_to_user_ptr(attr->link_create.iter_info);
+       linfo_len = attr->link_create.iter_info_len;
+       if (!ulinfo ^ !linfo_len)
+               return -EINVAL;
+
+       if (ulinfo) {
+               err = bpf_check_uarg_tail_zero(ulinfo, sizeof(linfo),
+                                              linfo_len);
+               if (err)
+                       return err;
+               linfo_len = min_t(u32, linfo_len, sizeof(linfo));
+               if (copy_from_user(&linfo, ulinfo, linfo_len))
+                       return -EFAULT;
+       }
+
        prog_btf_id = prog->aux->attach_btf_id;
        mutex_lock(&targets_mutex);
        list_for_each_entry(tinfo, &targets, list) {
@@ -411,13 +431,6 @@ int bpf_iter_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
        if (!existed)
                return -ENOENT;
 
-       /* Make sure user supplied flags are target expected. */
-       target_fd = attr->link_create.target_fd;
-       if (attr->link_create.flags != tinfo->reg_info->req_linfo)
-               return -EINVAL;
-       if (!attr->link_create.flags && target_fd)
-               return -EINVAL;
-
        link = kzalloc(sizeof(*link), GFP_USER | __GFP_NOWARN);
        if (!link)
                return -ENOMEM;
@@ -431,28 +444,15 @@ int bpf_iter_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
                return err;
        }
 
-       if (tinfo->reg_info->req_linfo == BPF_ITER_LINK_MAP_FD) {
-               map = bpf_map_get_with_uref(target_fd);
-               if (IS_ERR(map)) {
-                       err = PTR_ERR(map);
-                       goto cleanup_link;
-               }
-
-               aux.map = map;
-               err = tinfo->reg_info->check_target(prog, &aux);
+       if (tinfo->reg_info->attach_target) {
+               err = tinfo->reg_info->attach_target(prog, &linfo, &link->aux);
                if (err) {
-                       bpf_map_put_with_uref(map);
-                       goto cleanup_link;
+                       bpf_link_cleanup(&link_primer);
+                       return err;
                }
-
-               link->aux.map = map;
        }
 
        return bpf_link_settle(&link_primer);
-
-cleanup_link:
-       bpf_link_cleanup(&link_primer);
-       return err;
 }
 
 static void init_seq_meta(struct bpf_iter_priv_data *priv_data,
index bde9334..ed0b357 100644 (file)
@@ -1966,7 +1966,7 @@ void bpf_prog_array_delete_safe(struct bpf_prog_array *array,
  * @index: the index of the program to replace
  *
  * Skips over dummy programs, by not counting them, when calculating
- * the the position of the program to replace.
+ * the position of the program to replace.
  *
  * Return:
  * * 0         - Success
index fbe1f55..af86048 100644 (file)
@@ -98,12 +98,21 @@ static struct bpf_iter_reg bpf_map_reg_info = {
        .seq_info               = &bpf_map_seq_info,
 };
 
-static int bpf_iter_check_map(struct bpf_prog *prog,
-                             struct bpf_iter_aux_info *aux)
+static int bpf_iter_attach_map(struct bpf_prog *prog,
+                              union bpf_iter_link_info *linfo,
+                              struct bpf_iter_aux_info *aux)
 {
        u32 key_acc_size, value_acc_size, key_size, value_size;
-       struct bpf_map *map = aux->map;
+       struct bpf_map *map;
        bool is_percpu = false;
+       int err = -EINVAL;
+
+       if (!linfo->map.map_fd)
+               return -EBADF;
+
+       map = bpf_map_get_with_uref(linfo->map.map_fd);
+       if (IS_ERR(map))
+               return PTR_ERR(map);
 
        if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
            map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
@@ -112,7 +121,7 @@ static int bpf_iter_check_map(struct bpf_prog *prog,
        else if (map->map_type != BPF_MAP_TYPE_HASH &&
                 map->map_type != BPF_MAP_TYPE_LRU_HASH &&
                 map->map_type != BPF_MAP_TYPE_ARRAY)
-               return -EINVAL;
+               goto put_map;
 
        key_acc_size = prog->aux->max_rdonly_access;
        value_acc_size = prog->aux->max_rdwr_access;
@@ -122,10 +131,22 @@ static int bpf_iter_check_map(struct bpf_prog *prog,
        else
                value_size = round_up(map->value_size, 8) * num_possible_cpus();
 
-       if (key_acc_size > key_size || value_acc_size > value_size)
-               return -EACCES;
+       if (key_acc_size > key_size || value_acc_size > value_size) {
+               err = -EACCES;
+               goto put_map;
+       }
 
+       aux->map = map;
        return 0;
+
+put_map:
+       bpf_map_put_with_uref(map);
+       return err;
+}
+
+static void bpf_iter_detach_map(struct bpf_iter_aux_info *aux)
+{
+       bpf_map_put_with_uref(aux->map);
 }
 
 DEFINE_BPF_ITER_FUNC(bpf_map_elem, struct bpf_iter_meta *meta,
@@ -133,8 +154,8 @@ DEFINE_BPF_ITER_FUNC(bpf_map_elem, struct bpf_iter_meta *meta,
 
 static const struct bpf_iter_reg bpf_map_elem_reg_info = {
        .target                 = "bpf_map_elem",
-       .check_target           = bpf_iter_check_map,
-       .req_linfo              = BPF_ITER_LINK_MAP_FD,
+       .attach_target          = bpf_iter_attach_map,
+       .detach_target          = bpf_iter_detach_map,
        .ctx_arg_info_size      = 2,
        .ctx_arg_info           = {
                { offsetof(struct bpf_iter__bpf_map_elem, key),
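From userspace, the map to iterate is now passed through bpf_iter_link_info rather than target_fd/flags. A minimal sketch against the raw syscall interface (it assumes the companion uapi update that adds iter_info/iter_info_len to bpf_attr.link_create, which is not part of the hunks shown here):

    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <linux/bpf.h>

    /* Sketch only: create a "bpf_map_elem" iterator link over map_fd. */
    static int example_iter_link_create(int prog_fd, int map_fd)
    {
            union bpf_iter_link_info linfo;
            union bpf_attr attr;

            memset(&linfo, 0, sizeof(linfo));
            linfo.map.map_fd = map_fd;

            memset(&attr, 0, sizeof(attr));
            attr.link_create.prog_fd = prog_fd;
            attr.link_create.attach_type = BPF_TRACE_ITER;
            attr.link_create.iter_info = (__u64)(unsigned long)&linfo;
            attr.link_create.iter_info_len = sizeof(linfo);

            return syscall(__NR_bpf, BPF_LINK_CREATE, &attr, sizeof(attr));
    }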
index 4fd830a..cfed0ac 100644 (file)
@@ -213,11 +213,13 @@ static int stack_map_get_build_id_32(void *page_addr,
 
        phdr = (Elf32_Phdr *)(page_addr + sizeof(Elf32_Ehdr));
 
-       for (i = 0; i < ehdr->e_phnum; ++i)
-               if (phdr[i].p_type == PT_NOTE)
-                       return stack_map_parse_build_id(page_addr, build_id,
-                                       page_addr + phdr[i].p_offset,
-                                       phdr[i].p_filesz);
+       for (i = 0; i < ehdr->e_phnum; ++i) {
+               if (phdr[i].p_type == PT_NOTE &&
+                   !stack_map_parse_build_id(page_addr, build_id,
+                                             page_addr + phdr[i].p_offset,
+                                             phdr[i].p_filesz))
+                       return 0;
+       }
        return -EINVAL;
 }
 
@@ -236,11 +238,13 @@ static int stack_map_get_build_id_64(void *page_addr,
 
        phdr = (Elf64_Phdr *)(page_addr + sizeof(Elf64_Ehdr));
 
-       for (i = 0; i < ehdr->e_phnum; ++i)
-               if (phdr[i].p_type == PT_NOTE)
-                       return stack_map_parse_build_id(page_addr, build_id,
-                                       page_addr + phdr[i].p_offset,
-                                       phdr[i].p_filesz);
+       for (i = 0; i < ehdr->e_phnum; ++i) {
+               if (phdr[i].p_type == PT_NOTE &&
+                   !stack_map_parse_build_id(page_addr, build_id,
+                                             page_addr + phdr[i].p_offset,
+                                             phdr[i].p_filesz))
+                       return 0;
+       }
        return -EINVAL;
 }
 
index 2f343ce..86299a2 100644 (file)
@@ -3883,7 +3883,7 @@ static int tracing_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *
        return -EINVAL;
 }
 
-#define BPF_LINK_CREATE_LAST_FIELD link_create.flags
+#define BPF_LINK_CREATE_LAST_FIELD link_create.iter_info_len
 static int link_create(union bpf_attr *attr)
 {
        enum bpf_prog_type ptype;
index b6ccfce..ef938f1 100644 (file)
@@ -8294,7 +8294,7 @@ static bool stacksafe(struct bpf_func_state *old,
                if (old->stack[spi].slot_type[i % BPF_REG_SIZE] !=
                    cur->stack[spi].slot_type[i % BPF_REG_SIZE])
                        /* Ex: old explored (safe) state has STACK_SPILL in
-                        * this stack slot, but current has has STACK_MISC ->
+                        * this stack slot, but current has STACK_MISC ->
                         * this verifier states are not equivalent,
                         * return false to continue verification of this path
                         */
index 1817568..106e450 100644 (file)
@@ -11,6 +11,8 @@
 #include <asm/page.h>
 #include <asm/sections.h>
 
+#include <crypto/sha.h>
+
 /* vmcoreinfo stuff */
 unsigned char *vmcoreinfo_data;
 size_t vmcoreinfo_size;
@@ -376,6 +378,53 @@ phys_addr_t __weak paddr_vmcoreinfo_note(void)
 }
 EXPORT_SYMBOL(paddr_vmcoreinfo_note);
 
+#define NOTES_SIZE (&__stop_notes - &__start_notes)
+#define BUILD_ID_MAX SHA1_DIGEST_SIZE
+#define NT_GNU_BUILD_ID 3
+
+struct elf_note_section {
+       struct elf_note n_hdr;
+       u8 n_data[];
+};
+
+/*
+ * Add build ID from .notes section as generated by the GNU ld(1)
+ * or LLVM lld(1) --build-id option.
+ */
+static void add_build_id_vmcoreinfo(void)
+{
+       char build_id[BUILD_ID_MAX * 2 + 1];
+       int n_remain = NOTES_SIZE;
+
+       while (n_remain >= sizeof(struct elf_note)) {
+               const struct elf_note_section *note_sec =
+                       &__start_notes + NOTES_SIZE - n_remain;
+               const u32 n_namesz = note_sec->n_hdr.n_namesz;
+
+               if (note_sec->n_hdr.n_type == NT_GNU_BUILD_ID &&
+                   n_namesz != 0 &&
+                   !strcmp((char *)&note_sec->n_data[0], "GNU")) {
+                       if (note_sec->n_hdr.n_descsz <= BUILD_ID_MAX) {
+                               const u32 n_descsz = note_sec->n_hdr.n_descsz;
+                               const u8 *s = &note_sec->n_data[n_namesz];
+
+                               s = PTR_ALIGN(s, 4);
+                               bin2hex(build_id, s, n_descsz);
+                               build_id[2 * n_descsz] = '\0';
+                               VMCOREINFO_BUILD_ID(build_id);
+                               return;
+                       }
+                       pr_warn("Build ID is too large to include in vmcoreinfo: %u > %u\n",
+                               note_sec->n_hdr.n_descsz,
+                               BUILD_ID_MAX);
+                       return;
+               }
+               n_remain -= sizeof(struct elf_note) +
+                       ALIGN(note_sec->n_hdr.n_namesz, 4) +
+                       ALIGN(note_sec->n_hdr.n_descsz, 4);
+       }
+}
+
 static int __init crash_save_vmcoreinfo_init(void)
 {
        vmcoreinfo_data = (unsigned char *)get_zeroed_page(GFP_KERNEL);
@@ -394,6 +443,7 @@ static int __init crash_save_vmcoreinfo_init(void)
        }
 
        VMCOREINFO_OSRELEASE(init_uts_ns.name.release);
+       add_build_id_vmcoreinfo();
        VMCOREINFO_PAGESIZE(PAGE_SIZE);
 
        VMCOREINFO_SYMBOL(init_uts_ns);
index f4770fc..847a9d1 100644 (file)
@@ -1,5 +1,8 @@
 # SPDX-License-Identifier: GPL-2.0-only
 
+config NO_DMA
+       bool
+
 config HAS_DMA
        bool
        depends on !NO_DMA
@@ -186,11 +189,6 @@ config DMA_API_DEBUG
          drivers like double-freeing of DMA mappings or freeing mappings that
          were never allocated.
 
-         This also attempts to catch cases where a page owned by DMA is
-         accessed by the cpu in a way that could cause data corruption.  For
-         example, this enables cow_user_page() to check that the source page is
-         not undergoing DMA.
-
          This option causes a performance degradation.  Use only if you want to
          debug device drivers and dma interactions.
 
index f7f807f..8e9f7b3 100644 (file)
@@ -448,9 +448,6 @@ void debug_dma_dump_mappings(struct device *dev)
  * dma_active_cacheline entry to track per event.  dma_map_sg(), on the
  * other hand, consumes a single dma_debug_entry, but inserts 'nents'
  * entries into the tree.
- *
- * At any time debug_dma_assert_idle() can be called to trigger a
- * warning if any cachelines in the given page are in the active set.
  */
 static RADIX_TREE(dma_active_cacheline, GFP_NOWAIT);
 static DEFINE_SPINLOCK(radix_lock);
@@ -497,10 +494,7 @@ static void active_cacheline_inc_overlap(phys_addr_t cln)
        overlap = active_cacheline_set_overlap(cln, ++overlap);
 
        /* If we overflowed the overlap counter then we're potentially
-        * leaking dma-mappings.  Otherwise, if maps and unmaps are
-        * balanced then this overflow may cause false negatives in
-        * debug_dma_assert_idle() as the cacheline may be marked idle
-        * prematurely.
+        * leaking dma-mappings.
         */
        WARN_ONCE(overlap > ACTIVE_CACHELINE_MAX_OVERLAP,
                  pr_fmt("exceeded %d overlapping mappings of cacheline %pa\n"),
@@ -555,53 +549,6 @@ static void active_cacheline_remove(struct dma_debug_entry *entry)
        spin_unlock_irqrestore(&radix_lock, flags);
 }
 
-/**
- * debug_dma_assert_idle() - assert that a page is not undergoing dma
- * @page: page to lookup in the dma_active_cacheline tree
- *
- * Place a call to this routine in cases where the cpu touching the page
- * before the dma completes (page is dma_unmapped) will lead to data
- * corruption.
- */
-void debug_dma_assert_idle(struct page *page)
-{
-       static struct dma_debug_entry *ents[CACHELINES_PER_PAGE];
-       struct dma_debug_entry *entry = NULL;
-       void **results = (void **) &ents;
-       unsigned int nents, i;
-       unsigned long flags;
-       phys_addr_t cln;
-
-       if (dma_debug_disabled())
-               return;
-
-       if (!page)
-               return;
-
-       cln = (phys_addr_t) page_to_pfn(page) << CACHELINE_PER_PAGE_SHIFT;
-       spin_lock_irqsave(&radix_lock, flags);
-       nents = radix_tree_gang_lookup(&dma_active_cacheline, results, cln,
-                                      CACHELINES_PER_PAGE);
-       for (i = 0; i < nents; i++) {
-               phys_addr_t ent_cln = to_cacheline_number(ents[i]);
-
-               if (ent_cln == cln) {
-                       entry = ents[i];
-                       break;
-               } else if (ent_cln >= cln + CACHELINES_PER_PAGE)
-                       break;
-       }
-       spin_unlock_irqrestore(&radix_lock, flags);
-
-       if (!entry)
-               return;
-
-       cln = to_cacheline_number(entry);
-       err_printk(entry->dev, entry,
-                  "cpu touching an active dma mapped cacheline [cln=%pa]\n",
-                  &cln);
-}
-
 /*
  * Wrapper function for adding an entry to the hash.
  * This function takes care of locking itself.
index bb0041e..db6ef07 100644 (file)
@@ -43,7 +43,7 @@ u64 dma_direct_get_required_mask(struct device *dev)
        return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
 }
 
-gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
+static gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
                                  u64 *phys_limit)
 {
        u64 dma_limit = min_not_zero(dma_mask, dev->bus_dma_limit);
@@ -68,7 +68,7 @@ gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
        return 0;
 }
 
-bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
+static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
 {
        return phys_to_dma_direct(dev, phys) + size - 1 <=
                        min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
@@ -161,8 +161,13 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
        size = PAGE_ALIGN(size);
 
        if (dma_should_alloc_from_pool(dev, gfp, attrs)) {
-               ret = dma_alloc_from_pool(dev, size, &page, gfp);
-               if (!ret)
+               u64 phys_mask;
+
+               gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
+                               &phys_mask);
+               page = dma_alloc_from_pool(dev, size, &ret, gfp,
+                               dma_coherent_ok);
+               if (!page)
                        return NULL;
                goto done;
        }
index 6bc74a2..06582b4 100644 (file)
@@ -3,7 +3,9 @@
  * Copyright (C) 2012 ARM Ltd.
  * Copyright (C) 2020 Google LLC
  */
+#include <linux/cma.h>
 #include <linux/debugfs.h>
+#include <linux/dma-contiguous.h>
 #include <linux/dma-direct.h>
 #include <linux/dma-noncoherent.h>
 #include <linux/init.h>
@@ -55,6 +57,29 @@ static void dma_atomic_pool_size_add(gfp_t gfp, size_t size)
                pool_size_kernel += size;
 }
 
+static bool cma_in_zone(gfp_t gfp)
+{
+       unsigned long size;
+       phys_addr_t end;
+       struct cma *cma;
+
+       cma = dev_get_cma_area(NULL);
+       if (!cma)
+               return false;
+
+       size = cma_get_size(cma);
+       if (!size)
+               return false;
+
+       /* CMA can't cross zone boundaries, see cma_activate_area() */
+       end = cma_get_base(cma) + size - 1;
+       if (IS_ENABLED(CONFIG_ZONE_DMA) && (gfp & GFP_DMA))
+               return end <= DMA_BIT_MASK(zone_dma_bits);
+       if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp & GFP_DMA32))
+               return end <= DMA_BIT_MASK(32);
+       return true;
+}
+
 static int atomic_pool_expand(struct gen_pool *pool, size_t pool_size,
                              gfp_t gfp)
 {
@@ -68,7 +93,11 @@ static int atomic_pool_expand(struct gen_pool *pool, size_t pool_size,
 
        do {
                pool_size = 1 << (PAGE_SHIFT + order);
-               page = alloc_pages(gfp, order);
+               if (cma_in_zone(gfp))
+                       page = dma_alloc_from_contiguous(NULL, 1 << order,
+                                                        order, false);
+               if (!page)
+                       page = alloc_pages(gfp, order);
        } while (!page && order-- > 0);
        if (!page)
                goto out;
@@ -196,93 +225,75 @@ static int __init dma_atomic_pool_init(void)
 }
 postcore_initcall(dma_atomic_pool_init);
 
-static inline struct gen_pool *dma_guess_pool_from_device(struct device *dev)
+static inline struct gen_pool *dma_guess_pool(struct gen_pool *prev, gfp_t gfp)
 {
-       u64 phys_mask;
-       gfp_t gfp;
-
-       gfp = dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
-                                         &phys_mask);
-       if (IS_ENABLED(CONFIG_ZONE_DMA) && gfp == GFP_DMA)
+       if (prev == NULL) {
+               if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp & GFP_DMA32))
+                       return atomic_pool_dma32;
+               if (IS_ENABLED(CONFIG_ZONE_DMA) && (gfp & GFP_DMA))
+                       return atomic_pool_dma;
+               return atomic_pool_kernel;
+       }
+       if (prev == atomic_pool_kernel)
+               return atomic_pool_dma32 ? atomic_pool_dma32 : atomic_pool_dma;
+       if (prev == atomic_pool_dma32)
                return atomic_pool_dma;
-       if (IS_ENABLED(CONFIG_ZONE_DMA32) && gfp == GFP_DMA32)
-               return atomic_pool_dma32;
-       return atomic_pool_kernel;
+       return NULL;
 }
 
-static inline struct gen_pool *dma_get_safer_pool(struct gen_pool *bad_pool)
+static struct page *__dma_alloc_from_pool(struct device *dev, size_t size,
+               struct gen_pool *pool, void **cpu_addr,
+               bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t))
 {
-       if (bad_pool == atomic_pool_kernel)
-               return atomic_pool_dma32 ? : atomic_pool_dma;
+       unsigned long addr;
+       phys_addr_t phys;
 
-       if (bad_pool == atomic_pool_dma32)
-               return atomic_pool_dma;
+       addr = gen_pool_alloc(pool, size);
+       if (!addr)
+               return NULL;
 
-       return NULL;
-}
+       phys = gen_pool_virt_to_phys(pool, addr);
+       if (phys_addr_ok && !phys_addr_ok(dev, phys, size)) {
+               gen_pool_free(pool, addr, size);
+               return NULL;
+       }
 
-static inline struct gen_pool *dma_guess_pool(struct device *dev,
-                                             struct gen_pool *bad_pool)
-{
-       if (bad_pool)
-               return dma_get_safer_pool(bad_pool);
+       if (gen_pool_avail(pool) < atomic_pool_size)
+               schedule_work(&atomic_pool_work);
 
-       return dma_guess_pool_from_device(dev);
+       *cpu_addr = (void *)addr;
+       memset(*cpu_addr, 0, size);
+       return pfn_to_page(__phys_to_pfn(phys));
 }
 
-void *dma_alloc_from_pool(struct device *dev, size_t size,
-                         struct page **ret_page, gfp_t flags)
+struct page *dma_alloc_from_pool(struct device *dev, size_t size,
+               void **cpu_addr, gfp_t gfp,
+               bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t))
 {
        struct gen_pool *pool = NULL;
-       unsigned long val = 0;
-       void *ptr = NULL;
-       phys_addr_t phys;
-
-       while (1) {
-               pool = dma_guess_pool(dev, pool);
-               if (!pool) {
-                       WARN(1, "Failed to get suitable pool for %s\n",
-                            dev_name(dev));
-                       break;
-               }
-
-               val = gen_pool_alloc(pool, size);
-               if (!val)
-                       continue;
-
-               phys = gen_pool_virt_to_phys(pool, val);
-               if (dma_coherent_ok(dev, phys, size))
-                       break;
-
-               gen_pool_free(pool, val, size);
-               val = 0;
-       }
-
-
-       if (val) {
-               *ret_page = pfn_to_page(__phys_to_pfn(phys));
-               ptr = (void *)val;
-               memset(ptr, 0, size);
+       struct page *page;
 
-               if (gen_pool_avail(pool) < atomic_pool_size)
-                       schedule_work(&atomic_pool_work);
+       while ((pool = dma_guess_pool(pool, gfp))) {
+               page = __dma_alloc_from_pool(dev, size, pool, cpu_addr,
+                                            phys_addr_ok);
+               if (page)
+                       return page;
        }
 
-       return ptr;
+       WARN(1, "Failed to get suitable pool for %s\n", dev_name(dev));
+       return NULL;
 }
 
 bool dma_free_from_pool(struct device *dev, void *start, size_t size)
 {
        struct gen_pool *pool = NULL;
 
-       while (1) {
-               pool = dma_guess_pool(dev, pool);
-               if (!pool)
-                       return false;
-
-               if (gen_pool_has_addr(pool, (unsigned long)start, size)) {
-                       gen_pool_free(pool, (unsigned long)start, size);
-                       return true;
-               }
+       while ((pool = dma_guess_pool(pool, 0))) {
+               if (!gen_pool_has_addr(pool, (unsigned long)start, size))
+                       continue;
+               gen_pool_free(pool, (unsigned long)start, size);
+               return true;
        }
+
+       return false;
 }
index c6ce894..58cbe35 100644 (file)
@@ -217,10 +217,9 @@ get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
                        if (add_mark)
                                perf_callchain_store_context(&ctx, PERF_CONTEXT_USER);
 
-                       fs = get_fs();
-                       set_fs(USER_DS);
+                       fs = force_uaccess_begin();
                        perf_callchain_user(&ctx, regs);
-                       set_fs(fs);
+                       force_uaccess_end(fs);
                }
        }
 
index d1f0a7e..5bfe8e3 100644 (file)
@@ -6453,10 +6453,9 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
 
                /* Data. */
                sp = perf_user_stack_pointer(regs);
-               fs = get_fs();
-               set_fs(USER_DS);
+               fs = force_uaccess_begin();
                rem = __output_copy_user(handle, (void *) sp, dump_size);
-               set_fs(fs);
+               force_uaccess_end(fs);
                dyn_size = dump_size - rem;
 
                perf_output_skip(handle, rem);
@@ -11707,7 +11706,7 @@ SYSCALL_DEFINE5(perf_event_open,
                        goto err_task;
 
                /*
-                * Reuse ptrace permission checks for now.
+                * Preserve ptrace permission check for backwards compatibility.
                 *
                 * We must hold exec_update_mutex across this and any potential
                 * perf_install_in_context() call for this new event to
@@ -11715,7 +11714,7 @@ SYSCALL_DEFINE5(perf_event_open,
                 * perf_event_exit_task() that could imply).
                 */
                err = -EACCES;
-               if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
+               if (!perfmon_capable() && !ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
                        goto err_cred;
        }
 
index 25de10c..0e18aaf 100644 (file)
@@ -184,7 +184,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
        if (new_page) {
                get_page(new_page);
                page_add_new_anon_rmap(new_page, vma, addr, false);
-               lru_cache_add_active_or_unevictable(new_page, vma);
+               lru_cache_add_inactive_or_unevictable(new_page, vma);
        } else
                /* no new page, just dec_mm_counter for old_page */
                dec_mm_counter(mm, MM_ANONPAGES);
@@ -205,7 +205,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
                try_to_free_swap(old_page);
        page_vma_mapped_walk_done(&pvmw);
 
-       if (vma->vm_flags & VM_LOCKED)
+       if ((vma->vm_flags & VM_LOCKED) && !PageCompound(old_page))
                munlock_vma_page(old_page);
        put_page(old_page);
 
@@ -376,7 +376,7 @@ __update_ref_ctr(struct mm_struct *mm, unsigned long vaddr, short d)
        if (!vaddr || !d)
                return -EINVAL;
 
-       ret = get_user_pages_remote(NULL, mm, vaddr, 1,
+       ret = get_user_pages_remote(mm, vaddr, 1,
                        FOLL_WRITE, &page, &vma, NULL);
        if (unlikely(ret <= 0)) {
                /*
@@ -477,7 +477,7 @@ retry:
        if (is_register)
                gup_flags |= FOLL_SPLIT_PMD;
        /* Read the page with vaddr into memory */
-       ret = get_user_pages_remote(NULL, mm, vaddr, 1, gup_flags,
+       ret = get_user_pages_remote(mm, vaddr, 1, gup_flags,
                                    &old_page, &vma, NULL);
        if (ret <= 0)
                return ret;
@@ -2029,7 +2029,7 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
         * but we treat this as a 'remote' access since it is
         * essentially a kernel access to the memory.
         */
-       result = get_user_pages_remote(NULL, mm, vaddr, 1, FOLL_FORCE, &page,
+       result = get_user_pages_remote(mm, vaddr, 1, FOLL_FORCE, &page,
                        NULL, NULL);
        if (result < 0)
                return result;
index e731c41..733e80f 100644 (file)
@@ -732,7 +732,7 @@ void __noreturn do_exit(long code)
         * mm_release()->clear_child_tid() from writing to a user-controlled
         * kernel address.
         */
-       set_fs(USER_DS);
+       force_uaccess_begin();
 
        if (unlikely(in_atomic())) {
                pr_info("note: %s[%d] exited with preempt_count %d\n",
@@ -1626,6 +1626,22 @@ long kernel_wait4(pid_t upid, int __user *stat_addr, int options,
        return ret;
 }
 
+int kernel_wait(pid_t pid, int *stat)
+{
+       struct wait_opts wo = {
+               .wo_type        = PIDTYPE_PID,
+               .wo_pid         = find_get_pid(pid),
+               .wo_flags       = WEXITED,
+       };
+       int ret;
+
+       ret = do_wait(&wo);
+       if (ret > 0 && wo.wo_stat)
+               *stat = wo.wo_stat;
+       put_pid(wo.wo_pid);
+       return ret;
+}
+
 SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr,
                int, options, struct rusage __user *, ru)
 {
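kernel_wait() is a small in-kernel counterpart to wait4() for reaping a child by PID, intended for callers such as the usermode-helper code. An illustrative use (the pid is assumed to come from an earlier kernel_thread()-style call, and the declaration lives in the companion header change not shown in this hunk):

    #include <linux/printk.h>
    #include <linux/sched/task.h>

    /* Sketch only: reap an in-kernel child and report a non-zero status. */
    static void example_reap_child(pid_t pid)
    {
            int status = 0;

            if (kernel_wait(pid, &status) > 0 && status)
                    pr_warn("child %d exited with status 0x%x\n", pid, status);
    }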
index 35e9894..4d32190 100644 (file)
@@ -2011,7 +2011,7 @@ static __latent_entropy struct task_struct *copy_process(
 #ifdef CONFIG_CPUSETS
        p->cpuset_mem_spread_rotor = NUMA_NO_NODE;
        p->cpuset_slab_spread_rotor = NUMA_NO_NODE;
-       seqcount_init(&p->mems_allowed_seq);
+       seqcount_spinlock_init(&p->mems_allowed_seq, &p->alloc_lock);
 #endif
 #ifdef CONFIG_TRACE_IRQFLAGS
        memset(&p->irqtrace, 0, sizeof(p->irqtrace));
index 8340412..a587669 100644 (file)
@@ -678,7 +678,7 @@ static int fault_in_user_writeable(u32 __user *uaddr)
        int ret;
 
        mmap_read_lock(mm);
-       ret = fixup_user_fault(current, mm, (unsigned long)uaddr,
+       ret = fixup_user_fault(mm, (unsigned long)uaddr,
                               FAULT_FLAG_WRITE, NULL);
        mmap_read_unlock(mm);
 
@@ -3744,12 +3744,12 @@ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
        switch (cmd) {
        case FUTEX_WAIT:
                val3 = FUTEX_BITSET_MATCH_ANY;
-               /* fall through */
+               fallthrough;
        case FUTEX_WAIT_BITSET:
                return futex_wait(uaddr, flags, val, timeout, val3);
        case FUTEX_WAKE:
                val3 = FUTEX_BITSET_MATCH_ANY;
-               /* fall through */
+               fallthrough;
        case FUTEX_WAKE_BITSET:
                return futex_wake(uaddr, flags, val, val3);
        case FUTEX_REQUEUE:
index d55ba62..52ac539 100644 (file)
@@ -2731,8 +2731,10 @@ int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
 
        do {
                chip = irq_data_get_irq_chip(data);
-               if (WARN_ON_ONCE(!chip))
-                       return -ENODEV;
+               if (WARN_ON_ONCE(!chip)) {
+                       err = -ENODEV;
+                       goto out_unlock;
+               }
                if (chip->irq_set_irqchip_state)
                        break;
 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
@@ -2745,6 +2747,7 @@ int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
        if (data)
                err = chip->irq_set_irqchip_state(data, which, val);
 
+out_unlock:
        irq_put_desc_busunlock(desc, flags);
        return err;
 }
index 8f557fa..c6c7e18 100644 (file)
@@ -185,14 +185,18 @@ void rearm_wake_irq(unsigned int irq)
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
 
-       if (!desc || !(desc->istate & IRQS_SUSPENDED) ||
-           !irqd_is_wakeup_set(&desc->irq_data))
+       if (!desc)
                return;
 
+       if (!(desc->istate & IRQS_SUSPENDED) ||
+           !irqd_is_wakeup_set(&desc->irq_data))
+               goto unlock;
+
        desc->istate &= ~IRQS_SUSPENDED;
        irqd_set(&desc->irq_data, IRQD_WAKEUP_ARMED);
        __enable_irq(desc);
 
+unlock:
        irq_put_desc_busunlock(desc, flags);
 }
 
index 6afae0b..6b8368b 100644 (file)
@@ -96,7 +96,7 @@ struct kcov_percpu_data {
        int                     saved_sequence;
 };
 
-DEFINE_PER_CPU(struct kcov_percpu_data, kcov_percpu_data);
+static DEFINE_PER_CPU(struct kcov_percpu_data, kcov_percpu_data);
 
 /* Must be called with kcov_remote_lock locked. */
 static struct kcov_remote *kcov_remote_find(u64 handle)
@@ -775,7 +775,7 @@ static inline bool kcov_mode_enabled(unsigned int mode)
        return (mode & ~KCOV_IN_CTXSW) != KCOV_MODE_DISABLED;
 }
 
-void kcov_remote_softirq_start(struct task_struct *t)
+static void kcov_remote_softirq_start(struct task_struct *t)
 {
        struct kcov_percpu_data *data = this_cpu_ptr(&kcov_percpu_data);
        unsigned int mode;
@@ -792,7 +792,7 @@ void kcov_remote_softirq_start(struct task_struct *t)
        }
 }
 
-void kcov_remote_softirq_stop(struct task_struct *t)
+static void kcov_remote_softirq_stop(struct task_struct *t)
 {
        struct kcov_percpu_data *data = this_cpu_ptr(&kcov_percpu_data);
 
index 78c0837..ca40bef 100644 (file)
@@ -1169,24 +1169,26 @@ int crash_exclude_mem_range(struct crash_mem *mem,
                            unsigned long long mstart, unsigned long long mend)
 {
        int i, j;
-       unsigned long long start, end;
+       unsigned long long start, end, p_start, p_end;
        struct crash_mem_range temp_range = {0, 0};
 
        for (i = 0; i < mem->nr_ranges; i++) {
                start = mem->ranges[i].start;
                end = mem->ranges[i].end;
+               p_start = mstart;
+               p_end = mend;
 
                if (mstart > end || mend < start)
                        continue;
 
                /* Truncate any area outside of range */
                if (mstart < start)
-                       mstart = start;
+                       p_start = start;
                if (mend > end)
-                       mend = end;
+                       p_end = end;
 
                /* Found completely overlapping range */
-               if (mstart == start && mend == end) {
+               if (p_start == start && p_end == end) {
                        mem->ranges[i].start = 0;
                        mem->ranges[i].end = 0;
                        if (i < mem->nr_ranges - 1) {
@@ -1197,20 +1199,29 @@ int crash_exclude_mem_range(struct crash_mem *mem,
                                        mem->ranges[j].end =
                                                        mem->ranges[j+1].end;
                                }
+
+                               /*
+                                * Continue checking for other overlapping
+                                * ranges from the current position, because
+                                * the above mem ranges have been shifted.
+                                */
+                               i--;
+                               mem->nr_ranges--;
+                               continue;
                        }
                        mem->nr_ranges--;
                        return 0;
                }
 
-               if (mstart > start && mend < end) {
+               if (p_start > start && p_end < end) {
                        /* Split original range */
-                       mem->ranges[i].end = mstart - 1;
-                       temp_range.start = mend + 1;
+                       mem->ranges[i].end = p_start - 1;
+                       temp_range.start = p_end + 1;
                        temp_range.end = end;
-               } else if (mstart != start)
-                       mem->ranges[i].end = mstart - 1;
+               } else if (p_start != start)
+                       mem->ranges[i].end = p_start - 1;
                else
-                       mem->ranges[i].start = mend + 1;
+                       mem->ranges[i].start = p_end + 1;
                break;
        }
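With the p_start/p_end locals and the i--/nr_ranges-- step, an excluded region that spans several ranges is now handled in one call. As an illustrative trace: excluding [0, 300] from {[50, 100], [150, 200], [250, 400]} drops the first two ranges and truncates the third to [301, 400]; previously the clamped mstart/mend and the early return left every range after the first untouched.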
 
@@ -1247,7 +1258,7 @@ int crash_prepare_elf64_headers(struct crash_mem *mem, int kernel_map,
        unsigned long long notes_addr;
        unsigned long mstart, mend;
 
-       /* extra phdr for vmcoreinfo elf note */
+       /* extra phdr for vmcoreinfo ELF note */
        nr_phdr = nr_cpus + 1;
        nr_phdr += mem->nr_ranges;
 
@@ -1255,7 +1266,7 @@ int crash_prepare_elf64_headers(struct crash_mem *mem, int kernel_map,
         * kexec-tools creates an extra PT_LOAD phdr for kernel text mapping
         * area (for example, ffffffff80000000 - ffffffffa0000000 on x86_64).
         * I think this is required by tools like gdb. So same physical
-        * memory will be mapped in two elf headers. One will contain kernel
+        * memory will be mapped in two ELF headers. One will contain kernel
         * text virtual addresses and other will have __va(physical) addresses.
         */
 
@@ -1282,7 +1293,7 @@ int crash_prepare_elf64_headers(struct crash_mem *mem, int kernel_map,
        ehdr->e_ehsize = sizeof(Elf64_Ehdr);
        ehdr->e_phentsize = sizeof(Elf64_Phdr);
 
-       /* Prepare one phdr of type PT_NOTE for each present cpu */
+       /* Prepare one phdr of type PT_NOTE for each present CPU */
        for_each_present_cpu(cpu) {
                phdr->p_type = PT_NOTE;
                notes_addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpu));
@@ -1324,10 +1335,10 @@ int crash_prepare_elf64_headers(struct crash_mem *mem, int kernel_map,
                phdr->p_filesz = phdr->p_memsz = mend - mstart + 1;
                phdr->p_align = 0;
                ehdr->e_phnum++;
-               phdr++;
-               pr_debug("Crash PT_LOAD elf header. phdr=%p vaddr=0x%llx, paddr=0x%llx, sz=0x%llx e_phnum=%d p_offset=0x%llx\n",
+               pr_debug("Crash PT_LOAD ELF header. phdr=%p vaddr=0x%llx, paddr=0x%llx, sz=0x%llx e_phnum=%d p_offset=0x%llx\n",
                        phdr, phdr->p_vaddr, phdr->p_paddr, phdr->p_filesz,
                        ehdr->e_phnum, phdr->p_offset);
+               phdr++;
        }
 
        *addr = buf;
index 37c3c4b..3cd075c 100644 (file)
@@ -36,9 +36,8 @@
  *
  * If you need less than 50 threads would mean we're dealing with systems
  * smaller than 3200 pages. This assumes you are capable of having ~13M memory,
- * and this would only be an be an upper limit, after which the OOM killer
- * would take effect. Systems like these are very unlikely if modules are
- * enabled.
+ * and this would only be an upper limit, after which the OOM killer would take
+ * effect. Systems like these are very unlikely if modules are enabled.
  */
 #define MAX_KMOD_CONCURRENT 50
 static atomic_t kmod_concurrent_max = ATOMIC_INIT(MAX_KMOD_CONCURRENT);
index b2807e7..3edaa38 100644 (file)
@@ -1258,8 +1258,7 @@ void kthread_use_mm(struct mm_struct *mm)
        if (active_mm != mm)
                mmdrop(active_mm);
 
-       to_kthread(tsk)->oldfs = get_fs();
-       set_fs(USER_DS);
+       to_kthread(tsk)->oldfs = force_uaccess_begin();
 }
 EXPORT_SYMBOL_GPL(kthread_use_mm);
 
@@ -1274,7 +1273,7 @@ void kthread_unuse_mm(struct mm_struct *mm)
        WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
        WARN_ON_ONCE(!tsk->mm);
 
-       set_fs(to_kthread(tsk)->oldfs);
+       force_uaccess_end(to_kthread(tsk)->oldfs);
 
        task_lock(tsk);
        sync_mm_rss(mm);
index 5525cd3..02ef87f 100644 (file)
@@ -423,7 +423,7 @@ static void seq_lock_time(struct seq_file *m, struct lock_time *lt)
        seq_time(m, lt->min);
        seq_time(m, lt->max);
        seq_time(m, lt->total);
-       seq_time(m, lt->nr ? div_s64(lt->total, lt->nr) : 0);
+       seq_time(m, lt->nr ? div64_u64(lt->total, lt->nr) : 0);
 }
 
 static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
index 8fa2600..1c5cff3 100644 (file)
@@ -422,7 +422,7 @@ static bool each_symbol_in_section(const struct symsearch *arr,
 }
 
 /* Returns true as soon as fn returns true, otherwise false. */
-bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
+static bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
                                    struct module *owner,
                                    void *data),
                         void *data)
@@ -484,7 +484,6 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
        }
        return false;
 }
-EXPORT_SYMBOL_GPL(each_symbol_section);
 
 struct find_symbol_arg {
        /* Input */
@@ -496,6 +495,7 @@ struct find_symbol_arg {
        struct module *owner;
        const s32 *crc;
        const struct kernel_symbol *sym;
+       enum mod_license license;
 };
 
 static bool check_exported_symbol(const struct symsearch *syms,
@@ -505,9 +505,9 @@ static bool check_exported_symbol(const struct symsearch *syms,
        struct find_symbol_arg *fsa = data;
 
        if (!fsa->gplok) {
-               if (syms->licence == GPL_ONLY)
+               if (syms->license == GPL_ONLY)
                        return false;
-               if (syms->licence == WILL_BE_GPL_ONLY && fsa->warn) {
+               if (syms->license == WILL_BE_GPL_ONLY && fsa->warn) {
                        pr_warn("Symbol %s is being used by a non-GPL module, "
                                "which will not be allowed in the future\n",
                                fsa->name);
@@ -529,6 +529,7 @@ static bool check_exported_symbol(const struct symsearch *syms,
        fsa->owner = owner;
        fsa->crc = symversion(syms->crcs, symnum);
        fsa->sym = &syms->start[symnum];
+       fsa->license = syms->license;
        return true;
 }
 
@@ -585,9 +586,10 @@ static bool find_exported_symbol_in_section(const struct symsearch *syms,
 
 /* Find an exported symbol and return it, along with, (optional) crc and
  * (optional) module which owns it.  Needs preempt disabled or module_mutex. */
-const struct kernel_symbol *find_symbol(const char *name,
+static const struct kernel_symbol *find_symbol(const char *name,
                                        struct module **owner,
                                        const s32 **crc,
+                                       enum mod_license *license,
                                        bool gplok,
                                        bool warn)
 {
@@ -602,13 +604,14 @@ const struct kernel_symbol *find_symbol(const char *name,
                        *owner = fsa.owner;
                if (crc)
                        *crc = fsa.crc;
+               if (license)
+                       *license = fsa.license;
                return fsa.sym;
        }
 
        pr_debug("Failed to find symbol %s\n", name);
        return NULL;
 }
-EXPORT_SYMBOL_GPL(find_symbol);
 
 /*
  * Search for module by name: must hold module_mutex (or preempt disabled
@@ -869,7 +872,7 @@ static int add_module_usage(struct module *a, struct module *b)
 }
 
 /* Module a uses b: caller needs module_mutex() */
-int ref_module(struct module *a, struct module *b)
+static int ref_module(struct module *a, struct module *b)
 {
        int err;
 
@@ -888,7 +891,6 @@ int ref_module(struct module *a, struct module *b)
        }
        return 0;
 }
-EXPORT_SYMBOL_GPL(ref_module);
 
 /* Clear the unload stuff of the module. */
 static void module_unload_free(struct module *mod)
@@ -1077,7 +1079,7 @@ void __symbol_put(const char *symbol)
        struct module *owner;
 
        preempt_disable();
-       if (!find_symbol(symbol, &owner, NULL, true, false))
+       if (!find_symbol(symbol, &owner, NULL, NULL, true, false))
                BUG();
        module_put(owner);
        preempt_enable();
@@ -1169,11 +1171,10 @@ static inline void module_unload_free(struct module *mod)
 {
 }
 
-int ref_module(struct module *a, struct module *b)
+static int ref_module(struct module *a, struct module *b)
 {
        return strong_try_module_get(b);
 }
-EXPORT_SYMBOL_GPL(ref_module);
 
 static inline int module_unload_init(struct module *mod)
 {
@@ -1356,7 +1357,7 @@ static inline int check_modstruct_version(const struct load_info *info,
         * locking is necessary -- use preempt_disable() to placate lockdep.
         */
        preempt_disable();
-       if (!find_symbol("module_layout", NULL, &crc, true, false)) {
+       if (!find_symbol("module_layout", NULL, &crc, NULL, true, false)) {
                preempt_enable();
                BUG();
        }
@@ -1430,6 +1431,24 @@ static int verify_namespace_is_imported(const struct load_info *info,
        return 0;
 }
 
+static bool inherit_taint(struct module *mod, struct module *owner)
+{
+       if (!owner || !test_bit(TAINT_PROPRIETARY_MODULE, &owner->taints))
+               return true;
+
+       if (mod->using_gplonly_symbols) {
+               pr_err("%s: module using GPL-only symbols uses symbols from proprietary module %s.\n",
+                       mod->name, owner->name);
+               return false;
+       }
+
+       if (!test_bit(TAINT_PROPRIETARY_MODULE, &mod->taints)) {
+               pr_warn("%s: module uses symbols from proprietary module %s, inheriting taint.\n",
+                       mod->name, owner->name);
+               set_bit(TAINT_PROPRIETARY_MODULE, &mod->taints);
+       }
+       return true;
+}
 
 /* Resolve a symbol for this module.  I.e. if we find one, record usage. */
 static const struct kernel_symbol *resolve_symbol(struct module *mod,
@@ -1440,6 +1459,7 @@ static const struct kernel_symbol *resolve_symbol(struct module *mod,
        struct module *owner;
        const struct kernel_symbol *sym;
        const s32 *crc;
+       enum mod_license license;
        int err;
 
        /*
@@ -1449,11 +1469,19 @@ static const struct kernel_symbol *resolve_symbol(struct module *mod,
         */
        sched_annotate_sleep();
        mutex_lock(&module_mutex);
-       sym = find_symbol(name, &owner, &crc,
+       sym = find_symbol(name, &owner, &crc, &license,
                          !(mod->taints & (1 << TAINT_PROPRIETARY_MODULE)), true);
        if (!sym)
                goto unlock;
 
+       if (license == GPL_ONLY)
+               mod->using_gplonly_symbols = true;
+
+       if (!inherit_taint(mod, owner)) {
+               sym = NULL;
+               goto getname;
+       }
+
        if (!check_version(info, name, mod, crc)) {
                sym = ERR_PTR(-EINVAL);
                goto getname;
@@ -2236,7 +2264,7 @@ void *__symbol_get(const char *symbol)
        const struct kernel_symbol *sym;
 
        preempt_disable();
-       sym = find_symbol(symbol, &owner, NULL, true, true);
+       sym = find_symbol(symbol, &owner, NULL, NULL, true, true);
        if (sym && strong_try_module_get(owner))
                sym = NULL;
        preempt_enable();
@@ -2272,7 +2300,7 @@ static int verify_exported_symbols(struct module *mod)
        for (i = 0; i < ARRAY_SIZE(arr); i++) {
                for (s = arr[i].sym; s < arr[i].sym + arr[i].num; s++) {
                        if (find_symbol(kernel_symbol_name(s), &owner, NULL,
-                                       true, false)) {
+                                       NULL, true, false)) {
                                pr_err("%s: exports duplicate symbol %s"
                                       " (owned by %s)\n",
                                       mod->name, kernel_symbol_name(s),
@@ -4489,7 +4517,6 @@ struct module *__module_address(unsigned long addr)
        }
        return mod;
 }
-EXPORT_SYMBOL_GPL(__module_address);
 
 /*
  * is_module_text_address - is this address inside module code?
@@ -4528,7 +4555,6 @@ struct module *__module_text_address(unsigned long addr)
        }
        return mod;
 }
-EXPORT_SYMBOL_GPL(__module_text_address);
 
 /* Don't grab lock, we're oopsing. */
 void print_modules(void)
index e2157ca..aef8872 100644 (file)
@@ -505,7 +505,7 @@ static void do_oops_enter_exit(void)
  * Return true if the calling CPU is allowed to print oops-related info.
  * This is a bit racy..
  */
-int oops_may_print(void)
+bool oops_may_print(void)
 {
        return pause_on_oops_flag == 0;
 }
@@ -551,7 +551,7 @@ static int init_oops_id(void)
 }
 late_initcall(init_oops_id);
 
-void print_oops_end_marker(void)
+static void print_oops_end_marker(void)
 {
        init_oops_id();
        pr_warn("---[ end trace %016llx ]---\n", (unsigned long long)oops_id);
index 5714f51..f33769f 100644 (file)
@@ -795,6 +795,103 @@ int hibernate(void)
        return error;
 }
 
+/**
+ * hibernate_quiet_exec - Execute a function with all devices frozen.
+ * @func: Function to execute.
+ * @data: Data pointer to pass to @func.
+ *
+ * Return the @func return value or an error code if it cannot be executed.
+ */
+int hibernate_quiet_exec(int (*func)(void *data), void *data)
+{
+       int error, nr_calls = 0;
+
+       lock_system_sleep();
+
+       if (!hibernate_acquire()) {
+               error = -EBUSY;
+               goto unlock;
+       }
+
+       pm_prepare_console();
+
+       error = __pm_notifier_call_chain(PM_HIBERNATION_PREPARE, -1, &nr_calls);
+       if (error) {
+               nr_calls--;
+               goto exit;
+       }
+
+       error = freeze_processes();
+       if (error)
+               goto exit;
+
+       lock_device_hotplug();
+
+       pm_suspend_clear_flags();
+
+       error = platform_begin(true);
+       if (error)
+               goto thaw;
+
+       error = freeze_kernel_threads();
+       if (error)
+               goto thaw;
+
+       error = dpm_prepare(PMSG_FREEZE);
+       if (error)
+               goto dpm_complete;
+
+       suspend_console();
+
+       error = dpm_suspend(PMSG_FREEZE);
+       if (error)
+               goto dpm_resume;
+
+       error = dpm_suspend_end(PMSG_FREEZE);
+       if (error)
+               goto dpm_resume;
+
+       error = platform_pre_snapshot(true);
+       if (error)
+               goto skip;
+
+       error = func(data);
+
+skip:
+       platform_finish(true);
+
+       dpm_resume_start(PMSG_THAW);
+
+dpm_resume:
+       dpm_resume(PMSG_THAW);
+
+       resume_console();
+
+dpm_complete:
+       dpm_complete(PMSG_THAW);
+
+       thaw_kernel_threads();
+
+thaw:
+       platform_end(true);
+
+       unlock_device_hotplug();
+
+       thaw_processes();
+
+exit:
+       __pm_notifier_call_chain(PM_POST_HIBERNATION, nr_calls, NULL);
+
+       pm_restore_console();
+
+       hibernate_release();
+
+unlock:
+       unlock_system_sleep();
+
+       return error;
+}
+EXPORT_SYMBOL_GPL(hibernate_quiet_exec);
 
 /**
  * software_resume - Resume from a saved hibernation image.
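
A short usage sketch for the new hibernate_quiet_exec() export: the driver context, callback and firmware call below are hypothetical, only the hibernate_quiet_exec() call itself comes from the patch.

#include <linux/suspend.h>
#include <linux/printk.h>

struct my_dev;                                   /* hypothetical driver context */
int my_fw_enter_survival_mode(struct my_dev *d); /* hypothetical firmware call */

/* Runs with processes frozen and devices suspended, without writing a
 * hibernation image or powering the machine off. */
static int my_snapshot_cb(void *data)
{
        return my_fw_enter_survival_mode(data);
}

int my_prepare_for_power_loss(struct my_dev *d)
{
        int ret = hibernate_quiet_exec(my_snapshot_cb, d);

        if (ret)
                pr_warn("quiet hibernate exec failed: %d\n", ret);
        return ret;
}
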
index 72fe443..fb4e0c5 100644 (file)
@@ -197,6 +197,7 @@ free_buf:
 static void relay_destroy_channel(struct kref *kref)
 {
        struct rchan *chan = container_of(kref, struct rchan, kref);
+       free_percpu(chan->buf);
        kfree(chan);
 }
 
index 84758f3..8471a0f 100644 (file)
@@ -6431,10 +6431,10 @@ void sched_show_task(struct task_struct *p)
        if (!try_get_task_stack(p))
                return;
 
-       printk(KERN_INFO "%-15.15s %c", p->comm, task_state_to_char(p));
+       pr_info("task:%-15.15s state:%c", p->comm, task_state_to_char(p));
 
        if (p->state == TASK_RUNNING)
-               printk(KERN_CONT "  running task    ");
+               pr_cont("  running task    ");
 #ifdef CONFIG_DEBUG_STACK_USAGE
        free = stack_not_used(p);
 #endif
@@ -6443,8 +6443,8 @@ void sched_show_task(struct task_struct *p)
        if (pid_alive(p))
                ppid = task_pid_nr(rcu_dereference(p->real_parent));
        rcu_read_unlock();
-       printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
-               task_pid_nr(p), ppid,
+       pr_cont(" stack:%5lu pid:%5d ppid:%6d flags:0x%08lx\n",
+               free, task_pid_nr(p), ppid,
                (unsigned long)task_thread_info(p)->flags);
 
        print_worker_info(KERN_INFO, p);
@@ -6479,13 +6479,6 @@ void show_state_filter(unsigned long state_filter)
 {
        struct task_struct *g, *p;
 
-#if BITS_PER_LONG == 32
-       printk(KERN_INFO
-               "  task                PC stack   pid father\n");
-#else
-       printk(KERN_INFO
-               "  task                        PC stack   pid father\n");
-#endif
        rcu_read_lock();
        for_each_process_thread(g, p) {
                /*
index 3fd2838..28709f6 100644 (file)
@@ -1999,7 +1999,7 @@ static inline void sub_nr_running(struct rq *rq, unsigned count)
 {
        rq->nr_running -= count;
        if (trace_sched_update_nr_running_tp_enabled()) {
-               call_trace_sched_update_nr_running(rq, count);
+               call_trace_sched_update_nr_running(rq, -count);
        }
 
        /* Check if we still need preemption */
index 6f16f7c..42b67d2 100644 (file)
@@ -2541,7 +2541,21 @@ bool get_signal(struct ksignal *ksig)
 
 relock:
        spin_lock_irq(&sighand->siglock);
-       current->jobctl &= ~JOBCTL_TASK_WORK;
+       /*
+        * Make sure we can safely read ->jobctl in task_work_add(). As Oleg
+        * states:
+        *
+        * It pairs with mb (implied by cmpxchg) before READ_ONCE. So we
+        * roughly have
+        *
+        *      task_work_add:                          get_signal:
+        *      STORE(task->task_works, new_work);      STORE(task->jobctl);
+        *      mb();                                   mb();
+        *      LOAD(task->jobctl);                     LOAD(task->task_works);
+        *
+        * and we can rely on STORE-MB-LOAD [in task_work_add()].
+        */
+       smp_store_mb(current->jobctl, current->jobctl & ~JOBCTL_TASK_WORK);
        if (unlikely(current->task_works)) {
                spin_unlock_irq(&sighand->siglock);
                task_work_run();
index 2af66e4..946f44a 100644 (file)
@@ -233,10 +233,9 @@ unsigned int stack_trace_save_user(unsigned long *store, unsigned int size)
        if (current->flags & PF_KTHREAD)
                return 0;
 
-       fs = get_fs();
-       set_fs(USER_DS);
+       fs = force_uaccess_begin();
        arch_stack_walk_user(consume_entry, &c, task_pt_regs(current));
-       set_fs(fs);
+       force_uaccess_end(fs);
 
        return c.len;
 }
index 3b69a56..4d59775 100644 (file)
@@ -364,7 +364,6 @@ COND_SYSCALL(socketcall);
 COND_SYSCALL_COMPAT(socketcall);
 
 /* compat syscalls for arm64, x86, ... */
-COND_SYSCALL_COMPAT(sysctl);
 COND_SYSCALL_COMPAT(fanotify_mark);
 
 /* x86 */
index f785de3..287862f 100644 (file)
@@ -2852,6 +2852,15 @@ static struct ctl_table vm_table[] = {
                .proc_handler   = sysctl_compaction_handler,
        },
        {
+               .procname       = "compaction_proactiveness",
+               .data           = &sysctl_compaction_proactiveness,
+               .maxlen         = sizeof(sysctl_compaction_proactiveness),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = SYSCTL_ZERO,
+               .extra2         = &one_hundred,
+       },
+       {
                .procname       = "extfrag_threshold",
                .data           = &sysctl_extfrag_threshold,
                .maxlen         = sizeof(int),
diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
deleted file mode 100644 (file)
index 7d550cc..0000000
+++ /dev/null
@@ -1,171 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/stat.h>
-#include <linux/sysctl.h>
-#include "../fs/xfs/xfs_sysctl.h"
-#include <linux/sunrpc/debug.h>
-#include <linux/string.h>
-#include <linux/syscalls.h>
-#include <linux/namei.h>
-#include <linux/mount.h>
-#include <linux/fs.h>
-#include <linux/nsproxy.h>
-#include <linux/pid_namespace.h>
-#include <linux/file.h>
-#include <linux/ctype.h>
-#include <linux/netdevice.h>
-#include <linux/kernel.h>
-#include <linux/uuid.h>
-#include <linux/slab.h>
-#include <linux/compat.h>
-
-static ssize_t binary_sysctl(const int *name, int nlen,
-       void __user *oldval, size_t oldlen, void __user *newval, size_t newlen)
-{
-       return -ENOSYS;
-}
-
-static void deprecated_sysctl_warning(const int *name, int nlen)
-{
-       int i;
-
-       /*
-        * CTL_KERN/KERN_VERSION is used by older glibc and cannot
-        * ever go away.
-        */
-       if (nlen >= 2 && name[0] == CTL_KERN && name[1] == KERN_VERSION)
-               return;
-
-       if (printk_ratelimit()) {
-               printk(KERN_INFO
-                       "warning: process `%s' used the deprecated sysctl "
-                       "system call with ", current->comm);
-               for (i = 0; i < nlen; i++)
-                       printk(KERN_CONT "%d.", name[i]);
-               printk(KERN_CONT "\n");
-       }
-       return;
-}
-
-#define WARN_ONCE_HASH_BITS 8
-#define WARN_ONCE_HASH_SIZE (1<<WARN_ONCE_HASH_BITS)
-
-static DECLARE_BITMAP(warn_once_bitmap, WARN_ONCE_HASH_SIZE);
-
-#define FNV32_OFFSET 2166136261U
-#define FNV32_PRIME 0x01000193
-
-/*
- * Print each legacy sysctl (approximately) only once.
- * To avoid making the tables non-const use a external
- * hash-table instead.
- * Worst case hash collision: 6, but very rarely.
- * NOTE! We don't use the SMP-safe bit tests. We simply
- * don't care enough.
- */
-static void warn_on_bintable(const int *name, int nlen)
-{
-       int i;
-       u32 hash = FNV32_OFFSET;
-
-       for (i = 0; i < nlen; i++)
-               hash = (hash ^ name[i]) * FNV32_PRIME;
-       hash %= WARN_ONCE_HASH_SIZE;
-       if (__test_and_set_bit(hash, warn_once_bitmap))
-               return;
-       deprecated_sysctl_warning(name, nlen);
-}
-
-static ssize_t do_sysctl(int __user *args_name, int nlen,
-       void __user *oldval, size_t oldlen, void __user *newval, size_t newlen)
-{
-       int name[CTL_MAXNAME];
-       int i;
-
-       /* Check args->nlen. */
-       if (nlen < 0 || nlen > CTL_MAXNAME)
-               return -ENOTDIR;
-       /* Read in the sysctl name for simplicity */
-       for (i = 0; i < nlen; i++)
-               if (get_user(name[i], args_name + i))
-                       return -EFAULT;
-
-       warn_on_bintable(name, nlen);
-
-       return binary_sysctl(name, nlen, oldval, oldlen, newval, newlen);
-}
-
-SYSCALL_DEFINE1(sysctl, struct __sysctl_args __user *, args)
-{
-       struct __sysctl_args tmp;
-       size_t oldlen = 0;
-       ssize_t result;
-
-       if (copy_from_user(&tmp, args, sizeof(tmp)))
-               return -EFAULT;
-
-       if (tmp.oldval && !tmp.oldlenp)
-               return -EFAULT;
-
-       if (tmp.oldlenp && get_user(oldlen, tmp.oldlenp))
-               return -EFAULT;
-
-       result = do_sysctl(tmp.name, tmp.nlen, tmp.oldval, oldlen,
-                          tmp.newval, tmp.newlen);
-
-       if (result >= 0) {
-               oldlen = result;
-               result = 0;
-       }
-
-       if (tmp.oldlenp && put_user(oldlen, tmp.oldlenp))
-               return -EFAULT;
-
-       return result;
-}
-
-
-#ifdef CONFIG_COMPAT
-
-struct compat_sysctl_args {
-       compat_uptr_t   name;
-       int             nlen;
-       compat_uptr_t   oldval;
-       compat_uptr_t   oldlenp;
-       compat_uptr_t   newval;
-       compat_size_t   newlen;
-       compat_ulong_t  __unused[4];
-};
-
-COMPAT_SYSCALL_DEFINE1(sysctl, struct compat_sysctl_args __user *, args)
-{
-       struct compat_sysctl_args tmp;
-       compat_size_t __user *compat_oldlenp;
-       size_t oldlen = 0;
-       ssize_t result;
-
-       if (copy_from_user(&tmp, args, sizeof(tmp)))
-               return -EFAULT;
-
-       if (tmp.oldval && !tmp.oldlenp)
-               return -EFAULT;
-
-       compat_oldlenp = compat_ptr(tmp.oldlenp);
-       if (compat_oldlenp && get_user(oldlen, compat_oldlenp))
-               return -EFAULT;
-
-       result = do_sysctl(compat_ptr(tmp.name), tmp.nlen,
-                          compat_ptr(tmp.oldval), oldlen,
-                          compat_ptr(tmp.newval), tmp.newlen);
-
-       if (result >= 0) {
-               oldlen = result;
-               result = 0;
-       }
-
-       if (compat_oldlenp && put_user(oldlen, compat_oldlenp))
-               return -EFAULT;
-
-       return result;
-}
-
-#endif /* CONFIG_COMPAT */
index 5c0848c..613b2d6 100644 (file)
@@ -42,7 +42,13 @@ task_work_add(struct task_struct *task, struct callback_head *work, int notify)
                set_notify_resume(task);
                break;
        case TWA_SIGNAL:
-               if (lock_task_sighand(task, &flags)) {
+               /*
+                * Only grab the sighand lock if we don't already have some
+                * task_work pending. This pairs with the smp_store_mb()
+                * in get_signal(), see comment there.
+                */
+               if (!(READ_ONCE(task->jobctl) & JOBCTL_TASK_WORK) &&
+                   lock_task_sighand(task, &flags)) {
                        task->jobctl |= JOBCTL_TASK_WORK;
                        signal_wake_up(task, 0);
                        unlock_task_sighand(task, &flags);
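
The comments in get_signal() and task_work_add() describe a classic store-buffering pairing: each side stores its own flag with a full barrier and then loads the other side's state, so at least one of them must observe the other's store. A generic sketch of that guarantee (illustration only, not the kernel code):

#include <linux/compiler.h>
#include <asm/barrier.h>

static int flag_a, flag_b;

/* Side A, e.g. task_work_add(): publish work, then check the flag. */
static int side_a(void)
{
        smp_store_mb(flag_a, 1);        /* STORE flag_a, then full barrier */
        return READ_ONCE(flag_b);       /* LOAD the other side's flag */
}

/* Side B, e.g. get_signal(): clear JOBCTL_TASK_WORK, then check the work list. */
static int side_b(void)
{
        smp_store_mb(flag_b, 1);        /* STORE flag_b, then full barrier */
        return READ_ONCE(flag_a);       /* LOAD the other side's flag */
}

/*
 * With both barriers in place, side_a() and side_b() cannot both return 0:
 * whichever store+barrier executes last is guaranteed to observe the other
 * side's store. That is the STORE-MB-LOAD property the jobctl/task_works
 * handshake relies on.
 */
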
index fcc4235..a09b1d6 100644 (file)
@@ -52,6 +52,15 @@ config GENERIC_CLOCKEVENTS_MIN_ADJUST
 config GENERIC_CMOS_UPDATE
        bool
 
+# Select to handle posix CPU timers from task_work
+# and not from the timer interrupt context
+config HAVE_POSIX_CPU_TIMERS_TASK_WORK
+       bool
+
+config POSIX_CPU_TIMERS_TASK_WORK
+       bool
+       default y if POSIX_TIMERS && HAVE_POSIX_CPU_TIMERS_TASK_WORK
+
 if GENERIC_CLOCKEVENTS
 menu "Timers subsystem"
 
index 2ffb466..ca223a8 100644 (file)
@@ -192,7 +192,7 @@ static void alarmtimer_dequeue(struct alarm_base *base, struct alarm *alarm)
  * When a alarm timer fires, this runs through the timerqueue to
  * see which alarms expired, and runs those. If there are more alarm
  * timers queued for the future, we set the hrtimer to fire when
- * when the next future alarm timer expires.
+ * the next future alarm timer expires.
  */
 static enum hrtimer_restart alarmtimer_fired(struct hrtimer *timer)
 {
index d89da1c..c403851 100644 (file)
@@ -135,7 +135,11 @@ static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = {
  * timer->base->cpu_base
  */
 static struct hrtimer_cpu_base migration_cpu_base = {
-       .clock_base = { { .cpu_base = &migration_cpu_base, }, },
+       .clock_base = { {
+               .cpu_base = &migration_cpu_base,
+               .seq      = SEQCNT_RAW_SPINLOCK_ZERO(migration_cpu_base.seq,
+                                                    &migration_cpu_base.lock),
+       }, },
 };
 
 #define migration_base migration_cpu_base.clock_base[0]
@@ -1998,8 +2002,11 @@ int hrtimers_prepare_cpu(unsigned int cpu)
        int i;
 
        for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
-               cpu_base->clock_base[i].cpu_base = cpu_base;
-               timerqueue_init_head(&cpu_base->clock_base[i].active);
+               struct hrtimer_clock_base *clock_b = &cpu_base->clock_base[i];
+
+               clock_b->cpu_base = cpu_base;
+               seqcount_raw_spinlock_init(&clock_b->seq, &cpu_base->lock);
+               timerqueue_init_head(&clock_b->active);
        }
 
        cpu_base->cpu = cpu;
index 1651179..a71758e 100644 (file)
@@ -377,6 +377,7 @@ static int posix_cpu_clock_get(const clockid_t clock, struct timespec64 *tp)
  */
 static int posix_cpu_timer_create(struct k_itimer *new_timer)
 {
+       static struct lock_class_key posix_cpu_timers_key;
        struct pid *pid;
 
        rcu_read_lock();
@@ -386,6 +387,17 @@ static int posix_cpu_timer_create(struct k_itimer *new_timer)
                return -EINVAL;
        }
 
+       /*
+        * If posix timer expiry is handled in task work context then
+        * timer::it_lock can be taken without disabling interrupts as all
+        * other locking happens in task context. This requires a separate
+        * lock class key, otherwise regular posix timer expiry would record
+        * the lock class being taken in interrupt context and generate a
+        * false positive warning.
+        */
+       if (IS_ENABLED(CONFIG_POSIX_CPU_TIMERS_TASK_WORK))
+               lockdep_set_class(&new_timer->it_lock, &posix_cpu_timers_key);
+
        new_timer->kclock = &clock_posix_cpu;
        timerqueue_init(&new_timer->it.cpu.node);
        new_timer->it.cpu.pid = get_pid(pid);
@@ -1080,43 +1092,163 @@ static inline bool fastpath_timer_check(struct task_struct *tsk)
        return false;
 }
 
+static void handle_posix_cpu_timers(struct task_struct *tsk);
+
+#ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK
+static void posix_cpu_timers_work(struct callback_head *work)
+{
+       handle_posix_cpu_timers(current);
+}
+
 /*
- * This is called from the timer interrupt handler.  The irq handler has
- * already updated our counts.  We need to check if any timers fire now.
- * Interrupts are disabled.
+ * Initialize posix CPU timers task work in init task. Out of line to
+ * keep the callback static and to avoid header recursion hell.
  */
-void run_posix_cpu_timers(void)
+void __init posix_cputimers_init_work(void)
 {
-       struct task_struct *tsk = current;
-       struct k_itimer *timer, *next;
-       unsigned long flags;
-       LIST_HEAD(firing);
+       init_task_work(&current->posix_cputimers_work.work,
+                      posix_cpu_timers_work);
+}
 
-       lockdep_assert_irqs_disabled();
+/*
+ * Note: All operations on tsk->posix_cputimers_work.scheduled happen either
+ * in hard interrupt context or in task context with interrupts
+ * disabled. Aside from that, the writer/reader interaction is always in the
+ * context of the current task, which means they are strict per CPU.
+ */
+static inline bool posix_cpu_timers_work_scheduled(struct task_struct *tsk)
+{
+       return tsk->posix_cputimers_work.scheduled;
+}
 
-       /*
-        * The fast path checks that there are no expired thread or thread
-        * group timers.  If that's so, just return.
-        */
-       if (!fastpath_timer_check(tsk))
+static inline void __run_posix_cpu_timers(struct task_struct *tsk)
+{
+       if (WARN_ON_ONCE(tsk->posix_cputimers_work.scheduled))
                return;
 
-       lockdep_posixtimer_enter();
-       if (!lock_task_sighand(tsk, &flags)) {
-               lockdep_posixtimer_exit();
-               return;
+       /* Schedule task work to actually expire the timers */
+       tsk->posix_cputimers_work.scheduled = true;
+       task_work_add(tsk, &tsk->posix_cputimers_work.work, TWA_RESUME);
+}
+
+static inline bool posix_cpu_timers_enable_work(struct task_struct *tsk,
+                                               unsigned long start)
+{
+       bool ret = true;
+
+       /*
+        * On !RT kernels interrupts are disabled while collecting expired
+        * timers, so no tick can happen and the fast path check can be
+        * reenabled without further checks.
+        */
+       if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
+               tsk->posix_cputimers_work.scheduled = false;
+               return true;
        }
+
        /*
-        * Here we take off tsk->signal->cpu_timers[N] and
-        * tsk->cpu_timers[N] all the timers that are firing, and
-        * put them on the firing list.
+        * On RT enabled kernels ticks can happen while the expired timers
+        * are collected under sighand lock. But any tick which observes
+        * the CPUTIMERS_WORK_SCHEDULED bit set does not run the fastpath
+        * checks. So reenabling the tick work has to be done carefully:
+        *
+        * Disable interrupts and run the fast path check if jiffies have
+        * advanced since the collecting of expired timers started. If
+        * jiffies have not advanced or the fast path check did not find
+        * newly expired timers, reenable the fast path check in the timer
+        * interrupt. If there are newly expired timers, return false and
+        * let the collection loop repeat.
         */
-       check_thread_timers(tsk, &firing);
+       local_irq_disable();
+       if (start != jiffies && fastpath_timer_check(tsk))
+               ret = false;
+       else
+               tsk->posix_cputimers_work.scheduled = false;
+       local_irq_enable();
+
+       return ret;
+}
+#else /* CONFIG_POSIX_CPU_TIMERS_TASK_WORK */
+static inline void __run_posix_cpu_timers(struct task_struct *tsk)
+{
+       lockdep_posixtimer_enter();
+       handle_posix_cpu_timers(tsk);
+       lockdep_posixtimer_exit();
+}
+
+static inline bool posix_cpu_timers_work_scheduled(struct task_struct *tsk)
+{
+       return false;
+}
+
+static inline bool posix_cpu_timers_enable_work(struct task_struct *tsk,
+                                               unsigned long start)
+{
+       return true;
+}
+#endif /* CONFIG_POSIX_CPU_TIMERS_TASK_WORK */
+
+static void handle_posix_cpu_timers(struct task_struct *tsk)
+{
+       struct k_itimer *timer, *next;
+       unsigned long flags, start;
+       LIST_HEAD(firing);
+
+       if (!lock_task_sighand(tsk, &flags))
+               return;
 
-       check_process_timers(tsk, &firing);
+       do {
+               /*
+                * On RT, locking sighand lock does not disable interrupts,
+                * so this needs to be careful vs. ticks. Store the current
+                * jiffies value.
+                */
+               start = READ_ONCE(jiffies);
+               barrier();
+
+               /*
+                * Here we take off tsk->signal->cpu_timers[N] and
+                * tsk->cpu_timers[N] all the timers that are firing, and
+                * put them on the firing list.
+                */
+               check_thread_timers(tsk, &firing);
+
+               check_process_timers(tsk, &firing);
+
+               /*
+                * The above timer checks have updated the expiry cache and
+                * because nothing can have queued or modified timers after
+                * sighand lock was taken above it is guaranteed to be
+                * consistent. So the next timer interrupt fastpath check
+                * will find valid data.
+                *
+                * If timer expiry runs in the timer interrupt context then
+                * the loop is not relevant as timers will be directly
+                * expired in interrupt context. The stub function below
+                * always returns true, which allows the compiler to
+                * optimize the loop out.
+                *
+                * If timer expiry is deferred to task work context then
+                * the following rules apply:
+                *
+                * - On !RT kernels no tick can have happened on this CPU
+                *   after sighand lock was acquired because interrupts are
+                *   disabled. So reenabling task work before dropping
+                *   sighand lock and reenabling interrupts is race free.
+                *
+                * - On RT kernels ticks might have happened but the tick
+                *   work ignored posix CPU timer handling because the
+                *   CPUTIMERS_WORK_SCHEDULED bit is set. Reenabling work
+                *   must be done very carefully including a check whether
+                *   ticks have happened since the start of the timer
+                *   expiry checks. posix_cpu_timers_enable_work() takes
+                *   care of that and eventually lets the expiry checks
+                *   run again.
+                */
+       } while (!posix_cpu_timers_enable_work(tsk, start));
 
        /*
-        * We must release these locks before taking any timer's lock.
+        * We must release sighand lock before taking any timer's lock.
         * There is a potential race with timer deletion here, as the
         * siglock now protects our private firing list.  We have set
         * the firing flag in each timer, so that a deletion attempt
@@ -1134,6 +1266,13 @@ void run_posix_cpu_timers(void)
        list_for_each_entry_safe(timer, next, &firing, it.cpu.elist) {
                int cpu_firing;
 
+               /*
+                * spin_lock() is sufficient here even independent of the
+                * expiry context. If expiry happens in hard interrupt
+                * context it's obvious. For task work context it's safe
+                * because all other operations on timer::it_lock happen in
+                * task context (syscall or exit).
+                */
                spin_lock(&timer->it_lock);
                list_del_init(&timer->it.cpu.elist);
                cpu_firing = timer->it.cpu.firing;
@@ -1147,7 +1286,34 @@ void run_posix_cpu_timers(void)
                        cpu_timer_fire(timer);
                spin_unlock(&timer->it_lock);
        }
-       lockdep_posixtimer_exit();
+}
+
+/*
+ * This is called from the timer interrupt handler.  The irq handler has
+ * already updated our counts.  We need to check if any timers fire now.
+ * Interrupts are disabled.
+ */
+void run_posix_cpu_timers(void)
+{
+       struct task_struct *tsk = current;
+
+       lockdep_assert_irqs_disabled();
+
+       /*
+        * If the actual expiry is deferred to task work context and the
+        * work is already scheduled there is no point to do anything here.
+        */
+       if (posix_cpu_timers_work_scheduled(tsk))
+               return;
+
+       /*
+        * The fast path checks that there are no expired thread or thread
+        * group timers.  If that's so, just return.
+        */
+       if (!fastpath_timer_check(tsk))
+               return;
+
+       __run_posix_cpu_timers(tsk);
 }
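
When an architecture selects HAVE_POSIX_CPU_TIMERS_TASK_WORK (which enables POSIX_CPU_TIMERS_TASK_WORK above), run_posix_cpu_timers() only queues task work and the expiry itself runs on the way back to user space. Stripped of the RT re-check loop, the shape of that deferral is roughly the sketch below; the struct is illustrative and timers_expired() is a stand-in for fastpath_timer_check():

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/task_work.h>

struct deferred_expiry {
        struct callback_head    work;
        bool                    scheduled;      /* only touched with irqs off */
};

static bool timers_expired(struct task_struct *tsk);    /* placeholder fastpath */

/* Task context, interrupts enabled: sleeping locks are fine, also on RT. */
static void expiry_work_fn(struct callback_head *cb)
{
        struct deferred_expiry *d = container_of(cb, struct deferred_expiry, work);

        /* ... collect and fire the expired timers here ... */
        d->scheduled = false;   /* simplified; the real code re-checks with irqs off */
}

static void deferred_expiry_init(struct deferred_expiry *d)
{
        init_task_work(&d->work, expiry_work_fn);
        d->scheduled = false;
}

/* Timer interrupt path, interrupts disabled. */
static void on_tick(struct task_struct *tsk, struct deferred_expiry *d)
{
        if (d->scheduled)                       /* work already queued */
                return;
        if (!timers_expired(tsk))               /* nothing expired, fast path */
                return;
        d->scheduled = true;
        task_work_add(tsk, &d->work, TWA_RESUME);       /* run on return to user */
}
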
 
 /*
index 0deaf4b..1c03eec 100644 (file)
@@ -229,7 +229,7 @@ void __init generic_sched_clock_init(void)
 {
        /*
         * If no sched_clock() function has been provided at that point,
-        * make it the final one one.
+        * make it the final one.
         */
        if (cd.actual_read_sched_clock == jiffy_sched_clock_read)
                sched_clock_register(jiffy_sched_clock_read, BITS_PER_LONG, HZ);
index 63a632f..4c47f38 100644 (file)
@@ -39,18 +39,19 @@ enum timekeeping_adv_mode {
        TK_ADV_FREQ
 };
 
+DEFINE_RAW_SPINLOCK(timekeeper_lock);
+
 /*
  * The most important data for readout fits into a single 64 byte
  * cache line.
  */
 static struct {
-       seqcount_t              seq;
+       seqcount_raw_spinlock_t seq;
        struct timekeeper       timekeeper;
 } tk_core ____cacheline_aligned = {
-       .seq = SEQCNT_ZERO(tk_core.seq),
+       .seq = SEQCNT_RAW_SPINLOCK_ZERO(tk_core.seq, &timekeeper_lock),
 };
 
-static DEFINE_RAW_SPINLOCK(timekeeper_lock);
 static struct timekeeper shadow_timekeeper;
 
 /**
@@ -63,7 +64,7 @@ static struct timekeeper shadow_timekeeper;
  * See @update_fast_timekeeper() below.
  */
 struct tk_fast {
-       seqcount_t              seq;
+       seqcount_raw_spinlock_t seq;
        struct tk_read_base     base[2];
 };
 
@@ -80,11 +81,13 @@ static struct clocksource dummy_clock = {
 };
 
 static struct tk_fast tk_fast_mono ____cacheline_aligned = {
+       .seq     = SEQCNT_RAW_SPINLOCK_ZERO(tk_fast_mono.seq, &timekeeper_lock),
        .base[0] = { .clock = &dummy_clock, },
        .base[1] = { .clock = &dummy_clock, },
 };
 
 static struct tk_fast tk_fast_raw  ____cacheline_aligned = {
+       .seq     = SEQCNT_RAW_SPINLOCK_ZERO(tk_fast_raw.seq, &timekeeper_lock),
        .base[0] = { .clock = &dummy_clock, },
        .base[1] = { .clock = &dummy_clock, },
 };
@@ -157,7 +160,7 @@ static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
  * tk_clock_read - atomic clocksource read() helper
  *
  * This helper is necessary to use in the read paths because, while the
- * seqlock ensures we don't return a bad value while structures are updated,
+ * seqcount ensures we don't return a bad value while structures are updated,
  * it doesn't protect from potential crashes. There is the possibility that
  * the tkr's clocksource may change between the read reference, and the
  * clock reference passed to the read function.  This can cause crashes if
@@ -222,10 +225,10 @@ static inline u64 timekeeping_get_delta(const struct tk_read_base *tkr)
        unsigned int seq;
 
        /*
-        * Since we're called holding a seqlock, the data may shift
+        * Since we're called holding a seqcount, the data may shift
         * under us while we're doing the calculation. This can cause
         * false positives, since we'd note a problem but throw the
-        * results away. So nest another seqlock here to atomically
+        * results away. So nest another seqcount here to atomically
         * grab the points we are checking with.
         */
        do {
@@ -486,7 +489,7 @@ EXPORT_SYMBOL_GPL(ktime_get_raw_fast_ns);
  *
  * To keep it NMI safe since we're accessing from tracing, we're not using a
  * separate timekeeper with updates to monotonic clock and boot offset
- * protected with seqlocks. This has the following minor side effects:
+ * protected with seqcounts. This has the following minor side effects:
  *
  * (1) Its possible that a timestamp be taken after the boot offset is updated
  * but before the timekeeper is updated. If this happens, the new boot offset
@@ -2001,7 +2004,7 @@ static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
  * logarithmic_accumulation - shifted accumulation of cycles
  *
  * This functions accumulates a shifted interval of cycles into
- * into a shifted interval nanoseconds. Allows for O(log) accumulation
+ * a shifted interval of nanoseconds. Allows for O(log) accumulation
  * loop.
  *
  * Returns the unconsumed cycles.
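
The seqcount_raw_spinlock_t conversions in the timekeeping hunks above associate each sequence counter with timekeeper_lock, so lockdep can verify that writers actually hold the lock that serializes them. A minimal sketch of the write/read pattern for such an associated seqcount (all names below are illustrative, not from the patch):

#include <linux/seqlock.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(demo_lock);
static seqcount_raw_spinlock_t demo_seq =
        SEQCNT_RAW_SPINLOCK_ZERO(demo_seq, &demo_lock);
static u64 demo_value;

/* Writer: must hold the associated lock; lockdep checks this. */
static void demo_update(u64 new)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&demo_lock, flags);
        write_seqcount_begin(&demo_seq);
        demo_value = new;
        write_seqcount_end(&demo_seq);
        raw_spin_unlock_irqrestore(&demo_lock, flags);
}

/* Reader: lockless, retries if it races with an update. */
static u64 demo_read(void)
{
        unsigned int seq;
        u64 val;

        do {
                seq = read_seqcount_begin(&demo_seq);
                val = demo_value;
        } while (read_seqcount_retry(&demo_seq, seq));

        return val;
}
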
index bcbb52d..4ca2787 100644 (file)
@@ -1,12 +1,14 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 #ifndef _TIMEKEEPING_INTERNAL_H
 #define _TIMEKEEPING_INTERNAL_H
-/*
- * timekeeping debug functions
- */
+
 #include <linux/clocksource.h>
+#include <linux/spinlock.h>
 #include <linux/time.h>
 
+/*
+ * timekeeping debug functions
+ */
 #ifdef CONFIG_DEBUG_FS
 extern void tk_debug_account_sleep_time(const struct timespec64 *t);
 #else
@@ -31,4 +33,7 @@ static inline u64 clocksource_delta(u64 now, u64 last, u64 mask)
 }
 #endif
 
+/* Semi public for serialization of non timekeeper VDSO updates. */
+extern raw_spinlock_t timekeeper_lock;
+
 #endif /* _TIMEKEEPING_INTERNAL_H */
index ae5029f..a16764b 100644 (file)
@@ -2017,6 +2017,7 @@ static void __init init_timer_cpus(void)
 void __init init_timers(void)
 {
        init_timer_cpus();
+       posix_cputimers_init_work();
        open_softirq(TIMER_SOFTIRQ, run_timer_softirq);
 }
 
index 54ce6eb..88e6b8e 100644 (file)
@@ -13,6 +13,8 @@
 #include <vdso/helpers.h>
 #include <vdso/vsyscall.h>
 
+#include "timekeeping_internal.h"
+
 static inline void update_vdso_data(struct vdso_data *vdata,
                                    struct timekeeper *tk)
 {
@@ -127,3 +129,42 @@ void update_vsyscall_tz(void)
 
        __arch_sync_vdso_data(vdata);
 }
+
+/**
+ * vdso_update_begin - Start of a VDSO update section
+ *
+ * Allows architecture code to safely update the architecture specific VDSO
+ * data. Disables interrupts, acquires timekeeper lock to serialize against
+ * concurrent updates from timekeeping and invalidates the VDSO data
+ * sequence counter to prevent concurrent readers from accessing
+ * inconsistent data.
+ *
+ * Returns: Saved interrupt flags which need to be handed in to
+ * vdso_update_end().
+ */
+unsigned long vdso_update_begin(void)
+{
+       struct vdso_data *vdata = __arch_get_k_vdso_data();
+       unsigned long flags;
+
+       raw_spin_lock_irqsave(&timekeeper_lock, flags);
+       vdso_write_begin(vdata);
+       return flags;
+}
+
+/**
+ * vdso_update_end - End of a VDSO update section
+ * @flags:     Interrupt flags as returned from vdso_update_begin()
+ *
+ * Pairs with vdso_update_begin(). Marks vdso data consistent, invokes data
+ * synchronization if the architecture requires it, drops timekeeper lock
+ * and restores interrupt flags.
+ */
+void vdso_update_end(unsigned long flags)
+{
+       struct vdso_data *vdata = __arch_get_k_vdso_data();
+
+       vdso_write_end(vdata);
+       __arch_sync_vdso_data(vdata);
+       raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
+}
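
A sketch of the intended use of the two helpers from architecture code; the wrapper function below is hypothetical, while the fields mirror what update_vsyscall_tz() already writes:

#include <vdso/datapage.h>
#include <vdso/vsyscall.h>

/* Hypothetical arch-side helper updating arch-visible vdso data. */
void arch_refresh_vdso_tz(int tz_minuteswest, int tz_dsttime)
{
        struct vdso_data *vdata = __arch_get_k_vdso_data();
        unsigned long flags;

        flags = vdso_update_begin();    /* irqs off, timekeeper_lock held, seq odd */
        vdata[CS_HRES_COARSE].tz_minuteswest = tz_minuteswest;
        vdata[CS_HRES_COARSE].tz_dsttime = tz_dsttime;
        vdso_update_end(flags);         /* seq valid again, arch sync, lock dropped */
}
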
index aeba5ee..e153be3 100644 (file)
@@ -2,9 +2,9 @@
 
 # Do not instrument the tracer itself:
 
+ccflags-remove-$(CONFIG_FUNCTION_TRACER) += $(CC_FLAGS_FTRACE)
+
 ifdef CONFIG_FUNCTION_TRACER
-ORIG_CFLAGS := $(KBUILD_CFLAGS)
-KBUILD_CFLAGS = $(subst $(CC_FLAGS_FTRACE),,$(ORIG_CFLAGS))
 
 # Avoid recursion due to instrumentation.
 KCSAN_SANITIZE := n
index cb91ef9..a8d4f25 100644 (file)
@@ -383,7 +383,7 @@ static DEFINE_RAW_SPINLOCK(trace_printk_lock);
 
 #define BPF_TRACE_PRINTK_SIZE   1024
 
-static inline __printf(1, 0) int bpf_do_trace_printk(const char *fmt, ...)
+static __printf(1, 0) int bpf_do_trace_printk(const char *fmt, ...)
 {
        static char buf[BPF_TRACE_PRINTK_SIZE];
        unsigned long flags;
index a25433f..fcf3ee8 100644 (file)
@@ -119,37 +119,16 @@ static void call_usermodehelper_exec_sync(struct subprocess_info *sub_info)
 {
        pid_t pid;
 
-       /* If SIGCLD is ignored kernel_wait4 won't populate the status. */
+       /* If SIGCLD is ignored do_wait won't populate the status. */
        kernel_sigaction(SIGCHLD, SIG_DFL);
        pid = kernel_thread(call_usermodehelper_exec_async, sub_info, SIGCHLD);
-       if (pid < 0) {
+       if (pid < 0)
                sub_info->retval = pid;
-       } else {
-               int ret = -ECHILD;
-               /*
-                * Normally it is bogus to call wait4() from in-kernel because
-                * wait4() wants to write the exit code to a userspace address.
-                * But call_usermodehelper_exec_sync() always runs as kernel
-                * thread (workqueue) and put_user() to a kernel address works
-                * OK for kernel threads, due to their having an mm_segment_t
-                * which spans the entire address space.
-                *
-                * Thus the __user pointer cast is valid here.
-                */
-               kernel_wait4(pid, (int __user *)&ret, 0, NULL);
-
-               /*
-                * If ret is 0, either call_usermodehelper_exec_async failed and
-                * the real error code is already in sub_info->retval or
-                * sub_info->retval is 0 anyway, so don't mess with it then.
-                */
-               if (ret)
-                       sub_info->retval = ret;
-       }
+       else
+               kernel_wait(pid, &sub_info->retval);
 
        /* Restore default kernel sig handler */
        kernel_sigaction(SIGCHLD, SIG_IGN);
-
        umh_complete(sub_info);
 }
 
index f74020f..0ef8f65 100644 (file)
@@ -393,6 +393,7 @@ static void free_watch(struct rcu_head *rcu)
        struct watch *watch = container_of(rcu, struct watch, rcu);
 
        put_watch_queue(rcu_access_pointer(watch->queue));
+       atomic_dec(&watch->cred->user->nr_watches);
        put_cred(watch->cred);
 }
 
@@ -452,6 +453,13 @@ int add_watch_to_object(struct watch *watch, struct watch_list *wlist)
        watch->cred = get_current_cred();
        rcu_assign_pointer(watch->watch_list, wlist);
 
+       if (atomic_inc_return(&watch->cred->user->nr_watches) >
+           task_rlimit(current, RLIMIT_NOFILE)) {
+               atomic_dec(&watch->cred->user->nr_watches);
+               put_cred(watch->cred);
+               return -EAGAIN;
+       }
+
        spin_lock_bh(&wqueue->lock);
        kref_get(&wqueue->usage);
        kref_get(&watch->usage);
index a164785..e068c3c 100644 (file)
@@ -365,6 +365,17 @@ config SECTION_MISMATCH_WARN_ONLY
 
          If unsure, say Y.
 
+config DEBUG_FORCE_FUNCTION_ALIGN_32B
+       bool "Force all function address 32B aligned" if EXPERT
+       help
+         There are cases where a commit in one domain changes the function
+         address alignment of other domains and causes an unexpected
+         performance bump (regression or improvement). Enabling this option
+         helps verify whether such a bump is caused by function alignment
+         changes, at the cost of a slightly larger kernel and changed icache
+         usage.
+
+         It is mainly for debug and performance tuning use.
+
 #
 # Select this config option from the architecture Kconfig, if it
 # is preferred to always offer frame pointers as a config
@@ -906,7 +917,7 @@ config PANIC_TIMEOUT
        int "panic timeout"
        default 0
        help
-         Set the timeout value (in seconds) until a reboot occurs when the
+         Set the timeout value (in seconds) until a reboot occurs when
          the kernel panics. If n = 0, then we wait forever. A timeout
          value n > 0 will wait n seconds before rebooting, while a timeout
          value n < 0 will reboot immediately.
@@ -1067,6 +1078,7 @@ config WQ_WATCHDOG
 
 config TEST_LOCKUP
        tristate "Test module to generate lockups"
+       depends on m
        help
          This builds the "test_lockup" module that helps to make sure
          that watchdogs and lockup detectors are working properly.
@@ -2203,7 +2215,7 @@ config LIST_KUNIT_TEST
          and associated macros.
 
          KUnit tests run during boot and output the results to the debug log
-         in TAP format (http://testanything.org/). Only useful for kernel devs
+         in TAP format (https://testanything.org/). Only useful for kernel devs
          running the KUnit test harness, and not intended for inclusion into a
          production build.
 
@@ -2224,6 +2236,17 @@ config LINEAR_RANGES_TEST
 
          If unsure, say N.
 
+config BITS_TEST
+       tristate "KUnit test for bits.h"
+       depends on KUNIT
+       help
+         This builds the bits unit test.
+         Tests the logic of macros defined in bits.h.
+         For more information on KUnit and unit tests in general please refer
+         to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+         If unsure, say N.
+
 config TEST_UDELAY
        tristate "udelay test driver"
        help
index f399621..a4a4c68 100644 (file)
@@ -3,10 +3,7 @@
 # Makefile for some libs needed in the kernel.
 #
 
-ifdef CONFIG_FUNCTION_TRACER
-ORIG_CFLAGS := $(KBUILD_CFLAGS)
-KBUILD_CFLAGS = $(subst $(CC_FLAGS_FTRACE),,$(ORIG_CFLAGS))
-endif
+ccflags-remove-$(CONFIG_FUNCTION_TRACER) += $(CC_FLAGS_FTRACE)
 
 # These files are disabled because they produce lots of non-interesting and/or
 # flaky coverage that is not a function of syscall inputs. For example,
@@ -18,11 +15,16 @@ KCOV_INSTRUMENT_debugobjects.o := n
 KCOV_INSTRUMENT_dynamic_debug.o := n
 KCOV_INSTRUMENT_fault-inject.o := n
 
+# string.o implements standard library functions like memset/memcpy etc.
+# Use -ffreestanding to ensure that the compiler does not try to "optimize"
+# them into calls to themselves.
+CFLAGS_string.o := -ffreestanding
+
 # Early boot use of cmdline, don't instrument it
 ifdef CONFIG_AMD_MEM_ENCRYPT
 KASAN_SANITIZE_string.o := n
 
-CFLAGS_string.o := $(call cc-option, -fno-stack-protector)
+CFLAGS_string.o += -fno-stack-protector
 endif
 
 # Used by KCSAN while enabled, avoid recursion.
@@ -324,7 +326,7 @@ endif
 UBSAN_SANITIZE_ubsan.o := n
 KASAN_SANITIZE_ubsan.o := n
 KCSAN_SANITIZE_ubsan.o := n
-CFLAGS_ubsan.o := $(call cc-option, -fno-stack-protector) $(DISABLE_STACKLEAK_PLUGIN)
+CFLAGS_ubsan.o := -fno-stack-protector $(DISABLE_STACKLEAK_PLUGIN)
 
 obj-$(CONFIG_SBITMAP) += sbitmap.o
 
@@ -345,3 +347,4 @@ obj-$(CONFIG_PLDMFW) += pldmfw/
 # KUnit tests
 obj-$(CONFIG_LIST_KUNIT_TEST) += list-test.o
 obj-$(CONFIG_LINEAR_RANGES_TEST) += test_linear_ranges.o
+obj-$(CONFIG_BITS_TEST) += test_bits.o
index 0364452..c13d859 100644 (file)
@@ -212,13 +212,13 @@ void bitmap_cut(unsigned long *dst, const unsigned long *src,
        unsigned long keep = 0, carry;
        int i;
 
-       memmove(dst, src, len * sizeof(*dst));
-
        if (first % BITS_PER_LONG) {
                keep = src[first / BITS_PER_LONG] &
                       (~0UL >> (BITS_PER_LONG - first % BITS_PER_LONG));
        }
 
+       memmove(dst, src, len * sizeof(*dst));
+
        while (cut--) {
                for (i = first / BITS_PER_LONG; i < len; i++) {
                        if (i < len - 1)
index f8928ce..47cfa05 100644 (file)
@@ -4,7 +4,7 @@
  *
  * This is a basic crc64 implementation following ECMA-182 specification,
  * which can be found from,
- * http://www.ecma-international.org/publications/standards/Ecma-182.htm
+ * https://www.ecma-international.org/publications/standards/Ecma-182.htm
  *
  * Dr. Ross N. Williams has a great document to introduce the idea of CRC
  * algorithm, here the CRC64 code is also inspired by the table-driven
index 7c4932e..f9628f3 100644 (file)
@@ -34,7 +34,7 @@
                Phone (337) 232-1234 or 1-800-738-2226
                Fax   (337) 232-1297
 
-               http://www.hospiceacadiana.com/
+               https://www.hospiceacadiana.com/
 
        Manuel
  */
index ed7a1fd..1cf409e 100644 (file)
@@ -8,7 +8,7 @@
  *implementation for lzma.
  *Copyright (C) 2006  Aurelien Jacobs < aurel@gnuage.org >
  *
- *Based on LzmaDecode.c from the LZMA SDK 4.22 (http://www.7-zip.org/)
+ *Based on LzmaDecode.c from the LZMA SDK 4.22 (https://www.7-zip.org/)
  *Copyright (C) 1999-2005  Igor Pavlov
  *
  *Copyrights of the parts, see headers below.
@@ -56,7 +56,7 @@ static long long INIT read_int(unsigned char *ptr, int size)
 /* Small range coder implementation for lzma.
  *Copyright (C) 2006  Aurelien Jacobs < aurel@gnuage.org >
  *
- *Based on LzmaDecode.c from the LZMA SDK 4.22 (http://www.7-zip.org/)
+ *Based on LzmaDecode.c from the LZMA SDK 4.22 (https://www.7-zip.org/)
  *Copyright (c) 1999-2005  Igor Pavlov
  */
 
@@ -213,7 +213,7 @@ rc_bit_tree_decode(struct rc *rc, uint16_t *p, int num_levels, int *symbol)
  * Small lzma deflate implementation.
  * Copyright (C) 2006  Aurelien Jacobs < aurel@gnuage.org >
  *
- * Based on LzmaDecode.c from the LZMA SDK 4.22 (http://www.7-zip.org/)
+ * Based on LzmaDecode.c from the LZMA SDK 4.22 (https://www.7-zip.org/)
  * Copyright (C) 1999-2005  Igor Pavlov
  */
 
index e909ab7..fbaa3e8 100644 (file)
@@ -70,27 +70,27 @@ static void bad_io_access(unsigned long port, const char *access)
 #define mmio_read64be(addr) swab64(readq(addr))
 #endif
 
-unsigned int ioread8(void __iomem *addr)
+unsigned int ioread8(const void __iomem *addr)
 {
        IO_COND(addr, return inb(port), return readb(addr));
        return 0xff;
 }
-unsigned int ioread16(void __iomem *addr)
+unsigned int ioread16(const void __iomem *addr)
 {
        IO_COND(addr, return inw(port), return readw(addr));
        return 0xffff;
 }
-unsigned int ioread16be(void __iomem *addr)
+unsigned int ioread16be(const void __iomem *addr)
 {
        IO_COND(addr, return pio_read16be(port), return mmio_read16be(addr));
        return 0xffff;
 }
-unsigned int ioread32(void __iomem *addr)
+unsigned int ioread32(const void __iomem *addr)
 {
        IO_COND(addr, return inl(port), return readl(addr));
        return 0xffffffff;
 }
-unsigned int ioread32be(void __iomem *addr)
+unsigned int ioread32be(const void __iomem *addr)
 {
        IO_COND(addr, return pio_read32be(port), return mmio_read32be(addr));
        return 0xffffffff;
@@ -142,26 +142,26 @@ static u64 pio_read64be_hi_lo(unsigned long port)
        return lo | (hi << 32);
 }
 
-u64 ioread64_lo_hi(void __iomem *addr)
+u64 ioread64_lo_hi(const void __iomem *addr)
 {
        IO_COND(addr, return pio_read64_lo_hi(port), return readq(addr));
        return 0xffffffffffffffffULL;
 }
 
-u64 ioread64_hi_lo(void __iomem *addr)
+u64 ioread64_hi_lo(const void __iomem *addr)
 {
        IO_COND(addr, return pio_read64_hi_lo(port), return readq(addr));
        return 0xffffffffffffffffULL;
 }
 
-u64 ioread64be_lo_hi(void __iomem *addr)
+u64 ioread64be_lo_hi(const void __iomem *addr)
 {
        IO_COND(addr, return pio_read64be_lo_hi(port),
                return mmio_read64be(addr));
        return 0xffffffffffffffffULL;
 }
 
-u64 ioread64be_hi_lo(void __iomem *addr)
+u64 ioread64be_hi_lo(const void __iomem *addr)
 {
        IO_COND(addr, return pio_read64be_hi_lo(port),
                return mmio_read64be(addr));
@@ -275,7 +275,7 @@ EXPORT_SYMBOL(iowrite64be_hi_lo);
  * order" (we also don't have IO barriers).
  */
 #ifndef mmio_insb
-static inline void mmio_insb(void __iomem *addr, u8 *dst, int count)
+static inline void mmio_insb(const void __iomem *addr, u8 *dst, int count)
 {
        while (--count >= 0) {
                u8 data = __raw_readb(addr);
@@ -283,7 +283,7 @@ static inline void mmio_insb(void __iomem *addr, u8 *dst, int count)
                dst++;
        }
 }
-static inline void mmio_insw(void __iomem *addr, u16 *dst, int count)
+static inline void mmio_insw(const void __iomem *addr, u16 *dst, int count)
 {
        while (--count >= 0) {
                u16 data = __raw_readw(addr);
@@ -291,7 +291,7 @@ static inline void mmio_insw(void __iomem *addr, u16 *dst, int count)
                dst++;
        }
 }
-static inline void mmio_insl(void __iomem *addr, u32 *dst, int count)
+static inline void mmio_insl(const void __iomem *addr, u32 *dst, int count)
 {
        while (--count >= 0) {
                u32 data = __raw_readl(addr);
@@ -325,15 +325,15 @@ static inline void mmio_outsl(void __iomem *addr, const u32 *src, int count)
 }
 #endif
 
-void ioread8_rep(void __iomem *addr, void *dst, unsigned long count)
+void ioread8_rep(const void __iomem *addr, void *dst, unsigned long count)
 {
        IO_COND(addr, insb(port,dst,count), mmio_insb(addr, dst, count));
 }
-void ioread16_rep(void __iomem *addr, void *dst, unsigned long count)
+void ioread16_rep(const void __iomem *addr, void *dst, unsigned long count)
 {
        IO_COND(addr, insw(port,dst,count), mmio_insw(addr, dst, count));
 }
-void ioread32_rep(void __iomem *addr, void *dst, unsigned long count)
+void ioread32_rep(const void __iomem *addr, void *dst, unsigned long count)
 {
        IO_COND(addr, insl(port,dst,count), mmio_insl(addr, dst, count));
 }
index 1006bf7..a14ccf9 100644 (file)
@@ -115,8 +115,7 @@ static int _kstrtoull(const char *s, unsigned int base, unsigned long long *res)
  * @res: Where to write the result of the conversion on success.
  *
  * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
- * Used as a replacement for the obsolete simple_strtoull. Return code must
- * be checked.
+ * Preferred over simple_strtoull(). Return code must be checked.
  */
 int kstrtoull(const char *s, unsigned int base, unsigned long long *res)
 {
@@ -139,8 +138,7 @@ EXPORT_SYMBOL(kstrtoull);
  * @res: Where to write the result of the conversion on success.
  *
  * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
- * Used as a replacement for the obsolete simple_strtoull. Return code must
- * be checked.
+ * Preferred over simple_strtoll(). Return code must be checked.
  */
 int kstrtoll(const char *s, unsigned int base, long long *res)
 {
@@ -211,8 +209,7 @@ EXPORT_SYMBOL(_kstrtol);
  * @res: Where to write the result of the conversion on success.
  *
  * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
- * Used as a replacement for the obsolete simple_strtoull. Return code must
- * be checked.
+ * Preferred over simple_strtoul(). Return code must be checked.
  */
 int kstrtouint(const char *s, unsigned int base, unsigned int *res)
 {
@@ -242,8 +239,7 @@ EXPORT_SYMBOL(kstrtouint);
  * @res: Where to write the result of the conversion on success.
  *
  * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
- * Used as a replacement for the obsolete simple_strtoull. Return code must
- * be checked.
+ * Preferred over simple_strtol(). Return code must be checked.
  */
 int kstrtoint(const char *s, unsigned int base, int *res)
 {
index 295b94b..dcc912b 100644 (file)
@@ -12,7 +12,3 @@ obj-$(CONFIG_TEST_LIVEPATCH) += test_klp_atomic_replace.o \
                                test_klp_state.o \
                                test_klp_state2.o \
                                test_klp_state3.o
-
-# Target modules to be livepatched require CC_FLAGS_FTRACE
-CFLAGS_test_klp_callbacks_busy.o       += $(CC_FLAGS_FTRACE)
-CFLAGS_test_klp_callbacks_mod.o                += $(CC_FLAGS_FTRACE)
index cc7b6d4..90bb679 100644 (file)
@@ -446,7 +446,7 @@ _last_literals:
                        *op++ = (BYTE)(lastRun << ML_BITS);
                }
 
-               memcpy(op, anchor, lastRun);
+               LZ4_memcpy(op, anchor, lastRun);
 
                op += lastRun;
        }
@@ -708,7 +708,7 @@ _last_literals:
                } else {
                        *op++ = (BYTE)(lastRunSize<<ML_BITS);
                }
-               memcpy(op, anchor, lastRunSize);
+               LZ4_memcpy(op, anchor, lastRunSize);
                op += lastRunSize;
        }
 
index 5371dab..00cb0d0 100644 (file)
@@ -153,7 +153,7 @@ static FORCE_INLINE int LZ4_decompress_generic(
                   && likely((endOnInput ? ip < shortiend : 1) &
                             (op <= shortoend))) {
                        /* Copy the literals */
-                       memcpy(op, ip, endOnInput ? 16 : 8);
+                       LZ4_memcpy(op, ip, endOnInput ? 16 : 8);
                        op += length; ip += length;
 
                        /*
@@ -172,9 +172,9 @@ static FORCE_INLINE int LZ4_decompress_generic(
                            (offset >= 8) &&
                            (dict == withPrefix64k || match >= lowPrefix)) {
                                /* Copy the match. */
-                               memcpy(op + 0, match + 0, 8);
-                               memcpy(op + 8, match + 8, 8);
-                               memcpy(op + 16, match + 16, 2);
+                               LZ4_memcpy(op + 0, match + 0, 8);
+                               LZ4_memcpy(op + 8, match + 8, 8);
+                               LZ4_memcpy(op + 16, match + 16, 2);
                                op += length + MINMATCH;
                                /* Both stages worked, load the next token. */
                                continue;
@@ -263,7 +263,7 @@ static FORCE_INLINE int LZ4_decompress_generic(
                                }
                        }
 
-                       memcpy(op, ip, length);
+                       LZ4_memcpy(op, ip, length);
                        ip += length;
                        op += length;
 
@@ -350,7 +350,7 @@ _copy_match:
                                size_t const copySize = (size_t)(lowPrefix - match);
                                size_t const restSize = length - copySize;
 
-                               memcpy(op, dictEnd - copySize, copySize);
+                               LZ4_memcpy(op, dictEnd - copySize, copySize);
                                op += copySize;
                                if (restSize > (size_t)(op - lowPrefix)) {
                                        /* overlap copy */
@@ -360,7 +360,7 @@ _copy_match:
                                        while (op < endOfMatch)
                                                *op++ = *copyFrom++;
                                } else {
-                                       memcpy(op, lowPrefix, restSize);
+                                       LZ4_memcpy(op, lowPrefix, restSize);
                                        op += restSize;
                                }
                        }
@@ -386,7 +386,7 @@ _copy_match:
                                while (op < copyEnd)
                                        *op++ = *match++;
                        } else {
-                               memcpy(op, match, mlen);
+                               LZ4_memcpy(op, match, mlen);
                        }
                        op = copyEnd;
                        if (op == oend)
@@ -400,7 +400,7 @@ _copy_match:
                        op[2] = match[2];
                        op[3] = match[3];
                        match += inc32table[offset];
-                       memcpy(op + 4, match, 4);
+                       LZ4_memcpy(op + 4, match, 4);
                        match -= dec64table[offset];
                } else {
                        LZ4_copy8(op, match);
index 1a7fa9d..c91dd96 100644 (file)
@@ -137,6 +137,16 @@ static FORCE_INLINE void LZ4_writeLE16(void *memPtr, U16 value)
        return put_unaligned_le16(value, memPtr);
 }
 
+/*
+ * LZ4 relies on memcpy with a constant size being inlined. In freestanding
+ * environments, the compiler can't assume the implementation of memcpy() is
+ * standard compliant, so it cannot apply its specialized memcpy() inlining
+ * logic. When possible, use __builtin_memcpy() to tell the compiler to
+ * analyze memcpy() as if it were standard compliant, so it can inline it in
+ * freestanding environments. This is needed when decompressing the Linux
+ * Kernel, for example.
+ */
+#define LZ4_memcpy(dst, src, size) __builtin_memcpy(dst, src, size)
+
 static FORCE_INLINE void LZ4_copy8(void *dst, const void *src)
 {
 #if LZ4_ARCH64
index 1b61d87..e7ac869 100644 (file)
@@ -570,7 +570,7 @@ _Search3:
                        *op++ = (BYTE) lastRun;
                } else
                        *op++ = (BYTE)(lastRun<<ML_BITS);
-               memcpy(op, anchor, iend - anchor);
+               LZ4_memcpy(op, anchor, iend - anchor);
                op += iend - anchor;
        }
 
index 31fb27d..df75c88 100644 (file)
@@ -27,7 +27,7 @@
  * with the fractional part size described in given_denominator.
  *
  * for theoretical background, see:
- * http://en.wikipedia.org/wiki/Continued_fraction
+ * https://en.wikipedia.org/wiki/Continued_fraction
  */
 
 void rational_best_approximation(
index 3d749ab..9323453 100644 (file)
@@ -39,6 +39,7 @@
 #include <linux/random.h>
 #include <linux/sched.h>
 #include <asm/unaligned.h>
+#include <trace/events/random.h>
 
 #ifdef CONFIG_RANDOM32_SELFTEST
 static void __init prandom_state_selftest(void);
@@ -82,6 +83,7 @@ u32 prandom_u32(void)
        u32 res;
 
        res = prandom_u32_state(state);
+       trace_prandom_u32(res);
        put_cpu_var(net_rand_state);
 
        return res;
index 8545872..c4ac5c2 100644 (file)
@@ -13,7 +13,7 @@
 #include <linux/export.h>
 
 /*
- * red-black trees properties:  http://en.wikipedia.org/wiki/Rbtree
+ * red-black trees properties:  https://en.wikipedia.org/wiki/Rbtree
  *
  *  1) A node is either red or black
  *  2) The root is black
index 6b13150..df903c5 100644 (file)
@@ -610,6 +610,63 @@ static void __init test_for_each_set_clump8(void)
                expect_eq_clump8(start, CLUMP_EXP_NUMBITS, clump_exp, &clump);
 }
 
+struct test_bitmap_cut {
+       unsigned int first;
+       unsigned int cut;
+       unsigned int nbits;
+       unsigned long in[4];
+       unsigned long expected[4];
+};
+
+static struct test_bitmap_cut test_cut[] = {
+       {  0,  0,  8, { 0x0000000aUL, }, { 0x0000000aUL, }, },
+       {  0,  0, 32, { 0xdadadeadUL, }, { 0xdadadeadUL, }, },
+       {  0,  3,  8, { 0x000000aaUL, }, { 0x00000015UL, }, },
+       {  3,  3,  8, { 0x000000aaUL, }, { 0x00000012UL, }, },
+       {  0,  1, 32, { 0xa5a5a5a5UL, }, { 0x52d2d2d2UL, }, },
+       {  0,  8, 32, { 0xdeadc0deUL, }, { 0x00deadc0UL, }, },
+       {  1,  1, 32, { 0x5a5a5a5aUL, }, { 0x2d2d2d2cUL, }, },
+       {  0, 15, 32, { 0xa5a5a5a5UL, }, { 0x00014b4bUL, }, },
+       {  0, 16, 32, { 0xa5a5a5a5UL, }, { 0x0000a5a5UL, }, },
+       { 15, 15, 32, { 0xa5a5a5a5UL, }, { 0x000125a5UL, }, },
+       { 15, 16, 32, { 0xa5a5a5a5UL, }, { 0x0000a5a5UL, }, },
+       { 16, 15, 32, { 0xa5a5a5a5UL, }, { 0x0001a5a5UL, }, },
+
+       { BITS_PER_LONG, BITS_PER_LONG, BITS_PER_LONG,
+               { 0xa5a5a5a5UL, 0xa5a5a5a5UL, },
+               { 0xa5a5a5a5UL, 0xa5a5a5a5UL, },
+       },
+       { 1, BITS_PER_LONG - 1, BITS_PER_LONG,
+               { 0xa5a5a5a5UL, 0xa5a5a5a5UL, },
+               { 0x00000001UL, 0x00000001UL, },
+       },
+
+       { 0, BITS_PER_LONG * 2, BITS_PER_LONG * 2 + 1,
+               { 0xa5a5a5a5UL, 0x00000001UL, 0x00000001UL, 0x00000001UL },
+               { 0x00000001UL, },
+       },
+       { 16, BITS_PER_LONG * 2 + 1, BITS_PER_LONG * 2 + 1 + 16,
+               { 0x0000ffffUL, 0x5a5a5a5aUL, 0x5a5a5a5aUL, 0x5a5a5a5aUL },
+               { 0x2d2dffffUL, },
+       },
+};
+
+static void __init test_bitmap_cut(void)
+{
+       unsigned long b[5], *in = &b[1], *out = &b[0];  /* Partial overlap */
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(test_cut); i++) {
+               struct test_bitmap_cut *t = &test_cut[i];
+
+               memcpy(in, t->in, sizeof(t->in));
+
+               bitmap_cut(out, in, t->first, t->cut, t->nbits);
+
+               expect_eq_bitmap(t->expected, out, t->nbits);
+       }
+}
+
 static void __init selftest(void)
 {
        test_zero_clear();
@@ -623,6 +680,7 @@ static void __init selftest(void)
        test_bitmap_parselist_user();
        test_mem_optimisations();
        test_for_each_set_clump8();
+       test_bitmap_cut();
 }
 
 KSTM_MODULE_LOADERS(test_bitmap);
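
To make the vectors in test_cut[] above easier to read, here is a hypothetical single-word illustration (plain C, not the kernel's multi-word bitmap_cut()) of the operation being exercised: drop 'cut' bits starting at bit 'first' and close the gap by shifting the higher bits down.

#include <stdio.h>

/* Single-word sketch of the cut operation tested above. */
static unsigned long cut_word(unsigned long in, unsigned int first,
                              unsigned int cut)
{
        unsigned long keep_low = in & ((1UL << first) - 1);
        unsigned long keep_high = in >> (first + cut);

        return keep_low | (keep_high << first);
}

int main(void)
{
        /* Mirrors the { 0, 3, 8, ... } and { 3, 3, 8, ... } vectors. */
        printf("%#lx\n", cut_word(0xaaUL, 0, 3));       /* 0x15 */
        printf("%#lx\n", cut_word(0xaaUL, 3, 3));       /* 0x12 */
        return 0;
}
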
index ced25e3..471141d 100644 (file)
@@ -52,9 +52,9 @@ static unsigned long order_comb_long[][2] = {
 
 static int __init test_bitops_startup(void)
 {
-       int i;
+       int i, bit_set;
 
-       pr_warn("Loaded test module\n");
+       pr_info("Starting bitops test\n");
        set_bit(BITOPS_4, g_bitmap);
        set_bit(BITOPS_7, g_bitmap);
        set_bit(BITOPS_11, g_bitmap);
@@ -81,12 +81,8 @@ static int __init test_bitops_startup(void)
                                       order_comb_long[i][0]);
        }
 #endif
-       return 0;
-}
 
-static void __exit test_bitops_unstartup(void)
-{
-       int bit_set;
+       barrier();
 
        clear_bit(BITOPS_4, g_bitmap);
        clear_bit(BITOPS_7, g_bitmap);
@@ -98,7 +94,13 @@ static void __exit test_bitops_unstartup(void)
        if (bit_set != BITOPS_LAST)
                pr_err("ERROR: FOUND SET BIT %d\n", bit_set);
 
-       pr_warn("Unloaded test module\n");
+       pr_info("Completed bitops test\n");
+
+       return 0;
+}
+
+static void __exit test_bitops_unstartup(void)
+{
 }
 
 module_init(test_bitops_startup);
diff --git a/lib/test_bits.c b/lib/test_bits.c
new file mode 100644 (file)
index 0000000..c9368a2
--- /dev/null
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Test cases for functions and macros in bits.h
+ */
+
+#include <kunit/test.h>
+#include <linux/bits.h>
+
+
+static void genmask_test(struct kunit *test)
+{
+       KUNIT_EXPECT_EQ(test, 1ul, GENMASK(0, 0));
+       KUNIT_EXPECT_EQ(test, 3ul, GENMASK(1, 0));
+       KUNIT_EXPECT_EQ(test, 6ul, GENMASK(2, 1));
+       KUNIT_EXPECT_EQ(test, 0xFFFFFFFFul, GENMASK(31, 0));
+
+#ifdef TEST_GENMASK_FAILURES
+       /* these should fail compilation */
+       GENMASK(0, 1);
+       GENMASK(0, 10);
+       GENMASK(9, 10);
+#endif
+
+
+}
+
+static void genmask_ull_test(struct kunit *test)
+{
+       KUNIT_EXPECT_EQ(test, 1ull, GENMASK_ULL(0, 0));
+       KUNIT_EXPECT_EQ(test, 3ull, GENMASK_ULL(1, 0));
+       KUNIT_EXPECT_EQ(test, 0x000000ffffe00000ull, GENMASK_ULL(39, 21));
+       KUNIT_EXPECT_EQ(test, 0xffffffffffffffffull, GENMASK_ULL(63, 0));
+
+#ifdef TEST_GENMASK_FAILURES
+       /* these should fail compilation */
+       GENMASK_ULL(0, 1);
+       GENMASK_ULL(0, 10);
+       GENMASK_ULL(9, 10);
+#endif
+}
+
+static void genmask_input_check_test(struct kunit *test)
+{
+       unsigned int x, y;
+       int z, w;
+
+       /* Unknown input */
+       KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(x, 0));
+       KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(0, x));
+       KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(x, y));
+
+       KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(z, 0));
+       KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(0, z));
+       KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(z, w));
+
+       /* Valid input */
+       KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(1, 1));
+       KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(39, 21));
+}
+
+
+static struct kunit_case bits_test_cases[] = {
+       KUNIT_CASE(genmask_test),
+       KUNIT_CASE(genmask_ull_test),
+       KUNIT_CASE(genmask_input_check_test),
+       {}
+};
+
+static struct kunit_suite bits_test_suite = {
+       .name = "bits-test",
+       .test_cases = bits_test_cases,
+};
+kunit_test_suite(bits_test_suite);
+
+MODULE_LICENSE("GPL");
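
A small user-space restatement of the property these cases check, assuming a 64-bit unsigned long; MY_GENMASK is a hypothetical local macro rather than the <linux/bits.h> implementation and performs no input checking:

#include <stdio.h>

/* Hypothetical restatement of the GENMASK() contract: a mask with bits
 * h..l (inclusive) set. */
#define MY_GENMASK(h, l) \
        ((~0UL >> (sizeof(unsigned long) * 8 - 1 - (h))) & (~0UL << (l)))

int main(void)
{
        printf("%#lx\n", MY_GENMASK(2, 1));     /* 0x6, as in genmask_test() */
        printf("%#lx\n", MY_GENMASK(39, 21));   /* 0xffffe00000 on LP64 */
        return 0;
}
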
index e651c37..eab5277 100644 (file)
@@ -745,7 +745,7 @@ static int trigger_config_run_type(struct kmod_test_device *test_dev,
                break;
        case TEST_KMOD_FS_TYPE:
                kfree_const(config->test_fs);
-               config->test_driver = NULL;
+               config->test_fs = NULL;
                copied = config_copy_test_fs(config, test_str,
                                             strlen(test_str));
                break;
index ff26f36..f1a020b 100644 (file)
@@ -400,7 +400,7 @@ static void test_lockup(bool master)
        test_unlock(master, true);
 }
 
-DEFINE_PER_CPU(struct work_struct, test_works);
+static DEFINE_PER_CPU(struct work_struct, test_works);
 
 static void test_work_fn(struct work_struct *work)
 {
@@ -512,8 +512,8 @@ static int __init test_lockup_init(void)
        if (test_file_path[0]) {
                test_file = filp_open(test_file_path, O_RDONLY, 0);
                if (IS_ERR(test_file)) {
-                       pr_err("cannot find file_path\n");
-                       return -EINVAL;
+                       pr_err("failed to open %s: %ld\n", test_file_path, PTR_ERR(test_file));
+                       return PTR_ERR(test_file);
                }
                test_inode = file_inode(test_file);
        } else if (test_lock_inode ||
index 277cb44..4cf2500 100644 (file)
@@ -11,7 +11,7 @@
  *   [1] A Fast String Searching Algorithm, R.S. Boyer and Moore.
  *       Communications of the Association for Computing Machinery, 
  *       20(10), 1977, pp. 762-772.
- *       http://www.cs.utexas.edu/users/moore/publications/fstrpos.pdf
+ *       https://www.cs.utexas.edu/users/moore/publications/fstrpos.pdf
  *
  *   [2] Handbook of Exact String Matching Algorithms, Thierry Lecroq, 2004
  *       http://www-igm.univ-mlv.fr/~lecroq/string/string.pdf
index bcc9a98..2919f16 100644 (file)
@@ -68,7 +68,7 @@ static int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
                if (unlikely(!vdso_clocksource_ok(vd)))
                        return -1;
 
-               cycles = __arch_get_hw_counter(vd->clock_mode);
+               cycles = __arch_get_hw_counter(vd->clock_mode, vd);
                if (unlikely(!vdso_cycles_ok(cycles)))
                        return -1;
                ns = vdso_ts->nsec;
@@ -138,7 +138,7 @@ static __always_inline int do_hres(const struct vdso_data *vd, clockid_t clk,
                if (unlikely(!vdso_clocksource_ok(vd)))
                        return -1;
 
-               cycles = __arch_get_hw_counter(vd->clock_mode);
+               cycles = __arch_get_hw_counter(vd->clock_mode, vd);
                if (unlikely(!vdso_cycles_ok(cycles)))
                        return -1;
                ns = vdso_ts->nsec;
index aa61e2a..d5bb9ff 100644 (file)
@@ -34,7 +34,7 @@
  * ("BSD").
  *
  * You can contact the author at:
- * - xxHash homepage: http://cyan4973.github.io/xxHash/
+ * - xxHash homepage: https://cyan4973.github.io/xxHash/
  * - xxHash source repository: https://github.com/Cyan4973/xxHash
  */
 
index 912aae5..88a2c35 100644 (file)
@@ -2,7 +2,7 @@
  * CRC32 using the polynomial from IEEE-802.3
  *
  * Authors: Lasse Collin <lasse.collin@tukaani.org>
- *          Igor Pavlov <http://7-zip.org/>
+ *          Igor Pavlov <https://7-zip.org/>
  *
  * This file has been put into the public domain.
  * You can do whatever you want with this file.
index a768e6d..72ddac6 100644 (file)
@@ -2,7 +2,7 @@
  * Branch/Call/Jump (BCJ) filter decoders
  *
  * Authors: Lasse Collin <lasse.collin@tukaani.org>
- *          Igor Pavlov <http://7-zip.org/>
+ *          Igor Pavlov <https://7-zip.org/>
  *
  * This file has been put into the public domain.
  * You can do whatever you want with this file.
index 156f26f..9f336bc 100644 (file)
@@ -2,7 +2,7 @@
  * LZMA2 decoder
  *
  * Authors: Lasse Collin <lasse.collin@tukaani.org>
- *          Igor Pavlov <http://7-zip.org/>
+ *          Igor Pavlov <https://7-zip.org/>
  *
  * This file has been put into the public domain.
  * You can do whatever you want with this file.
index 071d67b..92d852d 100644 (file)
@@ -2,7 +2,7 @@
  * LZMA2 definitions
  *
  * Authors: Lasse Collin <lasse.collin@tukaani.org>
- *          Igor Pavlov <http://7-zip.org/>
+ *          Igor Pavlov <https://7-zip.org/>
  *
  * This file has been put into the public domain.
  * You can do whatever you want with this file.
index 66cb5a7..430bb3a 100644 (file)
@@ -19,7 +19,7 @@
 
 /*
  * See the .xz file format specification at
- * http://tukaani.org/xz/xz-file-format.txt
+ * https://tukaani.org/xz/xz-file-format.txt
  * to understand the container format.
  */
 
index 26ecff8..7f415d7 100644 (file)
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -52,7 +52,7 @@ unsigned long cma_get_size(const struct cma *cma)
 
 const char *cma_get_name(const struct cma *cma)
 {
-       return cma->name ? cma->name : "(undefined)";
+       return cma->name;
 }
 
 static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
@@ -93,17 +93,15 @@ static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
        mutex_unlock(&cma->lock);
 }
 
-static int __init cma_activate_area(struct cma *cma)
+static void __init cma_activate_area(struct cma *cma)
 {
        unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
        unsigned i = cma->count >> pageblock_order;
        struct zone *zone;
 
        cma->bitmap = bitmap_zalloc(cma_bitmap_maxno(cma), GFP_KERNEL);
-       if (!cma->bitmap) {
-               cma->count = 0;
-               return -ENOMEM;
-       }
+       if (!cma->bitmap)
+               goto out_error;
 
        WARN_ON_ONCE(!pfn_valid(pfn));
        zone = page_zone(pfn_to_page(pfn));
@@ -133,25 +131,22 @@ static int __init cma_activate_area(struct cma *cma)
        spin_lock_init(&cma->mem_head_lock);
 #endif
 
-       return 0;
+       return;
 
 not_in_zone:
-       pr_err("CMA area %s could not be activated\n", cma->name);
        bitmap_free(cma->bitmap);
+out_error:
        cma->count = 0;
-       return -EINVAL;
+       pr_err("CMA area %s could not be activated\n", cma->name);
+       return;
 }
 
 static int __init cma_init_reserved_areas(void)
 {
        int i;
 
-       for (i = 0; i < cma_area_count; i++) {
-               int ret = cma_activate_area(&cma_areas[i]);
-
-               if (ret)
-                       return ret;
-       }
+       for (i = 0; i < cma_area_count; i++)
+               cma_activate_area(&cma_areas[i]);
 
        return 0;
 }
@@ -202,13 +197,12 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
         * subsystems (like slab allocator) are available.
         */
        cma = &cma_areas[cma_area_count];
-       if (name) {
-               cma->name = name;
-       } else {
-               cma->name = kasprintf(GFP_KERNEL, "cma%d\n", cma_area_count);
-               if (!cma->name)
-                       return -ENOMEM;
-       }
+
+       if (name)
+               snprintf(cma->name, CMA_MAX_NAME, name);
+       else
+               snprintf(cma->name, CMA_MAX_NAME,  "cma%d\n", cma_area_count);
+
        cma->base_pfn = PFN_DOWN(base);
        cma->count = size >> PAGE_SHIFT;
        cma->order_per_bit = order_per_bit;
@@ -425,7 +419,7 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
        struct page *page = NULL;
        int ret = -ENOMEM;
 
-       if (!cma || !cma->count)
+       if (!cma || !cma->count || !cma->bitmap)
                return NULL;
 
        pr_debug("%s(cma %p, count %zu, align %d)\n", __func__, (void *)cma,
index 6698fa6..20f6e24 100644 (file)
--- a/mm/cma.h
+++ b/mm/cma.h
@@ -4,6 +4,8 @@
 
 #include <linux/debugfs.h>
 
+#define CMA_MAX_NAME 64
+
 struct cma {
        unsigned long   base_pfn;
        unsigned long   count;
@@ -15,7 +17,7 @@ struct cma {
        spinlock_t mem_head_lock;
        struct debugfs_u32_array dfs_bitmap;
 #endif
-       const char *name;
+       char name[CMA_MAX_NAME];
 };
 
 extern struct cma cma_areas[MAX_CMA_AREAS];
index 8637560..176dcde 100644 (file)
@@ -50,6 +50,24 @@ static inline void count_compact_events(enum vm_event_item item, long delta)
 #define pageblock_start_pfn(pfn)       block_start_pfn(pfn, pageblock_order)
 #define pageblock_end_pfn(pfn)         block_end_pfn(pfn, pageblock_order)
 
+/*
+ * Fragmentation score check interval for proactive compaction purposes.
+ */
+static const unsigned int HPAGE_FRAG_CHECK_INTERVAL_MSEC = 500;
+
+/*
+ * Page order with-respect-to which proactive compaction
+ * calculates external fragmentation, which is used as
+ * the "fragmentation score" of a node/zone.
+ */
+#if defined CONFIG_TRANSPARENT_HUGEPAGE
+#define COMPACTION_HPAGE_ORDER HPAGE_PMD_ORDER
+#elif defined CONFIG_HUGETLBFS
+#define COMPACTION_HPAGE_ORDER HUGETLB_PAGE_ORDER
+#else
+#define COMPACTION_HPAGE_ORDER (PMD_SHIFT - PAGE_SHIFT)
+#endif
+
 static unsigned long release_freepages(struct list_head *freelist)
 {
        struct page *page, *next;
@@ -136,7 +154,7 @@ EXPORT_SYMBOL(__ClearPageMovable);
 
 /*
  * Compaction is deferred when compaction fails to result in a page
- * allocation success. 1 << compact_defer_limit compactions are skipped up
+ * allocation success. 1 << compact_defer_shift compactions are skipped up
  * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT
  */
 void defer_compaction(struct zone *zone, int order)
@@ -991,7 +1009,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
                del_page_from_lru_list(page, lruvec, page_lru(page));
                mod_node_page_state(page_pgdat(page),
                                NR_ISOLATED_ANON + page_is_file_lru(page),
-                               hpage_nr_pages(page));
+                               thp_nr_pages(page));
 
 isolate_success:
                list_add(&page->lru, &cc->migratepages);
@@ -1459,7 +1477,7 @@ static void isolate_freepages(struct compact_control *cc)
         * this pfn aligned down to the pageblock boundary, because we do
         * block_start_pfn -= pageblock_nr_pages in the for loop.
         * For ending point, take care when isolating in last pageblock of a
-        * zone which ends in the middle of a pageblock.
+        * zone which ends in the middle of a pageblock.
         * The low boundary is the end of the pageblock the migration scanner
         * is using.
         */
@@ -1857,6 +1875,76 @@ static inline bool is_via_compact_memory(int order)
        return order == -1;
 }
 
+static bool kswapd_is_running(pg_data_t *pgdat)
+{
+       return pgdat->kswapd && (pgdat->kswapd->state == TASK_RUNNING);
+}
+
+/*
+ * A zone's fragmentation score is the external fragmentation with respect to the
+ * COMPACTION_HPAGE_ORDER scaled by the zone's size. It returns a value
+ * in the range [0, 100].
+ *
+ * The scaling factor ensures that proactive compaction focuses on larger
+ * zones like ZONE_NORMAL, rather than smaller, specialized zones like
+ * ZONE_DMA32. For smaller zones, the score value remains close to zero,
+ * and thus never exceeds the high threshold for proactive compaction.
+ */
+static unsigned int fragmentation_score_zone(struct zone *zone)
+{
+       unsigned long score;
+
+       score = zone->present_pages *
+                       extfrag_for_order(zone, COMPACTION_HPAGE_ORDER);
+       return div64_ul(score, zone->zone_pgdat->node_present_pages + 1);
+}
+
+/*
+ * The per-node proactive (background) compaction process is started by its
+ * corresponding kcompactd thread when the node's fragmentation score
+ * exceeds the high threshold. The compaction process remains active till
+ * the node's score falls below the low threshold, or one of the back-off
+ * conditions is met.
+ */
+static unsigned int fragmentation_score_node(pg_data_t *pgdat)
+{
+       unsigned int score = 0;
+       int zoneid;
+
+       for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
+               struct zone *zone;
+
+               zone = &pgdat->node_zones[zoneid];
+               score += fragmentation_score_zone(zone);
+       }
+
+       return score;
+}
+
+static unsigned int fragmentation_score_wmark(pg_data_t *pgdat, bool low)
+{
+       unsigned int wmark_low;
+
+       /*
+        * Cap the low watermark to avoid excessive compaction
+        * activity in case a user sets the proactiveness tunable
+        * close to 100 (maximum).
+        */
+       wmark_low = max(100U - sysctl_compaction_proactiveness, 5U);
+       return low ? wmark_low : min(wmark_low + 10, 100U);
+}
+
+static bool should_proactive_compact_node(pg_data_t *pgdat)
+{
+       int wmark_high;
+
+       if (!sysctl_compaction_proactiveness || kswapd_is_running(pgdat))
+               return false;
+
+       wmark_high = fragmentation_score_wmark(pgdat, false);
+       return fragmentation_score_node(pgdat) > wmark_high;
+}
+
 static enum compact_result __compact_finished(struct compact_control *cc)
 {
        unsigned int order;
@@ -1883,6 +1971,25 @@ static enum compact_result __compact_finished(struct compact_control *cc)
                        return COMPACT_PARTIAL_SKIPPED;
        }
 
+       if (cc->proactive_compaction) {
+               int score, wmark_low;
+               pg_data_t *pgdat;
+
+               pgdat = cc->zone->zone_pgdat;
+               if (kswapd_is_running(pgdat))
+                       return COMPACT_PARTIAL_SKIPPED;
+
+               score = fragmentation_score_zone(cc->zone);
+               wmark_low = fragmentation_score_wmark(pgdat, true);
+
+               if (score > wmark_low)
+                       ret = COMPACT_CONTINUE;
+               else
+                       ret = COMPACT_SUCCESS;
+
+               goto out;
+       }
+
        if (is_via_compact_memory(cc->order))
                return COMPACT_CONTINUE;
 
@@ -1941,6 +2048,7 @@ static enum compact_result __compact_finished(struct compact_control *cc)
                }
        }
 
+out:
        if (cc->contended || fatal_signal_pending(current))
                ret = COMPACT_CONTENDED;
 
@@ -2421,6 +2529,41 @@ enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
        return rc;
 }
 
+/*
+ * Compact all zones within a node till each zone's fragmentation score
+ * reaches within proactive compaction thresholds (as determined by the
+ * proactiveness tunable).
+ *
+ * It is possible that the function returns before reaching score targets
+ * due to various back-off conditions, such as contention on per-node or
+ * per-zone locks.
+ */
+static void proactive_compact_node(pg_data_t *pgdat)
+{
+       int zoneid;
+       struct zone *zone;
+       struct compact_control cc = {
+               .order = -1,
+               .mode = MIGRATE_SYNC_LIGHT,
+               .ignore_skip_hint = true,
+               .whole_zone = true,
+               .gfp_mask = GFP_KERNEL,
+               .proactive_compaction = true,
+       };
+
+       for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
+               zone = &pgdat->node_zones[zoneid];
+               if (!populated_zone(zone))
+                       continue;
+
+               cc.zone = zone;
+
+               compact_zone(&cc, NULL);
+
+               VM_BUG_ON(!list_empty(&cc.freepages));
+               VM_BUG_ON(!list_empty(&cc.migratepages));
+       }
+}
 
 /* Compact all zones within a node */
 static void compact_node(int nid)
@@ -2468,6 +2611,13 @@ static void compact_nodes(void)
 int sysctl_compact_memory;
 
 /*
+ * Tunable for proactive compaction. It determines how
+ * aggressively the kernel should compact memory in the
+ * background. It takes values in the range [0, 100].
+ */
+unsigned int __read_mostly sysctl_compaction_proactiveness = 20;
+
+/*
  * This is the entry point for compacting all nodes via
  * /proc/sys/vm/compact_memory
  */
@@ -2646,6 +2796,7 @@ static int kcompactd(void *p)
 {
        pg_data_t *pgdat = (pg_data_t*)p;
        struct task_struct *tsk = current;
+       unsigned int proactive_defer = 0;
 
        const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
 
@@ -2661,12 +2812,34 @@ static int kcompactd(void *p)
                unsigned long pflags;
 
                trace_mm_compaction_kcompactd_sleep(pgdat->node_id);
-               wait_event_freezable(pgdat->kcompactd_wait,
-                               kcompactd_work_requested(pgdat));
+               if (wait_event_freezable_timeout(pgdat->kcompactd_wait,
+                       kcompactd_work_requested(pgdat),
+                       msecs_to_jiffies(HPAGE_FRAG_CHECK_INTERVAL_MSEC))) {
+
+                       psi_memstall_enter(&pflags);
+                       kcompactd_do_work(pgdat);
+                       psi_memstall_leave(&pflags);
+                       continue;
+               }
 
-               psi_memstall_enter(&pflags);
-               kcompactd_do_work(pgdat);
-               psi_memstall_leave(&pflags);
+               /* kcompactd wait timeout */
+               if (should_proactive_compact_node(pgdat)) {
+                       unsigned int prev_score, score;
+
+                       if (proactive_defer) {
+                               proactive_defer--;
+                               continue;
+                       }
+                       prev_score = fragmentation_score_node(pgdat);
+                       proactive_compact_node(pgdat);
+                       score = fragmentation_score_node(pgdat);
+                       /*
+                        * Defer proactive compaction if the fragmentation
+                        * score did not go down i.e. no progress made.
+                        */
+                       proactive_defer = score < prev_score ?
+                                       0 : 1 << COMPACT_MAX_DEFER_SHIFT;
+               }
        }
 
        return 0;
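
As a quick worked example of the thresholds introduced above (an illustration, not kernel code): with the default sysctl_compaction_proactiveness of 20, proactive compaction starts once a node's fragmentation score exceeds 90 and keeps running until the score drops below 80, re-evaluated on each 500 ms kcompactd wakeup.

#include <stdio.h>

/* Mirrors the arithmetic in fragmentation_score_wmark() above. */
static unsigned int wmark(unsigned int proactiveness, int low)
{
        unsigned int wmark_low = 100 - proactiveness;

        if (wmark_low < 5)
                wmark_low = 5;          /* max(100 - proactiveness, 5) */
        if (low)
                return wmark_low;
        return wmark_low + 10 > 100 ? 100 : wmark_low + 10;
}

int main(void)
{
        printf("low=%u high=%u\n", wmark(20, 1), wmark(20, 0)); /* 80 90 */
        return 0;
}
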
index f2bb5ff..1aaea26 100644 (file)
@@ -198,7 +198,7 @@ static void unaccount_page_cache_page(struct address_space *mapping,
        if (PageHuge(page))
                return;
 
-       nr = hpage_nr_pages(page);
+       nr = thp_nr_pages(page);
 
        __mod_lruvec_page_state(page, NR_FILE_PAGES, -nr);
        if (PageSwapBacked(page)) {
@@ -2468,6 +2468,7 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
        struct address_space *mapping = file->f_mapping;
        struct file *fpin = NULL;
        pgoff_t offset = vmf->pgoff;
+       unsigned int mmap_miss;
 
        /* If we don't want any read-ahead, don't bother */
        if (vmf->vma->vm_flags & VM_RAND_READ)
@@ -2483,14 +2484,15 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
        }
 
        /* Avoid banging the cache line if not needed */
-       if (ra->mmap_miss < MMAP_LOTSAMISS * 10)
-               ra->mmap_miss++;
+       mmap_miss = READ_ONCE(ra->mmap_miss);
+       if (mmap_miss < MMAP_LOTSAMISS * 10)
+               WRITE_ONCE(ra->mmap_miss, ++mmap_miss);
 
        /*
         * Do we miss much more than hit in this file? If so,
         * stop bothering with read-ahead. It will only hurt.
         */
-       if (ra->mmap_miss > MMAP_LOTSAMISS)
+       if (mmap_miss > MMAP_LOTSAMISS)
                return fpin;
 
        /*
@@ -2516,13 +2518,15 @@ static struct file *do_async_mmap_readahead(struct vm_fault *vmf,
        struct file_ra_state *ra = &file->f_ra;
        struct address_space *mapping = file->f_mapping;
        struct file *fpin = NULL;
+       unsigned int mmap_miss;
        pgoff_t offset = vmf->pgoff;
 
        /* If we don't want any read-ahead, don't bother */
        if (vmf->vma->vm_flags & VM_RAND_READ || !ra->ra_pages)
                return fpin;
-       if (ra->mmap_miss > 0)
-               ra->mmap_miss--;
+       mmap_miss = READ_ONCE(ra->mmap_miss);
+       if (mmap_miss)
+               WRITE_ONCE(ra->mmap_miss, --mmap_miss);
        if (PageReadahead(page)) {
                fpin = maybe_unlock_mmap_for_io(vmf, fpin);
                page_cache_async_readahead(mapping, ra, file,
@@ -2688,6 +2692,7 @@ void filemap_map_pages(struct vm_fault *vmf,
        unsigned long max_idx;
        XA_STATE(xas, &mapping->i_pages, start_pgoff);
        struct page *page;
+       unsigned int mmap_miss = READ_ONCE(file->f_ra.mmap_miss);
 
        rcu_read_lock();
        xas_for_each(&xas, page, end_pgoff) {
@@ -2724,8 +2729,8 @@ void filemap_map_pages(struct vm_fault *vmf,
                if (page->index >= max_idx)
                        goto unlock;
 
-               if (file->f_ra.mmap_miss > 0)
-                       file->f_ra.mmap_miss--;
+               if (mmap_miss > 0)
+                       mmap_miss--;
 
                vmf->address += (xas.xa_index - last_pgoff) << PAGE_SHIFT;
                if (vmf->pte)
@@ -2745,6 +2750,7 @@ next:
                        break;
        }
        rcu_read_unlock();
+       WRITE_ONCE(file->f_ra.mmap_miss, mmap_miss);
 }
 EXPORT_SYMBOL(filemap_map_pages);
 
@@ -2885,7 +2891,7 @@ filler:
         * Case a, the page will be up to date when the page is unlocked.
         *    There is no need to serialise on the page lock here as the page
         *    is pinned so the lock gives no additional protection. Even if the
-        *    the page is truncated, the data is still valid if PageUptodate as
+        *    page is truncated, the data is still valid if PageUptodate as
         *    it's a race vs truncate race.
         * Case b, the page will not be up to date
         * Case c, the page may be truncated but in itself, the data may still
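
The mmap_miss changes above follow a snapshot-then-publish pattern: read the racy counter once into a local variable, work on the local copy, and write it back once. READ_ONCE()/WRITE_ONCE() are kernel annotations; as a rough user-space analogue (an assumption, not the kernel implementation), the same shape can be written with C11 relaxed atomics:

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned int mmap_miss;  /* stand-in for ra->mmap_miss */

/* Take one snapshot, update the local copy, publish it once. */
static void note_miss(void)
{
        unsigned int miss = atomic_load_explicit(&mmap_miss,
                                                 memory_order_relaxed);

        if (miss < 1000)        /* cf. MMAP_LOTSAMISS * 10 */
                atomic_store_explicit(&mmap_miss, miss + 1,
                                      memory_order_relaxed);
}

int main(void)
{
        note_miss();
        note_miss();
        printf("%u\n", atomic_load_explicit(&mmap_miss, memory_order_relaxed));
        return 0;
}
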
index 9d977b1..2183a56 100644 (file)
@@ -61,16 +61,16 @@ static u64 frontswap_failed_stores;
 static u64 frontswap_invalidates;
 
 static inline void inc_frontswap_loads(void) {
-       frontswap_loads++;
+       data_race(frontswap_loads++);
 }
 static inline void inc_frontswap_succ_stores(void) {
-       frontswap_succ_stores++;
+       data_race(frontswap_succ_stores++);
 }
 static inline void inc_frontswap_failed_stores(void) {
-       frontswap_failed_stores++;
+       data_race(frontswap_failed_stores++);
 }
 static inline void inc_frontswap_invalidates(void) {
-       frontswap_invalidates++;
+       data_race(frontswap_invalidates++);
 }
 #else
 static inline void inc_frontswap_loads(void) { }
index d8a33dd..ae096ea 100644 (file)
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -859,7 +859,7 @@ unmap:
  * does not include FOLL_NOWAIT, the mmap_lock may be released.  If it
  * is, *@locked will be set to 0 and -EBUSY returned.
  */
-static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
+static int faultin_page(struct vm_area_struct *vma,
                unsigned long address, unsigned int *flags, int *locked)
 {
        unsigned int fault_flags = 0;
@@ -884,7 +884,7 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
                fault_flags |= FAULT_FLAG_TRIED;
        }
 
-       ret = handle_mm_fault(vma, address, fault_flags);
+       ret = handle_mm_fault(vma, address, fault_flags, NULL);
        if (ret & VM_FAULT_ERROR) {
                int err = vm_fault_to_errno(ret, *flags);
 
@@ -893,13 +893,6 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
                BUG();
        }
 
-       if (tsk) {
-               if (ret & VM_FAULT_MAJOR)
-                       tsk->maj_flt++;
-               else
-                       tsk->min_flt++;
-       }
-
        if (ret & VM_FAULT_RETRY) {
                if (locked && !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
                        *locked = 0;
@@ -969,7 +962,6 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
 
 /**
  * __get_user_pages() - pin user pages in memory
- * @tsk:       task_struct of target task
  * @mm:                mm_struct of target mm
  * @start:     starting user address
  * @nr_pages:  number of pages from start to pin
@@ -1028,7 +1020,7 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
  * instead of __get_user_pages. __get_user_pages should be used only if
  * you need some special @gup_flags.
  */
-static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+static long __get_user_pages(struct mm_struct *mm,
                unsigned long start, unsigned long nr_pages,
                unsigned int gup_flags, struct page **pages,
                struct vm_area_struct **vmas, int *locked)
@@ -1110,8 +1102,7 @@ retry:
 
                page = follow_page_mask(vma, start, foll_flags, &ctx);
                if (!page) {
-                       ret = faultin_page(tsk, vma, start, &foll_flags,
-                                          locked);
+                       ret = faultin_page(vma, start, &foll_flags, locked);
                        switch (ret) {
                        case 0:
                                goto retry;
@@ -1185,8 +1176,6 @@ static bool vma_permits_fault(struct vm_area_struct *vma,
 
 /**
  * fixup_user_fault() - manually resolve a user page fault
- * @tsk:       the task_struct to use for page fault accounting, or
- *             NULL if faults are not to be recorded.
  * @mm:                mm_struct of target mm
  * @address:   user address
  * @fault_flags:flags to pass down to handle_mm_fault()
@@ -1214,7 +1203,7 @@ static bool vma_permits_fault(struct vm_area_struct *vma,
  * This function will not return with an unlocked mmap_lock. So it has not the
  * same semantics wrt the @mm->mmap_lock as does filemap_fault().
  */
-int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
+int fixup_user_fault(struct mm_struct *mm,
                     unsigned long address, unsigned int fault_flags,
                     bool *unlocked)
 {
@@ -1238,7 +1227,7 @@ retry:
            fatal_signal_pending(current))
                return -EINTR;
 
-       ret = handle_mm_fault(vma, address, fault_flags);
+       ret = handle_mm_fault(vma, address, fault_flags, NULL);
        major |= ret & VM_FAULT_MAJOR;
        if (ret & VM_FAULT_ERROR) {
                int err = vm_fault_to_errno(ret, 0);
@@ -1255,12 +1244,6 @@ retry:
                goto retry;
        }
 
-       if (tsk) {
-               if (major)
-                       tsk->maj_flt++;
-               else
-                       tsk->min_flt++;
-       }
        return 0;
 }
 EXPORT_SYMBOL_GPL(fixup_user_fault);
@@ -1269,8 +1252,7 @@ EXPORT_SYMBOL_GPL(fixup_user_fault);
  * Please note that this function, unlike __get_user_pages will not
  * return 0 for nr_pages > 0 without FOLL_NOWAIT
  */
-static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
-                                               struct mm_struct *mm,
+static __always_inline long __get_user_pages_locked(struct mm_struct *mm,
                                                unsigned long start,
                                                unsigned long nr_pages,
                                                struct page **pages,
@@ -1303,7 +1285,7 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
        pages_done = 0;
        lock_dropped = false;
        for (;;) {
-               ret = __get_user_pages(tsk, mm, start, nr_pages, flags, pages,
+               ret = __get_user_pages(mm, start, nr_pages, flags, pages,
                                       vmas, locked);
                if (!locked)
                        /* VM_FAULT_RETRY couldn't trigger, bypass */
@@ -1363,7 +1345,7 @@ retry:
                }
 
                *locked = 1;
-               ret = __get_user_pages(tsk, mm, start, 1, flags | FOLL_TRIED,
+               ret = __get_user_pages(mm, start, 1, flags | FOLL_TRIED,
                                       pages, NULL, locked);
                if (!*locked) {
                        /* Continue to retry until we succeeded */
@@ -1450,7 +1432,7 @@ long populate_vma_page_range(struct vm_area_struct *vma,
         * We made sure addr is within a VMA, so the following will
         * not result in a stack expansion that recurses back here.
         */
-       return __get_user_pages(current, mm, start, nr_pages, gup_flags,
+       return __get_user_pages(mm, start, nr_pages, gup_flags,
                                NULL, NULL, locked);
 }
 
@@ -1534,7 +1516,7 @@ struct page *get_dump_page(unsigned long addr)
        struct vm_area_struct *vma;
        struct page *page;
 
-       if (__get_user_pages(current, current->mm, addr, 1,
+       if (__get_user_pages(current->mm, addr, 1,
                             FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma,
                             NULL) < 1)
                return NULL;
@@ -1543,8 +1525,7 @@ struct page *get_dump_page(unsigned long addr)
 }
 #endif /* CONFIG_ELF_CORE */
 #else /* CONFIG_MMU */
-static long __get_user_pages_locked(struct task_struct *tsk,
-               struct mm_struct *mm, unsigned long start,
+static long __get_user_pages_locked(struct mm_struct *mm, unsigned long start,
                unsigned long nr_pages, struct page **pages,
                struct vm_area_struct **vmas, int *locked,
                unsigned int foll_flags)
@@ -1609,59 +1590,7 @@ static bool check_dax_vmas(struct vm_area_struct **vmas, long nr_pages)
 }
 
 #ifdef CONFIG_CMA
-static struct page *new_non_cma_page(struct page *page, unsigned long private)
-{
-       /*
-        * We want to make sure we allocate the new page from the same node
-        * as the source page.
-        */
-       int nid = page_to_nid(page);
-       /*
-        * Trying to allocate a page for migration. Ignore allocation
-        * failure warnings. We don't force __GFP_THISNODE here because
-        * this node here is the node where we have CMA reservation and
-        * in some case these nodes will have really less non movable
-        * allocation memory.
-        */
-       gfp_t gfp_mask = GFP_USER | __GFP_NOWARN;
-
-       if (PageHighMem(page))
-               gfp_mask |= __GFP_HIGHMEM;
-
-#ifdef CONFIG_HUGETLB_PAGE
-       if (PageHuge(page)) {
-               struct hstate *h = page_hstate(page);
-               /*
-                * We don't want to dequeue from the pool because pool pages will
-                * mostly be from the CMA region.
-                */
-               return alloc_migrate_huge_page(h, gfp_mask, nid, NULL);
-       }
-#endif
-       if (PageTransHuge(page)) {
-               struct page *thp;
-               /*
-                * ignore allocation failure warnings
-                */
-               gfp_t thp_gfpmask = GFP_TRANSHUGE | __GFP_NOWARN;
-
-               /*
-                * Remove the movable mask so that we don't allocate from
-                * CMA area again.
-                */
-               thp_gfpmask &= ~__GFP_MOVABLE;
-               thp = __alloc_pages_node(nid, thp_gfpmask, HPAGE_PMD_ORDER);
-               if (!thp)
-                       return NULL;
-               prep_transhuge_page(thp);
-               return thp;
-       }
-
-       return __alloc_pages_node(nid, gfp_mask, 0);
-}
-
-static long check_and_migrate_cma_pages(struct task_struct *tsk,
-                                       struct mm_struct *mm,
+static long check_and_migrate_cma_pages(struct mm_struct *mm,
                                        unsigned long start,
                                        unsigned long nr_pages,
                                        struct page **pages,
@@ -1674,6 +1603,10 @@ static long check_and_migrate_cma_pages(struct task_struct *tsk,
        bool migrate_allow = true;
        LIST_HEAD(cma_page_list);
        long ret = nr_pages;
+       struct migration_target_control mtc = {
+               .nid = NUMA_NO_NODE,
+               .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_NOWARN,
+       };
 
 check_again:
        for (i = 0; i < nr_pages;) {
@@ -1704,7 +1637,7 @@ check_again:
                                        mod_node_page_state(page_pgdat(head),
                                                            NR_ISOLATED_ANON +
                                                            page_is_file_lru(head),
-                                                           hpage_nr_pages(head));
+                                                           thp_nr_pages(head));
                                }
                        }
                }
@@ -1719,8 +1652,8 @@ check_again:
                for (i = 0; i < nr_pages; i++)
                        put_page(pages[i]);
 
-               if (migrate_pages(&cma_page_list, new_non_cma_page,
-                                 NULL, 0, MIGRATE_SYNC, MR_CONTIG_RANGE)) {
+               if (migrate_pages(&cma_page_list, alloc_migration_target, NULL,
+                       (unsigned long)&mtc, MIGRATE_SYNC, MR_CONTIG_RANGE)) {
                        /*
                         * some of the pages failed migration. Do get_user_pages
                         * without migration.
@@ -1735,7 +1668,7 @@ check_again:
                 * again migrating any new CMA pages which we failed to isolate
                 * earlier.
                 */
-               ret = __get_user_pages_locked(tsk, mm, start, nr_pages,
+               ret = __get_user_pages_locked(mm, start, nr_pages,
                                                   pages, vmas, NULL,
                                                   gup_flags);
 
@@ -1749,8 +1682,7 @@ check_again:
        return ret;
 }
 #else
-static long check_and_migrate_cma_pages(struct task_struct *tsk,
-                                       struct mm_struct *mm,
+static long check_and_migrate_cma_pages(struct mm_struct *mm,
                                        unsigned long start,
                                        unsigned long nr_pages,
                                        struct page **pages,
@@ -1765,8 +1697,7 @@ static long check_and_migrate_cma_pages(struct task_struct *tsk,
  * __gup_longterm_locked() is a wrapper for __get_user_pages_locked which
  * allows us to process the FOLL_LONGTERM flag.
  */
-static long __gup_longterm_locked(struct task_struct *tsk,
-                                 struct mm_struct *mm,
+static long __gup_longterm_locked(struct mm_struct *mm,
                                  unsigned long start,
                                  unsigned long nr_pages,
                                  struct page **pages,
@@ -1791,11 +1722,10 @@ static long __gup_longterm_locked(struct task_struct *tsk,
                flags = memalloc_nocma_save();
        }
 
-       rc = __get_user_pages_locked(tsk, mm, start, nr_pages, pages,
+       rc = __get_user_pages_locked(mm, start, nr_pages, pages,
                                     vmas_tmp, NULL, gup_flags);
 
        if (gup_flags & FOLL_LONGTERM) {
-               memalloc_nocma_restore(flags);
                if (rc < 0)
                        goto out;
 
@@ -1806,32 +1736,31 @@ static long __gup_longterm_locked(struct task_struct *tsk,
                        goto out;
                }
 
-               rc = check_and_migrate_cma_pages(tsk, mm, start, rc, pages,
+               rc = check_and_migrate_cma_pages(mm, start, rc, pages,
                                                 vmas_tmp, gup_flags);
+out:
+               memalloc_nocma_restore(flags);
        }
 
-out:
        if (vmas_tmp != vmas)
                kfree(vmas_tmp);
        return rc;
 }
 #else /* !CONFIG_FS_DAX && !CONFIG_CMA */
-static __always_inline long __gup_longterm_locked(struct task_struct *tsk,
-                                                 struct mm_struct *mm,
+static __always_inline long __gup_longterm_locked(struct mm_struct *mm,
                                                  unsigned long start,
                                                  unsigned long nr_pages,
                                                  struct page **pages,
                                                  struct vm_area_struct **vmas,
                                                  unsigned int flags)
 {
-       return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas,
+       return __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
                                       NULL, flags);
 }
 #endif /* CONFIG_FS_DAX || CONFIG_CMA */
 
 #ifdef CONFIG_MMU
-static long __get_user_pages_remote(struct task_struct *tsk,
-                                   struct mm_struct *mm,
+static long __get_user_pages_remote(struct mm_struct *mm,
                                    unsigned long start, unsigned long nr_pages,
                                    unsigned int gup_flags, struct page **pages,
                                    struct vm_area_struct **vmas, int *locked)
@@ -1850,20 +1779,18 @@ static long __get_user_pages_remote(struct task_struct *tsk,
                 * This will check the vmas (even if our vmas arg is NULL)
                 * and return -ENOTSUPP if DAX isn't allowed in this case:
                 */
-               return __gup_longterm_locked(tsk, mm, start, nr_pages, pages,
+               return __gup_longterm_locked(mm, start, nr_pages, pages,
                                             vmas, gup_flags | FOLL_TOUCH |
                                             FOLL_REMOTE);
        }
 
-       return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas,
+       return __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
                                       locked,
                                       gup_flags | FOLL_TOUCH | FOLL_REMOTE);
 }
 
 /**
  * get_user_pages_remote() - pin user pages in memory
- * @tsk:       the task_struct to use for page fault accounting, or
- *             NULL if faults are not to be recorded.
  * @mm:                mm_struct of target mm
  * @start:     starting user address
  * @nr_pages:  number of pages from start to pin
@@ -1922,7 +1849,7 @@ static long __get_user_pages_remote(struct task_struct *tsk,
  * should use get_user_pages_remote because it cannot pass
  * FAULT_FLAG_ALLOW_RETRY to handle_mm_fault.
  */
-long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
+long get_user_pages_remote(struct mm_struct *mm,
                unsigned long start, unsigned long nr_pages,
                unsigned int gup_flags, struct page **pages,
                struct vm_area_struct **vmas, int *locked)
@@ -1934,13 +1861,13 @@ long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
        if (WARN_ON_ONCE(gup_flags & FOLL_PIN))
                return -EINVAL;
 
-       return __get_user_pages_remote(tsk, mm, start, nr_pages, gup_flags,
+       return __get_user_pages_remote(mm, start, nr_pages, gup_flags,
                                       pages, vmas, locked);
 }
 EXPORT_SYMBOL(get_user_pages_remote);
 
 #else /* CONFIG_MMU */
-long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
+long get_user_pages_remote(struct mm_struct *mm,
                           unsigned long start, unsigned long nr_pages,
                           unsigned int gup_flags, struct page **pages,
                           struct vm_area_struct **vmas, int *locked)
@@ -1948,8 +1875,7 @@ long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
        return 0;
 }
 
-static long __get_user_pages_remote(struct task_struct *tsk,
-                                   struct mm_struct *mm,
+static long __get_user_pages_remote(struct mm_struct *mm,
                                    unsigned long start, unsigned long nr_pages,
                                    unsigned int gup_flags, struct page **pages,
                                    struct vm_area_struct **vmas, int *locked)
@@ -1969,11 +1895,10 @@ static long __get_user_pages_remote(struct task_struct *tsk,
  * @vmas:       array of pointers to vmas corresponding to each page.
  *              Or NULL if the caller does not require them.
  *
- * This is the same as get_user_pages_remote(), just with a
- * less-flexible calling convention where we assume that the task
- * and mm being operated on are the current task's and don't allow
- * passing of a locked parameter.  We also obviously don't pass
- * FOLL_REMOTE in here.
+ * This is the same as get_user_pages_remote(), just with a less-flexible
+ * calling convention where we assume that the mm being operated on belongs to
+ * the current task, and doesn't allow passing of a locked parameter.  We also
+ * obviously don't pass FOLL_REMOTE in here.
  */
 long get_user_pages(unsigned long start, unsigned long nr_pages,
                unsigned int gup_flags, struct page **pages,
@@ -1986,7 +1911,7 @@ long get_user_pages(unsigned long start, unsigned long nr_pages,
        if (WARN_ON_ONCE(gup_flags & FOLL_PIN))
                return -EINVAL;
 
-       return __gup_longterm_locked(current, current->mm, start, nr_pages,
+       return __gup_longterm_locked(current->mm, start, nr_pages,
                                     pages, vmas, gup_flags | FOLL_TOUCH);
 }
 EXPORT_SYMBOL(get_user_pages);
@@ -1996,7 +1921,7 @@ EXPORT_SYMBOL(get_user_pages);
  *
  *      mmap_read_lock(mm);
  *      do_something()
- *      get_user_pages(tsk, mm, ..., pages, NULL);
+ *      get_user_pages(mm, ..., pages, NULL);
  *      mmap_read_unlock(mm);
  *
  *  to:
@@ -2004,7 +1929,7 @@ EXPORT_SYMBOL(get_user_pages);
  *      int locked = 1;
  *      mmap_read_lock(mm);
  *      do_something()
- *      get_user_pages_locked(tsk, mm, ..., pages, &locked);
+ *      get_user_pages_locked(mm, ..., pages, &locked);
  *      if (locked)
  *          mmap_read_unlock(mm);
  *
@@ -2042,7 +1967,7 @@ long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
        if (WARN_ON_ONCE(gup_flags & FOLL_PIN))
                return -EINVAL;
 
-       return __get_user_pages_locked(current, current->mm, start, nr_pages,
+       return __get_user_pages_locked(current->mm, start, nr_pages,
                                       pages, NULL, locked,
                                       gup_flags | FOLL_TOUCH);
 }
@@ -2052,12 +1977,12 @@ EXPORT_SYMBOL(get_user_pages_locked);
  * get_user_pages_unlocked() is suitable to replace the form:
  *
  *      mmap_read_lock(mm);
- *      get_user_pages(tsk, mm, ..., pages, NULL);
+ *      get_user_pages(mm, ..., pages, NULL);
  *      mmap_read_unlock(mm);
  *
  *  with:
  *
- *      get_user_pages_unlocked(tsk, mm, ..., pages);
+ *      get_user_pages_unlocked(mm, ..., pages);
  *
  * It is functionally equivalent to get_user_pages_fast so
  * get_user_pages_fast should be used instead if specific gup_flags
@@ -2080,7 +2005,7 @@ long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
                return -EINVAL;
 
        mmap_read_lock(mm);
-       ret = __get_user_pages_locked(current, mm, start, nr_pages, pages, NULL,
+       ret = __get_user_pages_locked(mm, start, nr_pages, pages, NULL,
                                      &locked, gup_flags | FOLL_TOUCH);
        if (locked)
                mmap_read_unlock(mm);
@@ -2725,7 +2650,7 @@ static int __gup_longterm_unlocked(unsigned long start, int nr_pages,
         */
        if (gup_flags & FOLL_LONGTERM) {
                mmap_read_lock(current->mm);
-               ret = __gup_longterm_locked(current, current->mm,
+               ret = __gup_longterm_locked(current->mm,
                                            start, nr_pages,
                                            pages, NULL, gup_flags);
                mmap_read_unlock(current->mm);
@@ -2968,10 +2893,8 @@ int pin_user_pages_fast_only(unsigned long start, int nr_pages,
 EXPORT_SYMBOL_GPL(pin_user_pages_fast_only);
 
 /**
- * pin_user_pages_remote() - pin pages of a remote process (task != current)
+ * pin_user_pages_remote() - pin pages of a remote process
  *
- * @tsk:       the task_struct to use for page fault accounting, or
- *             NULL if faults are not to be recorded.
  * @mm:                mm_struct of target mm
  * @start:     starting user address
  * @nr_pages:  number of pages from start to pin
@@ -2992,7 +2915,7 @@ EXPORT_SYMBOL_GPL(pin_user_pages_fast_only);
  * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
  * see Documentation/core-api/pin_user_pages.rst for details.
  */
-long pin_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
+long pin_user_pages_remote(struct mm_struct *mm,
                           unsigned long start, unsigned long nr_pages,
                           unsigned int gup_flags, struct page **pages,
                           struct vm_area_struct **vmas, int *locked)
@@ -3002,7 +2925,7 @@ long pin_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
                return -EINVAL;
 
        gup_flags |= FOLL_PIN;
-       return __get_user_pages_remote(tsk, mm, start, nr_pages, gup_flags,
+       return __get_user_pages_remote(mm, start, nr_pages, gup_flags,
                                       pages, vmas, locked);
 }
 EXPORT_SYMBOL(pin_user_pages_remote);
@@ -3034,7 +2957,7 @@ long pin_user_pages(unsigned long start, unsigned long nr_pages,
                return -EINVAL;
 
        gup_flags |= FOLL_PIN;
-       return __gup_longterm_locked(current, current->mm, start, nr_pages,
+       return __gup_longterm_locked(current->mm, start, nr_pages,
                                     pages, vmas, gup_flags);
 }
 EXPORT_SYMBOL(pin_user_pages);
@@ -3079,7 +3002,7 @@ long pin_user_pages_locked(unsigned long start, unsigned long nr_pages,
                return -EINVAL;
 
        gup_flags |= FOLL_PIN;
-       return __get_user_pages_locked(current, current->mm, start, nr_pages,
+       return __get_user_pages_locked(current->mm, start, nr_pages,
                                       pages, NULL, locked,
                                       gup_flags | FOLL_TOUCH);
 }
index 0809bae..943cb2b 100644 (file)
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -75,7 +75,8 @@ static int hmm_vma_fault(unsigned long addr, unsigned long end,
        }
 
        for (; addr < end; addr += PAGE_SIZE)
-               if (handle_mm_fault(vma, addr, fault_flags) & VM_FAULT_ERROR)
+               if (handle_mm_fault(vma, addr, fault_flags, NULL) &
+                   VM_FAULT_ERROR)
                        return -EFAULT;
        return -EBUSY;
 }
@@ -249,7 +250,7 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
                swp_entry_t entry = pte_to_swp_entry(pte);
 
                /*
-                * Never fault in device private pages pages, but just report
+                * Never fault in device private pages, but just report
                 * the PFN even if not present.
                 */
                if (hmm_is_device_private_entry(range, entry)) {
index 206f52b..2ccff84 100644 (file)
@@ -303,24 +303,6 @@ static ssize_t hpage_pmd_size_show(struct kobject *kobj,
 static struct kobj_attribute hpage_pmd_size_attr =
        __ATTR_RO(hpage_pmd_size);
 
-#ifdef CONFIG_DEBUG_VM
-static ssize_t debug_cow_show(struct kobject *kobj,
-                               struct kobj_attribute *attr, char *buf)
-{
-       return single_hugepage_flag_show(kobj, attr, buf,
-                               TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
-}
-static ssize_t debug_cow_store(struct kobject *kobj,
-                              struct kobj_attribute *attr,
-                              const char *buf, size_t count)
-{
-       return single_hugepage_flag_store(kobj, attr, buf, count,
-                                TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
-}
-static struct kobj_attribute debug_cow_attr =
-       __ATTR(debug_cow, 0644, debug_cow_show, debug_cow_store);
-#endif /* CONFIG_DEBUG_VM */
-
 static struct attribute *hugepage_attr[] = {
        &enabled_attr.attr,
        &defrag_attr.attr,
@@ -329,9 +311,6 @@ static struct attribute *hugepage_attr[] = {
 #ifdef CONFIG_SHMEM
        &shmem_enabled_attr.attr,
 #endif
-#ifdef CONFIG_DEBUG_VM
-       &debug_cow_attr.attr,
-#endif
        NULL,
 };
 
@@ -640,7 +619,7 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
                entry = mk_huge_pmd(page, vma->vm_page_prot);
                entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
                page_add_new_anon_rmap(page, vma, haddr, true);
-               lru_cache_add_active_or_unevictable(page, vma);
+               lru_cache_add_inactive_or_unevictable(page, vma);
                pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
                set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
                add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
index e52c878..a301c2d 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/memblock.h>
 #include <linux/sysfs.h>
 #include <linux/slab.h>
+#include <linux/sched/mm.h>
 #include <linux/mmdebug.h>
 #include <linux/sched/signal.h>
 #include <linux/rmap.h>
@@ -133,7 +134,7 @@ void hugepage_put_subpool(struct hugepage_subpool *spool)
 /*
  * Subpool accounting for allocating and reserving pages.
  * Return -ENOMEM if there are not enough resources to satisfy the
- * the request.  Otherwise, return the number of pages by which the
+ * request.  Otherwise, return the number of pages by which the
  * global pools must be adjusted (upward).  The returned value may
  * only be different than the passed value (delta) in the case where
  * a subpool minimum size must be maintained.
@@ -1040,10 +1041,16 @@ static void enqueue_huge_page(struct hstate *h, struct page *page)
 static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
 {
        struct page *page;
+       bool nocma = !!(current->flags & PF_MEMALLOC_NOCMA);
+
+       list_for_each_entry(page, &h->hugepage_freelists[nid], lru) {
+               if (nocma && is_migrate_cma_page(page))
+                       continue;
 
-       list_for_each_entry(page, &h->hugepage_freelists[nid], lru)
                if (!PageHWPoison(page))
                        break;
+       }
+
        /*
         * if 'non-isolated free hugepage' not found on the list,
         * the allocation fails.
@@ -1093,15 +1100,6 @@ retry_cpuset:
        return NULL;
 }
 
-/* Movability of hugepages depends on migration support. */
-static inline gfp_t htlb_alloc_mask(struct hstate *h)
-{
-       if (hugepage_movable_supported(h))
-               return GFP_HIGHUSER_MOVABLE;
-       else
-               return GFP_HIGHUSER;
-}
-
 static struct page *dequeue_huge_page_vma(struct hstate *h,
                                struct vm_area_struct *vma,
                                unsigned long address, int avoid_reserve,
@@ -1944,7 +1942,7 @@ out_unlock:
        return page;
 }
 
-struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
+static struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
                                     int nid, nodemask_t *nmask)
 {
        struct page *page;
@@ -1986,31 +1984,9 @@ struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h,
 }
 
 /* page migration callback function */
-struct page *alloc_huge_page_node(struct hstate *h, int nid)
-{
-       gfp_t gfp_mask = htlb_alloc_mask(h);
-       struct page *page = NULL;
-
-       if (nid != NUMA_NO_NODE)
-               gfp_mask |= __GFP_THISNODE;
-
-       spin_lock(&hugetlb_lock);
-       if (h->free_huge_pages - h->resv_huge_pages > 0)
-               page = dequeue_huge_page_nodemask(h, gfp_mask, nid, NULL);
-       spin_unlock(&hugetlb_lock);
-
-       if (!page)
-               page = alloc_migrate_huge_page(h, gfp_mask, nid, NULL);
-
-       return page;
-}
-
-/* page migration callback function */
 struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
-               nodemask_t *nmask)
+               nodemask_t *nmask, gfp_t gfp_mask)
 {
-       gfp_t gfp_mask = htlb_alloc_mask(h);
-
        spin_lock(&hugetlb_lock);
        if (h->free_huge_pages - h->resv_huge_pages > 0) {
                struct page *page;
@@ -2038,7 +2014,7 @@ struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
 
        gfp_mask = htlb_alloc_mask(h);
        node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
-       page = alloc_huge_page_nodemask(h, node, nodemask);
+       page = alloc_huge_page_nodemask(h, node, nodemask, gfp_mask);
        mpol_cond_put(mpol);
 
        return page;
@@ -2167,7 +2143,7 @@ static void return_unused_surplus_pages(struct hstate *h,
         * evenly across all nodes with memory. Iterate across these nodes
         * until we can no longer free unreserved surplus pages. This occurs
         * when the nodes with surplus pages have no free pages.
-        * free_pool_huge_page() will balance the the freed pages across the
+        * free_pool_huge_page() will balance the freed pages across the
         * on-line nodes with memory and will handle the hstate accounting.
         *
         * Note that we decrement resv_huge_pages as we free the pages.  If
@@ -3458,13 +3434,21 @@ static int __init default_hugepagesz_setup(char *s)
 }
 __setup("default_hugepagesz=", default_hugepagesz_setup);
 
-static unsigned int cpuset_mems_nr(unsigned int *array)
+static unsigned int allowed_mems_nr(struct hstate *h)
 {
        int node;
        unsigned int nr = 0;
+       nodemask_t *mpol_allowed;
+       unsigned int *array = h->free_huge_pages_node;
+       gfp_t gfp_mask = htlb_alloc_mask(h);
 
-       for_each_node_mask(node, cpuset_current_mems_allowed)
-               nr += array[node];
+       mpol_allowed = policy_nodemask_current(gfp_mask);
+
+       for_each_node_mask(node, cpuset_current_mems_allowed) {
+               if (!mpol_allowed ||
+                   (mpol_allowed && node_isset(node, *mpol_allowed)))
+                       nr += array[node];
+       }
 
        return nr;
 }
@@ -3643,12 +3627,18 @@ static int hugetlb_acct_memory(struct hstate *h, long delta)
         * we fall back to check against current free page availability as
         * a best attempt and hopefully to minimize the impact of changing
         * semantics that cpuset has.
+        *
+        * Apart from cpuset, we also have memory policy mechanism that
+        * also determines from which node the kernel will allocate memory
+        * in a NUMA system. So similar to cpuset, we also should consider
+        * the memory policy of the current task. Similar to the description
+        * above.
         */
        if (delta > 0) {
                if (gather_surplus_pages(h, delta) < 0)
                        goto out;
 
-               if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
+               if (delta > allowed_mems_nr(h)) {
                        return_unused_surplus_pages(h, delta);
                        goto out;
                }
@@ -3953,7 +3943,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
                        continue;
 
                ptl = huge_pte_lock(h, mm, ptep);
-               if (huge_pmd_unshare(mm, &address, ptep)) {
+               if (huge_pmd_unshare(mm, vma, &address, ptep)) {
                        spin_unlock(ptl);
                        /*
                         * We just unmapped a page of PMDs by clearing a PUD.
@@ -4540,10 +4530,6 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
                        return VM_FAULT_HWPOISON_LARGE |
                                VM_FAULT_SET_HINDEX(hstate_index(h));
-       } else {
-               ptep = huge_pte_alloc(mm, haddr, huge_page_size(h));
-               if (!ptep)
-                       return VM_FAULT_OOM;
        }
 
        /*
@@ -5020,7 +5006,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
                if (!ptep)
                        continue;
                ptl = huge_pte_lock(h, mm, ptep);
-               if (huge_pmd_unshare(mm, &address, ptep)) {
+               if (huge_pmd_unshare(mm, vma, &address, ptep)) {
                        pages++;
                        spin_unlock(ptl);
                        shared_pmd = true;
@@ -5401,12 +5387,14 @@ out:
  * returns: 1 successfully unmapped a shared pte page
  *         0 the underlying pte page is not shared, or it is the last user
  */
-int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
+                                       unsigned long *addr, pte_t *ptep)
 {
        pgd_t *pgd = pgd_offset(mm, *addr);
        p4d_t *p4d = p4d_offset(pgd, *addr);
        pud_t *pud = pud_offset(p4d, *addr);
 
+       i_mmap_assert_write_locked(vma->vm_file->f_mapping);
        BUG_ON(page_count(virt_to_page(ptep)) == 0);
        if (page_count(virt_to_page(ptep)) == 1)
                return 0;
@@ -5424,7 +5412,8 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
        return NULL;
 }
 
-int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
+                               unsigned long *addr, pte_t *ptep)
 {
        return 0;
 }
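
Callers of huge_pmd_unshare() must now hold the mapping's i_mmap_rwsem for write, which is what the i_mmap_assert_write_locked() added above enforces. A hedged sketch of the calling pattern; ptl, h, mm, ptep and address are assumed to come from the surrounding page-table walk, only the lock helpers and the new signature are taken from the kernel:

        struct address_space *mapping = vma->vm_file->f_mapping;

        i_mmap_lock_write(mapping);
        ptl = huge_pte_lock(h, mm, ptep);
        if (huge_pmd_unshare(mm, vma, &address, ptep)) {
                /*
                 * A shared PMD page was dropped by clearing a PUD; the
                 * caller owns the TLB flush for the whole unshared range,
                 * as the __unmap_hugepage_range() hunk above shows.
                 */
        }
        spin_unlock(ptl);
        i_mmap_unlock_write(mapping);
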
@@ -5694,12 +5683,14 @@ void __init hugetlb_cma_reserve(int order)
        reserved = 0;
        for_each_node_state(nid, N_ONLINE) {
                int res;
+               char name[20];
 
                size = min(per_node, hugetlb_cma_size - reserved);
                size = round_up(size, PAGE_SIZE << order);
 
+               snprintf(name, 20, "hugetlb%d", nid);
                res = cma_declare_contiguous_nid(0, size, 0, PAGE_SIZE << order,
-                                                0, false, "hugetlb",
+                                                0, false, name,
                                                 &hugetlb_cma[nid], nid);
                if (res) {
                        pr_warn("hugetlb_cma: reservation failed: err %d, node %d",
index aabf65d..1f87aec 100644 (file)
@@ -655,7 +655,7 @@ static void __init __hugetlb_cgroup_file_dfl_init(int idx)
        snprintf(cft->name, MAX_CFTYPE_NAME, "%s.events", buf);
        cft->private = MEMFILE_PRIVATE(idx, 0);
        cft->seq_show = hugetlb_events_show;
-       cft->file_offset = offsetof(struct hugetlb_cgroup, events_file[idx]),
+       cft->file_offset = offsetof(struct hugetlb_cgroup, events_file[idx]);
        cft->flags = CFTYPE_NOT_ON_ROOT;
 
        /* Add the events.local file */
@@ -664,7 +664,7 @@ static void __init __hugetlb_cgroup_file_dfl_init(int idx)
        cft->private = MEMFILE_PRIVATE(idx, 0);
        cft->seq_show = hugetlb_events_local_show;
        cft->file_offset = offsetof(struct hugetlb_cgroup,
-                                   events_local_file[idx]),
+                                   events_local_file[idx]);
        cft->flags = CFTYPE_NOT_ON_ROOT;
 
        /* NULL terminate the last cft */
index 9886db2..10c6776 100644 (file)
@@ -239,6 +239,7 @@ struct compact_control {
        bool no_set_skip_hint;          /* Don't mark blocks for skipping */
        bool ignore_block_suitable;     /* Scan blocks considered unsuitable */
        bool direct_compaction;         /* False from kcompactd or /proc/... */
+       bool proactive_compaction;      /* kcompactd proactive compaction */
        bool whole_zone;                /* Whole zone should/has been scanned */
        bool contended;                 /* Signal lock or sched contention */
        bool rescan;                    /* Rescanning the same pageblock */
@@ -368,7 +369,7 @@ extern void clear_page_mlock(struct page *page);
 static inline void mlock_migrate_page(struct page *newpage, struct page *page)
 {
        if (TestClearPageMlocked(page)) {
-               int nr_pages = hpage_nr_pages(page);
+               int nr_pages = thp_nr_pages(page);
 
                /* Holding pmd lock, no change in irq context: __mod is safe */
                __mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
@@ -395,7 +396,7 @@ vma_address(struct page *page, struct vm_area_struct *vma)
        unsigned long start, end;
 
        start = __vma_address(page, vma);
-       end = start + PAGE_SIZE * (hpage_nr_pages(page) - 1);
+       end = start + thp_size(page) - PAGE_SIZE;
 
        /* page should be within @vma mapping range */
        VM_BUG_ON_VMA(end < vma->vm_start || start >= vma->vm_end, vma);
@@ -612,5 +613,11 @@ static inline bool is_migrate_highatomic_page(struct page *page)
 }
 
 void setup_zone_pageset(struct zone *zone);
-extern struct page *alloc_new_node_page(struct page *page, unsigned long node);
+
+struct migration_target_control {
+       int nid;                /* preferred node id */
+       nodemask_t *nmask;
+       gfp_t gfp_mask;
+};
+
 #endif /* __MM_INTERNAL_H */
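
struct migration_target_control is the single cookie that the new alloc_migration_target() callback (added in the mm/migrate.c hunks below) receives through migrate_pages()'s private argument. A composite sketch of the calling convention, assembled from the memory_hotplug/mempolicy call sites later in this diff; the pagelist and error handling are illustrative:

        struct migration_target_control mtc = {
                .nid = NUMA_NO_NODE,            /* NUMA_NO_NODE: allocate near the source page */
                .nmask = &node_states[N_MEMORY],
                .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
        };
        int err;

        err = migrate_pages(&pagelist, alloc_migration_target, NULL,
                            (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL);
        if (err)
                putback_movable_pages(&pagelist);
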
index d532c25..370d970 100644 (file)
@@ -16,7 +16,7 @@ CFLAGS_REMOVE_tags_report.o = $(CC_FLAGS_FTRACE)
 # Function splitter causes unnecessary splits in __asan_load1/__asan_store1
 # see: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63533
 CC_FLAGS_KASAN_RUNTIME := $(call cc-option, -fno-conserve-stack)
-CC_FLAGS_KASAN_RUNTIME += $(call cc-option, -fno-stack-protector)
+CC_FLAGS_KASAN_RUNTIME += -fno-stack-protector
 # Disable branch tracing to avoid recursion.
 CC_FLAGS_KASAN_RUNTIME += -DDISABLE_BRANCH_PROFILING
 
index b52bd46..e749e56 100644 (file)
@@ -466,7 +466,7 @@ int __khugepaged_enter(struct mm_struct *mm)
                return -ENOMEM;
 
        /* __khugepaged_exit() must not run from under us */
-       VM_BUG_ON_MM(khugepaged_test_exit(mm), mm);
+       VM_BUG_ON_MM(atomic_read(&mm->mm_users) == 0, mm);
        if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
                free_mm_slot(mm_slot);
                return 0;
@@ -1173,7 +1173,7 @@ static void collapse_huge_page(struct mm_struct *mm,
        spin_lock(pmd_ptl);
        BUG_ON(!pmd_none(*pmd));
        page_add_new_anon_rmap(new_page, vma, address, true);
-       lru_cache_add_active_or_unevictable(new_page, vma);
+       lru_cache_add_inactive_or_unevictable(new_page, vma);
        pgtable_trans_huge_deposit(mm, pmd, pgtable);
        set_pmd_at(mm, address, pmd, _pmd);
        update_mmu_cache_pmd(vma, address, pmd);
index e362dc3..5e252d9 100644 (file)
@@ -1169,8 +1169,10 @@ static bool update_checksum(struct kmemleak_object *object)
        u32 old_csum = object->checksum;
 
        kasan_disable_current();
+       kcsan_disable_current();
        object->checksum = crc32(0, (void *)object->pointer, object->size);
        kasan_enable_current();
+       kcsan_enable_current();
 
        return object->checksum != old_csum;
 }
index 217842a..0aa2247 100644 (file)
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -480,7 +480,8 @@ static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
                        break;
                if (PageKsm(page))
                        ret = handle_mm_fault(vma, addr,
-                                       FAULT_FLAG_WRITE | FAULT_FLAG_REMOTE);
+                                             FAULT_FLAG_WRITE | FAULT_FLAG_REMOTE,
+                                             NULL);
                else
                        ret = VM_FAULT_WRITE;
                put_page(page);
index e825804..5aa6e44 100644 (file)
@@ -180,7 +180,7 @@ unsigned long list_lru_count_one(struct list_lru *lru,
 
        rcu_read_lock();
        l = list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
-       count = l->nr_items;
+       count = READ_ONCE(l->nr_items);
        rcu_read_unlock();
 
        return count;
index f98ff91..3bd7040 100644 (file)
@@ -205,15 +205,14 @@ long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr, long count)
 long copy_from_user_nofault(void *dst, const void __user *src, size_t size)
 {
        long ret = -EFAULT;
-       mm_segment_t old_fs = get_fs();
+       mm_segment_t old_fs = force_uaccess_begin();
 
-       set_fs(USER_DS);
        if (access_ok(src, size)) {
                pagefault_disable();
                ret = __copy_from_user_inatomic(dst, src, size);
                pagefault_enable();
        }
-       set_fs(old_fs);
+       force_uaccess_end(old_fs);
 
        if (ret)
                return -EFAULT;
@@ -233,15 +232,14 @@ EXPORT_SYMBOL_GPL(copy_from_user_nofault);
 long copy_to_user_nofault(void __user *dst, const void *src, size_t size)
 {
        long ret = -EFAULT;
-       mm_segment_t old_fs = get_fs();
+       mm_segment_t old_fs = force_uaccess_begin();
 
-       set_fs(USER_DS);
        if (access_ok(dst, size)) {
                pagefault_disable();
                ret = __copy_to_user_inatomic(dst, src, size);
                pagefault_enable();
        }
-       set_fs(old_fs);
+       force_uaccess_end(old_fs);
 
        if (ret)
                return -EFAULT;
@@ -270,17 +268,17 @@ EXPORT_SYMBOL_GPL(copy_to_user_nofault);
 long strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr,
                              long count)
 {
-       mm_segment_t old_fs = get_fs();
+       mm_segment_t old_fs;
        long ret;
 
        if (unlikely(count <= 0))
                return 0;
 
-       set_fs(USER_DS);
+       old_fs = force_uaccess_begin();
        pagefault_disable();
        ret = strncpy_from_user(dst, unsafe_addr, count);
        pagefault_enable();
-       set_fs(old_fs);
+       force_uaccess_end(old_fs);
 
        if (ret >= count) {
                ret = count;
@@ -310,14 +308,14 @@ long strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr,
  */
 long strnlen_user_nofault(const void __user *unsafe_addr, long count)
 {
-       mm_segment_t old_fs = get_fs();
+       mm_segment_t old_fs;
        int ret;
 
-       set_fs(USER_DS);
+       old_fs = force_uaccess_begin();
        pagefault_disable();
        ret = strnlen_user(unsafe_addr, count);
        pagefault_enable();
-       set_fs(old_fs);
+       force_uaccess_end(old_fs);
 
        return ret;
 }
index 8d9ceea..b807952 100644 (file)
@@ -781,7 +781,7 @@ void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val)
        if (mem_cgroup_disabled())
                return;
 
-       if (vmstat_item_in_bytes(idx))
+       if (memcg_stat_item_in_bytes(idx))
                threshold <<= PAGE_SHIFT;
 
        x = val + __this_cpu_read(memcg->vmstats_percpu->stat[idx]);
@@ -1488,6 +1488,8 @@ static char *memory_stat_format(struct mem_cgroup *memcg)
        seq_buf_printf(&s, "slab %llu\n",
                       (u64)(memcg_page_state(memcg, NR_SLAB_RECLAIMABLE_B) +
                             memcg_page_state(memcg, NR_SLAB_UNRECLAIMABLE_B)));
+       seq_buf_printf(&s, "percpu %llu\n",
+                      (u64)memcg_page_state(memcg, MEMCG_PERCPU_B));
        seq_buf_printf(&s, "sock %llu\n",
                       (u64)memcg_page_state(memcg, MEMCG_SOCK) *
                       PAGE_SIZE);
@@ -1528,12 +1530,18 @@ static char *memory_stat_format(struct mem_cgroup *memcg)
        seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGMAJFAULT),
                       memcg_events(memcg, PGMAJFAULT));
 
-       seq_buf_printf(&s, "workingset_refault %lu\n",
-                      memcg_page_state(memcg, WORKINGSET_REFAULT));
-       seq_buf_printf(&s, "workingset_activate %lu\n",
-                      memcg_page_state(memcg, WORKINGSET_ACTIVATE));
+       seq_buf_printf(&s, "workingset_refault_anon %lu\n",
+                      memcg_page_state(memcg, WORKINGSET_REFAULT_ANON));
+       seq_buf_printf(&s, "workingset_refault_file %lu\n",
+                      memcg_page_state(memcg, WORKINGSET_REFAULT_FILE));
+       seq_buf_printf(&s, "workingset_activate_anon %lu\n",
+                      memcg_page_state(memcg, WORKINGSET_ACTIVATE_ANON));
+       seq_buf_printf(&s, "workingset_activate_file %lu\n",
+                      memcg_page_state(memcg, WORKINGSET_ACTIVATE_FILE));
+       seq_buf_printf(&s, "workingset_restore %lu\n",
+                      memcg_page_state(memcg, WORKINGSET_RESTORE_ANON));
        seq_buf_printf(&s, "workingset_restore %lu\n",
-                      memcg_page_state(memcg, WORKINGSET_RESTORE));
+                      memcg_page_state(memcg, WORKINGSET_RESTORE_FILE));
        seq_buf_printf(&s, "workingset_nodereclaim %lu\n",
                       memcg_page_state(memcg, WORKINGSET_NODERECLAIM));
 
@@ -2414,7 +2422,7 @@ static void high_work_func(struct work_struct *work)
  *
  * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the
  *   overage ratio to a delay.
- * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down down the
+ * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the
  *   proposed penalty in order to reduce to a reasonable number of jiffies, and
  *   to produce a reasonable delay curve.
  *
@@ -5129,13 +5137,15 @@ static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
        if (!pn)
                return 1;
 
-       pn->lruvec_stat_local = alloc_percpu(struct lruvec_stat);
+       pn->lruvec_stat_local = alloc_percpu_gfp(struct lruvec_stat,
+                                                GFP_KERNEL_ACCOUNT);
        if (!pn->lruvec_stat_local) {
                kfree(pn);
                return 1;
        }
 
-       pn->lruvec_stat_cpu = alloc_percpu(struct lruvec_stat);
+       pn->lruvec_stat_cpu = alloc_percpu_gfp(struct lruvec_stat,
+                                              GFP_KERNEL_ACCOUNT);
        if (!pn->lruvec_stat_cpu) {
                free_percpu(pn->lruvec_stat_local);
                kfree(pn);
@@ -5209,11 +5219,13 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
                goto fail;
        }
 
-       memcg->vmstats_local = alloc_percpu(struct memcg_vmstats_percpu);
+       memcg->vmstats_local = alloc_percpu_gfp(struct memcg_vmstats_percpu,
+                                               GFP_KERNEL_ACCOUNT);
        if (!memcg->vmstats_local)
                goto fail;
 
-       memcg->vmstats_percpu = alloc_percpu(struct memcg_vmstats_percpu);
+       memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu,
+                                                GFP_KERNEL_ACCOUNT);
        if (!memcg->vmstats_percpu)
                goto fail;
 
@@ -5262,7 +5274,9 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
        struct mem_cgroup *memcg;
        long error = -ENOMEM;
 
+       memalloc_use_memcg(parent);
        memcg = mem_cgroup_alloc();
+       memalloc_unuse_memcg();
        if (IS_ERR(memcg))
                return ERR_CAST(memcg);
 
@@ -5575,7 +5589,7 @@ static int mem_cgroup_move_account(struct page *page,
 {
        struct lruvec *from_vec, *to_vec;
        struct pglist_data *pgdat;
-       unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
+       unsigned int nr_pages = compound ? thp_nr_pages(page) : 1;
        int ret;
 
        VM_BUG_ON(from == to);
@@ -6668,7 +6682,7 @@ void mem_cgroup_calculate_protection(struct mem_cgroup *root,
  */
 int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
 {
-       unsigned int nr_pages = hpage_nr_pages(page);
+       unsigned int nr_pages = thp_nr_pages(page);
        struct mem_cgroup *memcg = NULL;
        int ret = 0;
 
@@ -6898,7 +6912,7 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
                return;
 
        /* Force-charge the new page. The old one will be freed soon */
-       nr_pages = hpage_nr_pages(newpage);
+       nr_pages = thp_nr_pages(newpage);
 
        page_counter_charge(&memcg->memory, nr_pages);
        if (do_memsw_account())
@@ -7100,7 +7114,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
         * ancestor for the swap instead and transfer the memory+swap charge.
         */
        swap_memcg = mem_cgroup_id_get_online(memcg);
-       nr_entries = hpage_nr_pages(page);
+       nr_entries = thp_nr_pages(page);
        /* Get references for the tail pages, too */
        if (nr_entries > 1)
                mem_cgroup_id_get_many(swap_memcg, nr_entries - 1);
@@ -7144,7 +7158,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
  */
 int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
 {
-       unsigned int nr_pages = hpage_nr_pages(page);
+       unsigned int nr_pages = thp_nr_pages(page);
        struct page_counter *counter;
        struct mem_cgroup *memcg;
        unsigned short oldid;
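
The hunks above switch the memcg vmstats/lruvec per-cpu areas to accounted allocations and wrap mem_cgroup_alloc() in a memalloc_use_memcg(parent) scope, so the child's per-cpu structures are charged to the parent cgroup rather than to whichever task created it. Stripped to its essentials, the remote-charging pattern looks like this (struct demo_stats is a placeholder):

        struct demo_stats __percpu *stats;

        memalloc_use_memcg(parent_memcg);       /* charge allocations to @parent_memcg */
        stats = alloc_percpu_gfp(struct demo_stats, GFP_KERNEL_ACCOUNT);
        memalloc_unuse_memcg();                 /* revert to charging current's memcg */
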
index 47b8ccb..f1aa643 100644 (file)
@@ -1648,9 +1648,12 @@ EXPORT_SYMBOL(unpoison_memory);
 
 static struct page *new_page(struct page *p, unsigned long private)
 {
-       int nid = page_to_nid(p);
+       struct migration_target_control mtc = {
+               .nid = page_to_nid(p),
+               .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
+       };
 
-       return new_page_nodemask(p, nid, &node_states[N_MEMORY]);
+       return alloc_migration_target(p, (unsigned long)&mtc);
 }
 
 /*
index c39a13b..602f428 100644 (file)
@@ -71,6 +71,8 @@
 #include <linux/dax.h>
 #include <linux/oom.h>
 #include <linux/numa.h>
+#include <linux/perf_event.h>
+#include <linux/ptrace.h>
 
 #include <trace/events/kmem.h>
 
@@ -1800,7 +1802,7 @@ out_unlock:
  * @pfn: source kernel pfn
  * @pgprot: pgprot flags for the inserted page
  *
- * This is exactly like vmf_insert_pfn(), except that it allows drivers to
+ * This is exactly like vmf_insert_pfn(), except that it allows drivers
  * to override pgprot on a per-page basis.
  *
  * This only makes sense for IO mappings, and it makes no sense for
@@ -1936,7 +1938,7 @@ static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma,
  * @pfn: source kernel pfn
  * @pgprot: pgprot flags for the inserted page
  *
- * This is exactly like vmf_insert_mixed(), except that it allows drivers to
+ * This is exactly like vmf_insert_mixed(), except that it allows drivers
  * to override pgprot on a per-page basis.
  *
  * Typically this function should be used by drivers to set caching- and
@@ -2409,8 +2411,6 @@ static inline bool cow_user_page(struct page *dst, struct page *src,
        struct mm_struct *mm = vma->vm_mm;
        unsigned long addr = vmf->address;
 
-       debug_dma_assert_idle(src);
-
        if (likely(src)) {
                copy_user_highpage(dst, src, addr, vma);
                return true;
@@ -2715,7 +2715,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
                 */
                ptep_clear_flush_notify(vma, vmf->address, vmf->pte);
                page_add_new_anon_rmap(new_page, vma, vmf->address, false);
-               lru_cache_add_active_or_unevictable(new_page, vma);
+               lru_cache_add_inactive_or_unevictable(new_page, vma);
                /*
                 * We call the notify macro here because, when using secondary
                 * mmu page tables (such as kvm shadow page tables), we want the
@@ -3098,6 +3098,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
        int locked;
        int exclusive = 0;
        vm_fault_t ret = 0;
+       void *shadow = NULL;
 
        if (!pte_unmap_same(vma->vm_mm, vmf->pmd, vmf->pte, vmf->orig_pte))
                goto out;
@@ -3127,8 +3128,8 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
        if (!page) {
                struct swap_info_struct *si = swp_swap_info(entry);
 
-               if (si->flags & SWP_SYNCHRONOUS_IO &&
-                               __swap_count(entry) == 1) {
+               if (data_race(si->flags & SWP_SYNCHRONOUS_IO) &&
+                   __swap_count(entry) == 1) {
                        /* skip swapcache */
                        page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
                                                        vmf->address);
@@ -3149,13 +3150,9 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
                                        goto out_page;
                                }
 
-                               /*
-                                * XXX: Move to lru_cache_add() when it
-                                * supports new vs putback
-                                */
-                               spin_lock_irq(&page_pgdat(page)->lru_lock);
-                               lru_note_cost_page(page);
-                               spin_unlock_irq(&page_pgdat(page)->lru_lock);
+                               shadow = get_shadow_from_swap_cache(entry);
+                               if (shadow)
+                                       workingset_refault(page, shadow);
 
                                lru_cache_add(page);
                                swap_readpage(page, true);
@@ -3266,10 +3263,9 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
        /* ksm created a completely new copy */
        if (unlikely(page != swapcache && swapcache)) {
                page_add_new_anon_rmap(page, vma, vmf->address, false);
-               lru_cache_add_active_or_unevictable(page, vma);
+               lru_cache_add_inactive_or_unevictable(page, vma);
        } else {
                do_page_add_anon_rmap(page, vma, vmf->address, exclusive);
-               activate_page(page);
        }
 
        swap_free(entry);
@@ -3414,7 +3410,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 
        inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
        page_add_new_anon_rmap(page, vma, vmf->address, false);
-       lru_cache_add_active_or_unevictable(page, vma);
+       lru_cache_add_inactive_or_unevictable(page, vma);
 setpte:
        set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
 
@@ -3672,7 +3668,7 @@ vm_fault_t alloc_set_pte(struct vm_fault *vmf, struct page *page)
        if (write && !(vma->vm_flags & VM_SHARED)) {
                inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
                page_add_new_anon_rmap(page, vma, vmf->address, false);
-               lru_cache_add_active_or_unevictable(page, vma);
+               lru_cache_add_inactive_or_unevictable(page, vma);
        } else {
                inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page));
                page_add_file_rmap(page, false);
@@ -4251,6 +4247,9 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
                                vmf->flags & FAULT_FLAG_WRITE)) {
                update_mmu_cache(vmf->vma, vmf->address, vmf->pte);
        } else {
+               /* Skip spurious TLB flush for retried page fault */
+               if (vmf->flags & FAULT_FLAG_TRIED)
+                       goto unlock;
                /*
                 * This is needed only for protection faults but the arch code
                 * is not yet telling us if this is a protection fault or not.
@@ -4360,6 +4359,67 @@ retry_pud:
        return handle_pte_fault(&vmf);
 }
 
+/**
+ * mm_account_fault - Do page fault accountings
+ *
+ * @regs: the pt_regs struct pointer.  When set to NULL, will skip accounting
+ *        of perf event counters, but we'll still do the per-task accounting to
+ *        the task who triggered this page fault.
+ * @address: the faulted address.
+ * @flags: the fault flags.
+ * @ret: the fault retcode.
+ *
+ * This will take care of most of the page fault accountings.  Meanwhile, it
+ * will also include the PERF_COUNT_SW_PAGE_FAULTS_[MAJ|MIN] perf counter
+ * updates.  However note that the handling of PERF_COUNT_SW_PAGE_FAULTS should
+ * still be in per-arch page fault handlers at the entry of page fault.
+ */
+static inline void mm_account_fault(struct pt_regs *regs,
+                                   unsigned long address, unsigned int flags,
+                                   vm_fault_t ret)
+{
+       bool major;
+
+       /*
+        * We don't do accounting for some specific faults:
+        *
+        * - Unsuccessful faults (e.g. when the address wasn't valid).  That
+        *   includes arch_vma_access_permitted() failing before reaching here.
+        *   So this is not a "this many hardware page faults" counter.  We
+        *   should use the hw profiling for that.
+        *
+        * - Incomplete faults (VM_FAULT_RETRY).  They will only be counted
+        *   once they're completed.
+        */
+       if (ret & (VM_FAULT_ERROR | VM_FAULT_RETRY))
+               return;
+
+       /*
+        * We define the fault as a major fault when the final successful fault
+        * is VM_FAULT_MAJOR, or if it retried (which implies that we couldn't
+        * handle it immediately previously).
+        */
+       major = (ret & VM_FAULT_MAJOR) || (flags & FAULT_FLAG_TRIED);
+
+       if (major)
+               current->maj_flt++;
+       else
+               current->min_flt++;
+
+       /*
+        * If the fault is done for GUP, regs will be NULL.  We only do the
+        * accounting for the per thread fault counters who triggered the
+        * fault, and we skip the perf event updates.
+        */
+       if (!regs)
+               return;
+
+       if (major)
+               perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
+       else
+               perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
+}
+
 /*
  * By the time we get here, we already hold the mm semaphore
  *
@@ -4367,7 +4427,7 @@ retry_pud:
  * return value.  See filemap_fault() and __lock_page_or_retry().
  */
 vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
-               unsigned int flags)
+                          unsigned int flags, struct pt_regs *regs)
 {
        vm_fault_t ret;
 
@@ -4408,6 +4468,8 @@ vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
                        mem_cgroup_oom_synchronize(false);
        }
 
+       mm_account_fault(regs, address, flags, ret);
+
        return ret;
 }
 EXPORT_SYMBOL_GPL(handle_mm_fault);
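
The new pt_regs argument separates user-visible faults, which should feed the perf software counters, from in-kernel callers that only want the per-task accounting. A sketch of the two kinds of call sites; not lifted from any particular architecture, though the NULL case matches the mm/ksm.c hunk earlier in this diff:

        /*
         * Arch page-fault handler: pass the trapped register frame so
         * mm_account_fault() can emit PERF_COUNT_SW_PAGE_FAULTS_{MAJ,MIN}.
         */
        fault = handle_mm_fault(vma, address, flags, regs);

        /*
         * In-kernel callers without a user frame (GUP, break_ksm(), ...):
         * pass NULL, only current->maj_flt/min_flt are updated.
         */
        fault = handle_mm_fault(vma, addr, FAULT_FLAG_WRITE | FAULT_FLAG_REMOTE, NULL);
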
@@ -4681,7 +4743,7 @@ int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
                void *maddr;
                struct page *page = NULL;
 
-               ret = get_user_pages_remote(tsk, mm, addr, 1,
+               ret = get_user_pages_remote(mm, addr, 1,
                                gup_flags, &page, &vma, NULL);
                if (ret <= 0) {
 #ifndef CONFIG_HAVE_IOREMAP_PROT
index ac6961a..e9d5ab5 100644 (file)
@@ -350,6 +350,16 @@ int __ref __add_pages(int nid, unsigned long pfn, unsigned long nr_pages,
        return err;
 }
 
+#ifdef CONFIG_NUMA
+int __weak memory_add_physaddr_to_nid(u64 start)
+{
+       pr_info_once("Unknown target node for memory at 0x%llx, assuming node 0\n",
+                       start);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
+#endif
+
 /* find the smallest valid pfn in the range [start_pfn, end_pfn) */
 static unsigned long find_smallest_section_pfn(int nid, struct zone *zone,
                                     unsigned long start_pfn,
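
Because the fallback above is declared __weak, an architecture with a real physical-address-to-node mapping overrides it simply by providing a strong definition. A hypothetical example, where arch_phys_to_nid() stands in for whatever lookup the architecture actually has:

int memory_add_physaddr_to_nid(u64 start)
{
        int nid = arch_phys_to_nid(start);      /* illustrative arch-specific lookup */

        return nid >= 0 ? nid : 0;
}
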
@@ -844,8 +854,7 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages,
        node_states_set_node(nid, &arg);
        if (need_zonelists_rebuild)
                build_all_zonelists(NULL);
-       else
-               zone_pcp_update(zone);
+       zone_pcp_update(zone);
 
        init_per_zone_wmark_min();
 
@@ -1267,26 +1276,30 @@ found:
 
 static struct page *new_node_page(struct page *page, unsigned long private)
 {
-       int nid = page_to_nid(page);
        nodemask_t nmask = node_states[N_MEMORY];
+       struct migration_target_control mtc = {
+               .nid = page_to_nid(page),
+               .nmask = &nmask,
+               .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
+       };
 
        /*
         * try to allocate from a different node but reuse this node if there
         * are no other online nodes to be used (e.g. we are offlining a part
         * of the only existing node)
         */
-       node_clear(nid, nmask);
+       node_clear(mtc.nid, nmask);
        if (nodes_empty(nmask))
-               node_set(nid, nmask);
+               node_set(mtc.nid, nmask);
 
-       return new_page_nodemask(page, nid, &nmask);
+       return alloc_migration_target(page, (unsigned long)&mtc);
 }
 
 static int
 do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
 {
        unsigned long pfn;
-       struct page *page;
+       struct page *page, *head;
        int ret = 0;
        LIST_HEAD(source);
 
@@ -1294,15 +1307,14 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
                if (!pfn_valid(pfn))
                        continue;
                page = pfn_to_page(pfn);
+               head = compound_head(page);
 
                if (PageHuge(page)) {
-                       struct page *head = compound_head(page);
                        pfn = page_to_pfn(head) + compound_nr(head) - 1;
                        isolate_huge_page(head, &source);
                        continue;
                } else if (PageTransHuge(page))
-                       pfn = page_to_pfn(compound_head(page))
-                               + hpage_nr_pages(page) - 1;
+                       pfn = page_to_pfn(head) + thp_nr_pages(page) - 1;
 
                /*
                 * HWPoison pages have elevated reference counts so the migration would
@@ -1747,7 +1759,7 @@ static int __ref try_remove_memory(int nid, u64 start, u64 size)
         */
        rc = walk_memory_blocks(start, size, NULL, check_memblock_offlined_cb);
        if (rc)
-               goto done;
+               return rc;
 
        /* remove memmap entry */
        firmware_map_remove(start, start + size, "System RAM");
@@ -1771,9 +1783,8 @@ static int __ref try_remove_memory(int nid, u64 start, u64 size)
 
        try_offline_node(nid);
 
-done:
        mem_hotplug_done();
-       return rc;
+       return 0;
 }
 
 /**
index b9e85d4..eddbe4e 100644 (file)
@@ -129,7 +129,7 @@ static struct mempolicy preferred_node_policy[MAX_NUMNODES];
 
 /**
  * numa_map_to_online_node - Find closest online node
- * @nid: Node id to start the search
+ * @node: Node id to start the search
  *
  * Lookup the next closest node by distance if @nid is not online.
  */
@@ -1049,7 +1049,7 @@ static int migrate_page_add(struct page *page, struct list_head *pagelist,
                        list_add_tail(&head->lru, pagelist);
                        mod_node_page_state(page_pgdat(head),
                                NR_ISOLATED_ANON + page_is_file_lru(head),
-                               hpage_nr_pages(head));
+                               thp_nr_pages(head));
                } else if (flags & MPOL_MF_STRICT) {
                        /*
                         * Non-movable page may reach here.  And, there may be
@@ -1065,27 +1065,6 @@ static int migrate_page_add(struct page *page, struct list_head *pagelist,
        return 0;
 }
 
-/* page allocation callback for NUMA node migration */
-struct page *alloc_new_node_page(struct page *page, unsigned long node)
-{
-       if (PageHuge(page))
-               return alloc_huge_page_node(page_hstate(compound_head(page)),
-                                       node);
-       else if (PageTransHuge(page)) {
-               struct page *thp;
-
-               thp = alloc_pages_node(node,
-                       (GFP_TRANSHUGE | __GFP_THISNODE),
-                       HPAGE_PMD_ORDER);
-               if (!thp)
-                       return NULL;
-               prep_transhuge_page(thp);
-               return thp;
-       } else
-               return __alloc_pages_node(node, GFP_HIGHUSER_MOVABLE |
-                                                   __GFP_THISNODE, 0);
-}
-
 /*
  * Migrate pages from one node to a target node.
  * Returns error or the number of pages not migrated.
@@ -1096,6 +1075,10 @@ static int migrate_to_node(struct mm_struct *mm, int source, int dest,
        nodemask_t nmask;
        LIST_HEAD(pagelist);
        int err = 0;
+       struct migration_target_control mtc = {
+               .nid = dest,
+               .gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
+       };
 
        nodes_clear(nmask);
        node_set(source, nmask);
@@ -1110,8 +1093,8 @@ static int migrate_to_node(struct mm_struct *mm, int source, int dest,
                        flags | MPOL_MF_DISCONTIG_OK, &pagelist);
 
        if (!list_empty(&pagelist)) {
-               err = migrate_pages(&pagelist, alloc_new_node_page, NULL, dest,
-                                       MIGRATE_SYNC, MR_SYSCALL);
+               err = migrate_pages(&pagelist, alloc_migration_target, NULL,
+                               (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL);
                if (err)
                        putback_movable_pages(&pagelist);
        }
@@ -1632,11 +1615,11 @@ static int kernel_get_mempolicy(int __user *policy,
        int pval;
        nodemask_t nodes;
 
-       addr = untagged_addr(addr);
-
        if (nmask != NULL && maxnode < nr_node_ids)
                return -EINVAL;
 
+       addr = untagged_addr(addr);
+
        err = do_get_mempolicy(&pval, &nodes, addr, flags);
 
        if (err)
@@ -1890,7 +1873,7 @@ static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
  * Return a nodemask representing a mempolicy for filtering nodes for
  * page allocation
  */
-static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
+nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
 {
        /* Lower zones don't get a nodemask applied for MPOL_BIND */
        if (unlikely(policy->mode == MPOL_BIND) &&
index 85efab3..79bff63 100644 (file)
@@ -489,7 +489,7 @@ void mempool_free(void *element, mempool_t *pool)
         * ensures that there will be frees which return elements to the
         * pool waking up the waiters.
         */
-       if (unlikely(pool->curr_nr < pool->min_nr)) {
+       if (unlikely(READ_ONCE(pool->curr_nr) < pool->min_nr)) {
                spin_lock_irqsave(&pool->lock, flags);
                if (likely(pool->curr_nr < pool->min_nr)) {
                        add_element(pool, element);
index d179657..34a842a 100644 (file)
@@ -193,7 +193,7 @@ void putback_movable_pages(struct list_head *l)
                        put_page(page);
                } else {
                        mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
-                                       page_is_file_lru(page), -hpage_nr_pages(page));
+                                       page_is_file_lru(page), -thp_nr_pages(page));
                        putback_lru_page(page);
                }
        }
@@ -386,7 +386,7 @@ static int expected_page_refs(struct address_space *mapping, struct page *page)
         */
        expected_count += is_device_private_page(page);
        if (mapping)
-               expected_count += hpage_nr_pages(page) + page_has_private(page);
+               expected_count += thp_nr_pages(page) + page_has_private(page);
 
        return expected_count;
 }
@@ -441,7 +441,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
         */
        newpage->index = page->index;
        newpage->mapping = page->mapping;
-       page_ref_add(newpage, hpage_nr_pages(page)); /* add cache reference */
+       page_ref_add(newpage, thp_nr_pages(page)); /* add cache reference */
        if (PageSwapBacked(page)) {
                __SetPageSwapBacked(newpage);
                if (PageSwapCache(page)) {
@@ -474,7 +474,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
         * to one less reference.
         * We know this isn't the last reference.
         */
-       page_ref_unfreeze(page, expected_count - hpage_nr_pages(page));
+       page_ref_unfreeze(page, expected_count - thp_nr_pages(page));
 
        xas_unlock(&xas);
        /* Leave irq disabled to prevent preemption while updating stats */
@@ -591,7 +591,7 @@ static void copy_huge_page(struct page *dst, struct page *src)
        } else {
                /* thp page */
                BUG_ON(!PageTransHuge(src));
-               nr_pages = hpage_nr_pages(src);
+               nr_pages = thp_nr_pages(src);
        }
 
        for (i = 0; i < nr_pages; i++) {
@@ -1213,7 +1213,7 @@ out:
                 */
                if (likely(!__PageMovable(page)))
                        mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
-                                       page_is_file_lru(page), -hpage_nr_pages(page));
+                                       page_is_file_lru(page), -thp_nr_pages(page));
        }
 
        /*
@@ -1418,22 +1418,35 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
                enum migrate_mode mode, int reason)
 {
        int retry = 1;
+       int thp_retry = 1;
        int nr_failed = 0;
        int nr_succeeded = 0;
+       int nr_thp_succeeded = 0;
+       int nr_thp_failed = 0;
+       int nr_thp_split = 0;
        int pass = 0;
+       bool is_thp = false;
        struct page *page;
        struct page *page2;
        int swapwrite = current->flags & PF_SWAPWRITE;
-       int rc;
+       int rc, nr_subpages;
 
        if (!swapwrite)
                current->flags |= PF_SWAPWRITE;
 
-       for(pass = 0; pass < 10 && retry; pass++) {
+       for (pass = 0; pass < 10 && (retry || thp_retry); pass++) {
                retry = 0;
+               thp_retry = 0;
 
                list_for_each_entry_safe(page, page2, from, lru) {
 retry:
+                       /*
+                        * THP statistics is based on the source huge page.
+                        * Capture required information that might get lost
+                        * during migration.
+                        */
+                       is_thp = PageTransHuge(page);
+                       nr_subpages = thp_nr_pages(page);
                        cond_resched();
 
                        if (PageHuge(page))
@@ -1464,15 +1477,30 @@ retry:
                                        unlock_page(page);
                                        if (!rc) {
                                                list_safe_reset_next(page, page2, lru);
+                                               nr_thp_split++;
                                                goto retry;
                                        }
                                }
+                               if (is_thp) {
+                                       nr_thp_failed++;
+                                       nr_failed += nr_subpages;
+                                       goto out;
+                               }
                                nr_failed++;
                                goto out;
                        case -EAGAIN:
+                               if (is_thp) {
+                                       thp_retry++;
+                                       break;
+                               }
                                retry++;
                                break;
                        case MIGRATEPAGE_SUCCESS:
+                               if (is_thp) {
+                                       nr_thp_succeeded++;
+                                       nr_succeeded += nr_subpages;
+                                       break;
+                               }
                                nr_succeeded++;
                                break;
                        default:
@@ -1482,19 +1510,27 @@ retry:
                                 * removed from migration page list and not
                                 * retried in the next outer loop.
                                 */
+                               if (is_thp) {
+                                       nr_thp_failed++;
+                                       nr_failed += nr_subpages;
+                                       break;
+                               }
                                nr_failed++;
                                break;
                        }
                }
        }
-       nr_failed += retry;
+       nr_failed += retry + thp_retry;
+       nr_thp_failed += thp_retry;
        rc = nr_failed;
 out:
-       if (nr_succeeded)
-               count_vm_events(PGMIGRATE_SUCCESS, nr_succeeded);
-       if (nr_failed)
-               count_vm_events(PGMIGRATE_FAIL, nr_failed);
-       trace_mm_migrate_pages(nr_succeeded, nr_failed, mode, reason);
+       count_vm_events(PGMIGRATE_SUCCESS, nr_succeeded);
+       count_vm_events(PGMIGRATE_FAIL, nr_failed);
+       count_vm_events(THP_MIGRATION_SUCCESS, nr_thp_succeeded);
+       count_vm_events(THP_MIGRATION_FAIL, nr_thp_failed);
+       count_vm_events(THP_MIGRATION_SPLIT, nr_thp_split);
+       trace_mm_migrate_pages(nr_succeeded, nr_failed, nr_thp_succeeded,
+                              nr_thp_failed, nr_thp_split, mode, reason);
 
        if (!swapwrite)
                current->flags &= ~PF_SWAPWRITE;
@@ -1502,6 +1538,49 @@ out:
        return rc;
 }
 
+struct page *alloc_migration_target(struct page *page, unsigned long private)
+{
+       struct migration_target_control *mtc;
+       gfp_t gfp_mask;
+       unsigned int order = 0;
+       struct page *new_page = NULL;
+       int nid;
+       int zidx;
+
+       mtc = (struct migration_target_control *)private;
+       gfp_mask = mtc->gfp_mask;
+       nid = mtc->nid;
+       if (nid == NUMA_NO_NODE)
+               nid = page_to_nid(page);
+
+       if (PageHuge(page)) {
+               struct hstate *h = page_hstate(compound_head(page));
+
+               gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
+               return alloc_huge_page_nodemask(h, nid, mtc->nmask, gfp_mask);
+       }
+
+       if (PageTransHuge(page)) {
+               /*
+                * clear __GFP_RECLAIM to make the migration callback
+                * consistent with regular THP allocations.
+                */
+               gfp_mask &= ~__GFP_RECLAIM;
+               gfp_mask |= GFP_TRANSHUGE;
+               order = HPAGE_PMD_ORDER;
+       }
+       zidx = zone_idx(page_zone(page));
+       if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
+               gfp_mask |= __GFP_HIGHMEM;
+
+       new_page = __alloc_pages_nodemask(gfp_mask, order, nid, mtc->nmask);
+
+       if (new_page && PageTransHuge(new_page))
+               prep_transhuge_page(new_page);
+
+       return new_page;
+}
+
 #ifdef CONFIG_NUMA
 
 static int store_status(int __user *status, int start, int value, int nr)
@@ -1519,9 +1598,13 @@ static int do_move_pages_to_node(struct mm_struct *mm,
                struct list_head *pagelist, int node)
 {
        int err;
+       struct migration_target_control mtc = {
+               .nid = node,
+               .gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
+       };
 
-       err = migrate_pages(pagelist, alloc_new_node_page, NULL, node,
-                       MIGRATE_SYNC, MR_SYSCALL);
+       err = migrate_pages(pagelist, alloc_migration_target, NULL,
+                       (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL);
        if (err)
                putback_movable_pages(pagelist);
        return err;
@@ -1587,7 +1670,7 @@ static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
                list_add_tail(&head->lru, pagelist);
                mod_node_page_state(page_pgdat(head),
                        NR_ISOLATED_ANON + page_is_file_lru(head),
-                       hpage_nr_pages(head));
+                       thp_nr_pages(head));
        }
 out_putpage:
        /*
@@ -1951,7 +2034,7 @@ static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
 
        page_lru = page_is_file_lru(page);
        mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_lru,
-                               hpage_nr_pages(page));
+                               thp_nr_pages(page));
 
        /*
         * Isolating the page has taken another reference, so the
@@ -2168,6 +2251,16 @@ static int migrate_vma_collect_hole(unsigned long start,
        struct migrate_vma *migrate = walk->private;
        unsigned long addr;
 
+       /* Only allow populating anonymous memory. */
+       if (!vma_is_anonymous(walk->vma)) {
+               for (addr = start; addr < end; addr += PAGE_SIZE) {
+                       migrate->src[migrate->npages] = 0;
+                       migrate->dst[migrate->npages] = 0;
+                       migrate->npages++;
+               }
+               return 0;
+       }
+
        for (addr = start; addr < end; addr += PAGE_SIZE) {
                migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE;
                migrate->dst[migrate->npages] = 0;
@@ -2260,8 +2353,10 @@ again:
                pte = *ptep;
 
                if (pte_none(pte)) {
-                       mpfn = MIGRATE_PFN_MIGRATE;
-                       migrate->cpages++;
+                       if (vma_is_anonymous(vma)) {
+                               mpfn = MIGRATE_PFN_MIGRATE;
+                               migrate->cpages++;
+                       }
                        goto next;
                }
 
@@ -2619,7 +2714,7 @@ restore:
 
 /**
  * migrate_vma_setup() - prepare to migrate a range of memory
- * @args: contains the vma, start, and and pfns arrays for the migration
+ * @args: contains the vma, start, and pfns arrays for the migration
  *
  * Returns: negative errno on failures, 0 when 0 or more pages were migrated
  * without an error.
@@ -2830,7 +2925,7 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
        inc_mm_counter(mm, MM_ANONPAGES);
        page_add_new_anon_rmap(page, vma, addr, false);
        if (!is_zone_device_page(page))
-               lru_cache_add_active_or_unevictable(page, vma);
+               lru_cache_add_inactive_or_unevictable(page, vma);
        get_page(page);
 
        if (flush) {
index f873613..93ca2bf 100644 (file)
@@ -61,8 +61,7 @@ void clear_page_mlock(struct page *page)
        if (!TestClearPageMlocked(page))
                return;
 
-       mod_zone_page_state(page_zone(page), NR_MLOCK,
-                           -hpage_nr_pages(page));
+       mod_zone_page_state(page_zone(page), NR_MLOCK, -thp_nr_pages(page));
        count_vm_event(UNEVICTABLE_PGCLEARED);
        /*
         * The previous TestClearPageMlocked() corresponds to the smp_mb()
@@ -95,7 +94,7 @@ void mlock_vma_page(struct page *page)
 
        if (!TestSetPageMlocked(page)) {
                mod_zone_page_state(page_zone(page), NR_MLOCK,
-                                   hpage_nr_pages(page));
+                                   thp_nr_pages(page));
                count_vm_event(UNEVICTABLE_PGMLOCKED);
                if (!isolate_lru_page(page))
                        putback_lru_page(page);
@@ -192,7 +191,7 @@ unsigned int munlock_vma_page(struct page *page)
        /*
         * Serialize with any parallel __split_huge_page_refcount() which
         * might otherwise copy PageMlocked to part of the tail pages before
-        * we clear it in the head page. It also stabilizes hpage_nr_pages().
+        * we clear it in the head page. It also stabilizes thp_nr_pages().
         */
        spin_lock_irq(&pgdat->lru_lock);
 
@@ -202,7 +201,7 @@ unsigned int munlock_vma_page(struct page *page)
                goto unlock_out;
        }
 
-       nr_pages = hpage_nr_pages(page);
+       nr_pages = thp_nr_pages(page);
        __mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
 
        if (__munlock_isolate_lru_page(page, true)) {
index 352bb9f..4fc9181 100644 (file)
@@ -166,7 +166,7 @@ static void mn_itree_inv_end(struct mmu_notifier_subscriptions *subscriptions)
 /**
  * mmu_interval_read_begin - Begin a read side critical section against a VA
  *                           range
- * interval_sub: The interval subscription
+ * @interval_sub: The interval subscription
  *
  * mmu_iterval_read_begin()/mmu_iterval_read_retry() implement a
  * collision-retry scheme similar to seqcount for the VA range under
@@ -686,7 +686,7 @@ EXPORT_SYMBOL_GPL(__mmu_notifier_register);
 
 /**
  * mmu_notifier_register - Register a notifier on a mm
- * @mn: The notifier to attach
+ * @subscription: The notifier to attach
  * @mm: The mm to attach the notifier to
  *
  * Must not hold mmap_lock nor any other VM related lock when calling
@@ -856,7 +856,7 @@ static void mmu_notifier_free_rcu(struct rcu_head *rcu)
 
 /**
  * mmu_notifier_put - Release the reference on the notifier
- * @mn: The notifier to act on
+ * @subscription: The notifier to act on
  *
  * This function must be paired with each mmu_notifier_get(), it releases the
  * reference obtained by the get. If this is the last reference then process
@@ -965,7 +965,8 @@ static int __mmu_interval_notifier_insert(
  * @interval_sub: Interval subscription to register
  * @start: Starting virtual address to monitor
  * @length: Length of the range to monitor
- * @mm : mm_struct to attach to
+ * @mm: mm_struct to attach to
+ * @ops: Interval notifier operations to be called on matching events
  *
  * This function subscribes the interval notifier for notifications from the
  * mm.  Upon return the ops related to mmu_interval_notifier will be called
index 340ae77..75a3271 100644 (file)
@@ -1762,8 +1762,8 @@ EXPORT_SYMBOL_GPL(access_process_vm);
  * @newsize: The proposed filesize of the inode
  *
  * Check the shared mappings on an inode on behalf of a shrinking truncate to
- * make sure that that any outstanding VMAs aren't broken and then shrink the
- * vm_regions that extend that beyond so that do_mmap() doesn't
+ * make sure that any outstanding VMAs aren't broken and then shrink the
+ * vm_regions that extend beyond so that do_mmap() doesn't
  * automatically grant mappings that are too large.
  */
 int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
index d30ce75..e90f25d 100644 (file)
@@ -196,17 +196,17 @@ static bool is_dump_unreclaim_slabs(void)
  * predictable as possible.  The goal is to return the highest value for the
  * task consuming the most memory to avoid subsequent oom failures.
  */
-unsigned long oom_badness(struct task_struct *p, unsigned long totalpages)
+long oom_badness(struct task_struct *p, unsigned long totalpages)
 {
        long points;
        long adj;
 
        if (oom_unkillable_task(p))
-               return 0;
+               return LONG_MIN;
 
        p = find_lock_task_mm(p);
        if (!p)
-               return 0;
+               return LONG_MIN;
 
        /*
         * Do not even consider tasks which are explicitly marked oom
@@ -218,7 +218,7 @@ unsigned long oom_badness(struct task_struct *p, unsigned long totalpages)
                        test_bit(MMF_OOM_SKIP, &p->mm->flags) ||
                        in_vfork(p)) {
                task_unlock(p);
-               return 0;
+               return LONG_MIN;
        }
 
        /*
@@ -233,11 +233,7 @@ unsigned long oom_badness(struct task_struct *p, unsigned long totalpages)
        adj *= totalpages / 1000;
        points += adj;
 
-       /*
-        * Never return 0 for an eligible task regardless of the root bonus and
-        * oom_score_adj (oom_score_adj can't be OOM_SCORE_ADJ_MIN here).
-        */
-       return points > 0 ? points : 1;
+       return points;
 }
 
 static const char * const oom_constraint_text[] = {
@@ -310,7 +306,7 @@ static enum oom_constraint constrained_alloc(struct oom_control *oc)
 static int oom_evaluate_task(struct task_struct *task, void *arg)
 {
        struct oom_control *oc = arg;
-       unsigned long points;
+       long points;
 
        if (oom_unkillable_task(task))
                goto next;
@@ -336,12 +332,12 @@ static int oom_evaluate_task(struct task_struct *task, void *arg)
         * killed first if it triggers an oom, then select it.
         */
        if (oom_task_origin(task)) {
-               points = ULONG_MAX;
+               points = LONG_MAX;
                goto select;
        }
 
        points = oom_badness(task, oc->totalpages);
-       if (!points || points < oc->chosen_points)
+       if (points == LONG_MIN || points < oc->chosen_points)
                goto next;
 
 select:
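
oom_badness() can now return any long value, with LONG_MIN as the "never kill this task" sentinel, so comparisons against 0 are gone. Condensed, the selection logic after these hunks amounts to the following; the oc bookkeeping is simplified:

        long points = oom_badness(task, oc->totalpages);

        /* LONG_MIN: unkillable (kthread, MMF_OOM_SKIP, OOM_SCORE_ADJ_MIN, ...) */
        if (points == LONG_MIN || points < oc->chosen_points)
                goto next;

        oc->chosen_points = points;     /* select_bad_process() seeds this with LONG_MIN */
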
@@ -365,6 +361,8 @@ abort:
  */
 static void select_bad_process(struct oom_control *oc)
 {
+       oc->chosen_points = LONG_MIN;
+
        if (is_memcg_oom(oc))
                mem_cgroup_scan_tasks(oc->memcg, oom_evaluate_task, oc);
        else {
@@ -863,6 +861,8 @@ static void __oom_kill_process(struct task_struct *victim, const char *message)
 
        p = find_lock_task_mm(victim);
        if (!p) {
+               pr_info("%s: OOM victim %d (%s) is already exiting. Skip killing the task\n",
+                       message, task_pid_nr(victim), victim->comm);
                put_task_struct(victim);
                return;
        } else if (victim != p) {
index 167732f..fab5e97 100644 (file)
@@ -666,8 +666,6 @@ void prep_compound_page(struct page *page, unsigned int order)
        int i;
        int nr_pages = 1 << order;
 
-       set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
-       set_compound_order(page, order);
        __SetPageHead(page);
        for (i = 1; i < nr_pages; i++) {
                struct page *p = page + i;
@@ -675,6 +673,9 @@ void prep_compound_page(struct page *page, unsigned int order)
                p->mapping = TAIL_MAPPING;
                set_compound_head(p, page);
        }
+
+       set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
+       set_compound_order(page, order);
        atomic_set(compound_mapcount_ptr(page), -1);
        if (hpage_pincount_available(page))
                atomic_set(compound_pincount_ptr(page), 0);
@@ -1301,6 +1302,11 @@ static void free_pcppages_bulk(struct zone *zone, int count,
        struct page *page, *tmp;
        LIST_HEAD(head);
 
+       /*
+        * Ensure a proper count is passed; otherwise the while (list_empty(list))
+        * loop below would get stuck.
+        */
+       count = min(pcp->count, count);
        while (count) {
                struct list_head *list;
 
@@ -4282,7 +4288,7 @@ retry:
        /*
         * If an allocation failed after direct reclaim, it could be because
         * pages are pinned on the per-cpu lists or in high alloc reserves.
-        * Shrink them them and try again
+        * Shrink them and try again
         */
        if (!page && !drained) {
                unreserve_highatomic_pageblock(ac, false);
@@ -6192,7 +6198,7 @@ static int zone_batchsize(struct zone *zone)
  * locking.
  *
  * Any new users of pcp->batch and pcp->high should ensure they can cope with
- * those fields changing asynchronously (acording the the above rule).
+ * those fields changing asynchronously (according to the above rule).
  *
  * mutex_is_locked(&pcp_batch_high_lock) required when calling this function
  * outside of boot time (or some other assurance that no concurrent updaters
@@ -7887,7 +7893,7 @@ int __meminit init_per_zone_wmark_min(void)
 
        return 0;
 }
-core_initcall(init_per_zone_wmark_min)
+postcore_initcall(init_per_zone_wmark_min)
 
 /*
  * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
@@ -8203,7 +8209,7 @@ void *__init alloc_large_system_hash(const char *tablename,
  * race condition. So you can't expect this function should be exact.
  *
  * Returns a page without holding a reference. If the caller wants to
- * dereference that page (e.g., dumping), it has to make sure that that it
+ * dereference that page (e.g., dumping), it has to make sure that it
  * cannot get removed (e.g., via memory unplug) concurrently.
  *
  */
@@ -8347,6 +8353,10 @@ static int __alloc_contig_migrate_range(struct compact_control *cc,
        unsigned long pfn = start;
        unsigned int tries = 0;
        int ret = 0;
+       struct migration_target_control mtc = {
+               .nid = zone_to_nid(cc->zone),
+               .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
+       };
 
        migrate_prep();
 
@@ -8373,8 +8383,8 @@ static int __alloc_contig_migrate_range(struct compact_control *cc,
                                                        &cc->migratepages);
                cc->nr_migratepages -= nr_reclaimed;
 
-               ret = migrate_pages(&cc->migratepages, alloc_migrate_target,
-                                   NULL, 0, cc->mode, MR_CONTIG_RANGE);
+               ret = migrate_pages(&cc->migratepages, alloc_migration_target,
+                               NULL, (unsigned long)&mtc, cc->mode, MR_CONTIG_RANGE);
        }
        if (ret < 0) {
                putback_movable_pages(&cc->migratepages);
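
Among the page_alloc.c hunks above, free_pcppages_bulk() now clamps the requested count to pcp->count, because a larger count would leave the loop spinning forever looking for a non-empty pcp list. Below is a minimal userspace sketch of that guard; the three-list layout and the round-robin step are simplified assumptions, not the kernel's pcp structures:

#include <stdio.h>

#define NR_LISTS 3

/* Userspace model of the free_pcppages_bulk() drain loop (illustrative only). */
static void drain(int *lists, int pcp_count, int count)
{
	/* The fix: never try to free more pages than the pcp actually holds. */
	if (count > pcp_count)
		count = pcp_count;	/* count = min(pcp->count, count); */

	int idx = 0;
	while (count) {
		/* round-robin to a non-empty list; without the clamp this could spin forever */
		while (lists[idx] == 0)
			idx = (idx + 1) % NR_LISTS;
		lists[idx]--;
		count--;
	}
	printf("remaining per-list pages: %d %d %d\n", lists[0], lists[1], lists[2]);
}

int main(void)
{
	int lists[NR_LISTS] = {2, 1, 0};
	drain(lists, 3, 10);	/* caller over-requests; the clamp keeps the loop bounded */
	return 0;
}
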
index b466384..afe22ad 100644 (file)
@@ -77,8 +77,8 @@ void page_counter_charge(struct page_counter *counter, unsigned long nr_pages)
                 * This is indeed racy, but we can live with some
                 * inaccuracy in the watermark.
                 */
-               if (new > c->watermark)
-                       c->watermark = new;
+               if (new > READ_ONCE(c->watermark))
+                       WRITE_ONCE(c->watermark, new);
        }
 }
 
@@ -119,9 +119,10 @@ bool page_counter_try_charge(struct page_counter *counter,
                        propagate_protected_usage(c, new);
                        /*
                         * This is racy, but we can live with some
-                        * inaccuracy in the failcnt.
+                        * inaccuracy in the failcnt which is only used
+                        * to report stats.
                         */
-                       c->failcnt++;
+                       data_race(c->failcnt++);
                        *fail = c;
                        goto failed;
                }
@@ -130,8 +131,8 @@ bool page_counter_try_charge(struct page_counter *counter,
                 * Just like with failcnt, we can live with some
                 * inaccuracy in the watermark.
                 */
-               if (new > c->watermark)
-                       c->watermark = new;
+               if (new > READ_ONCE(c->watermark))
+                       WRITE_ONCE(c->watermark, new);
        }
        return true;
 
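
The page_counter.c hunks annotate the intentionally racy watermark update with READ_ONCE()/WRITE_ONCE() and wrap the failcnt increment in data_race(), documenting that the race is tolerated while ruling out torn or fused accesses. The userspace sketch below models only that access-once idea; the READ_ONCE/WRITE_ONCE macros defined here are simplified stand-ins, not the kernel's definitions:

#include <stdio.h>

/*
 * Simplified stand-ins for the kernel's READ_ONCE/WRITE_ONCE: a single
 * volatile access so the compiler cannot tear, fuse or re-load the value.
 */
#define READ_ONCE(x)		(*(volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, v)	(*(volatile __typeof__(x) *)&(x) = (v))

struct counter {
	unsigned long usage;
	unsigned long watermark;	/* racy high-water mark, used only for stats */
};

static void charge(struct counter *c, unsigned long nr)
{
	unsigned long new = (c->usage += nr);

	/* Lost updates are tolerated; the annotations only rule out torn accesses. */
	if (new > READ_ONCE(c->watermark))
		WRITE_ONCE(c->watermark, new);
}

int main(void)
{
	struct counter c = {0, 0};

	charge(&c, 4);
	charge(&c, 2);
	printf("usage=%lu watermark=%lu\n", c.usage, c.watermark);
	return 0;
}
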
index 9e36256..e485a6e 100644 (file)
@@ -40,7 +40,7 @@ static struct bio *get_swap_bio(gfp_t gfp_flags,
                bio->bi_iter.bi_sector <<= PAGE_SHIFT - 9;
                bio->bi_end_io = end_io;
 
-               bio_add_page(bio, page, PAGE_SIZE * hpage_nr_pages(page), 0);
+               bio_add_page(bio, page, thp_size(page), 0);
        }
        return bio;
 }
@@ -85,7 +85,7 @@ static void swap_slot_free_notify(struct page *page)
                return;
 
        sis = page_swap_info(page);
-       if (!(sis->flags & SWP_BLKDEV))
+       if (data_race(!(sis->flags & SWP_BLKDEV)))
                return;
 
        /*
@@ -274,7 +274,7 @@ static inline void count_swpout_vm_event(struct page *page)
        if (unlikely(PageTransHuge(page)))
                count_vm_event(THP_SWPOUT);
 #endif
-       count_vm_events(PSWPOUT, hpage_nr_pages(page));
+       count_vm_events(PSWPOUT, thp_nr_pages(page));
 }
 
 #if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
@@ -302,7 +302,7 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc,
        struct swap_info_struct *sis = page_swap_info(page);
 
        VM_BUG_ON_PAGE(!PageSwapCache(page), page);
-       if (sis->flags & SWP_FS) {
+       if (data_race(sis->flags & SWP_FS)) {
                struct kiocb kiocb;
                struct file *swap_file = sis->swap_file;
                struct address_space *mapping = swap_file->f_mapping;
@@ -393,7 +393,7 @@ int swap_readpage(struct page *page, bool synchronous)
                goto out;
        }
 
-       if (sis->flags & SWP_FS) {
+       if (data_race(sis->flags & SWP_FS)) {
                struct file *swap_file = sis->swap_file;
                struct address_space *mapping = swap_file->f_mapping;
 
@@ -455,7 +455,7 @@ int swap_set_page_dirty(struct page *page)
 {
        struct swap_info_struct *sis = page_swap_info(page);
 
-       if (sis->flags & SWP_FS) {
+       if (data_race(sis->flags & SWP_FS)) {
                struct address_space *mapping = sis->swap_file->f_mapping;
 
                VM_BUG_ON_PAGE(!PageSwapCache(page), page);
index f6d07c5..242c031 100644 (file)
@@ -306,8 +306,3 @@ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
 
        return pfn < end_pfn ? -EBUSY : 0;
 }
-
-struct page *alloc_migrate_target(struct page *page, unsigned long private)
-{
-       return new_page_nodemask(page, numa_node_id(), &node_states[N_MEMORY]);
-}
index 719c352..5e77b26 100644 (file)
@@ -61,7 +61,7 @@ static inline bool pfn_is_match(struct page *page, unsigned long pfn)
                return page_pfn == pfn;
 
        /* THP can be referenced by any subpage */
-       return pfn >= page_pfn && pfn - page_pfn < hpage_nr_pages(page);
+       return pfn >= page_pfn && pfn - page_pfn < thp_nr_pages(page);
 }
 
 /**
@@ -227,7 +227,7 @@ next_pte:
                        if (pvmw->address >= pvmw->vma->vm_end ||
                            pvmw->address >=
                                        __vma_address(pvmw->page, pvmw->vma) +
-                                       hpage_nr_pages(pvmw->page) * PAGE_SIZE)
+                                       thp_size(pvmw->page))
                                return not_found(pvmw);
                        /* Did we cross page table boundary? */
                        if (pvmw->address % PMD_SIZE == 0) {
@@ -268,7 +268,7 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
        unsigned long start, end;
 
        start = __vma_address(page, vma);
-       end = start + PAGE_SIZE * (hpage_nr_pages(page) - 1);
+       end = start + thp_size(page) - PAGE_SIZE;
 
        if (unlikely(end < vma->vm_start || start >= vma->vm_end))
                return 0;
index 0468ba5..18b768a 100644 (file)
@@ -6,6 +6,25 @@
 #include <linux/percpu.h>
 
 /*
+ * There are two chunk types: root and memcg-aware.
+ * Chunks of each type have a separate list of slots.
+ *
+ * Memcg-aware chunks have an attached vector of obj_cgroup pointers, which is
+ * used to store memcg membership data of a percpu object.  Obj_cgroups are
+ * ref-counted pointers to a memory cgroup with the ability to switch dynamically
+ * to the parent memory cgroup.  This allows a deleted memory cgroup to be
+ * reclaimed without reclaiming all outstanding objects, which hold a reference to it.
+ */
+enum pcpu_chunk_type {
+       PCPU_CHUNK_ROOT,
+#ifdef CONFIG_MEMCG_KMEM
+       PCPU_CHUNK_MEMCG,
+#endif
+       PCPU_NR_CHUNK_TYPES,
+       PCPU_FAIL_ALLOC = PCPU_NR_CHUNK_TYPES
+};
+
+/*
  * pcpu_block_md is the metadata block struct.
  * Each chunk's bitmap is split into a number of full blocks.
  * All units are in terms of bits.
@@ -54,6 +73,9 @@ struct pcpu_chunk {
        int                     end_offset;     /* additional area required to
                                                   have the region end page
                                                   aligned */
+#ifdef CONFIG_MEMCG_KMEM
+       struct obj_cgroup       **obj_cgroups;  /* vector of object cgroups */
+#endif
 
        int                     nr_pages;       /* # of pages served by this chunk */
        int                     nr_populated;   /* # of populated pages */
@@ -63,7 +85,7 @@ struct pcpu_chunk {
 
 extern spinlock_t pcpu_lock;
 
-extern struct list_head *pcpu_slot;
+extern struct list_head *pcpu_chunk_lists;
 extern int pcpu_nr_slots;
 extern int pcpu_nr_empty_pop_pages;
 
@@ -106,6 +128,37 @@ static inline int pcpu_chunk_map_bits(struct pcpu_chunk *chunk)
        return pcpu_nr_pages_to_map_bits(chunk->nr_pages);
 }
 
+#ifdef CONFIG_MEMCG_KMEM
+static inline enum pcpu_chunk_type pcpu_chunk_type(struct pcpu_chunk *chunk)
+{
+       if (chunk->obj_cgroups)
+               return PCPU_CHUNK_MEMCG;
+       return PCPU_CHUNK_ROOT;
+}
+
+static inline bool pcpu_is_memcg_chunk(enum pcpu_chunk_type chunk_type)
+{
+       return chunk_type == PCPU_CHUNK_MEMCG;
+}
+
+#else
+static inline enum pcpu_chunk_type pcpu_chunk_type(struct pcpu_chunk *chunk)
+{
+       return PCPU_CHUNK_ROOT;
+}
+
+static inline bool pcpu_is_memcg_chunk(enum pcpu_chunk_type chunk_type)
+{
+       return false;
+}
+#endif
+
+static inline struct list_head *pcpu_chunk_list(enum pcpu_chunk_type chunk_type)
+{
+       return &pcpu_chunk_lists[pcpu_nr_slots *
+                                pcpu_is_memcg_chunk(chunk_type)];
+}
+
 #ifdef CONFIG_PERCPU_STATS
 
 #include <linux/spinlock.h>
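
With this header change the old pcpu_slot array becomes pcpu_chunk_lists, a single flat allocation holding pcpu_nr_slots slots for each chunk type, and pcpu_chunk_list(type) just offsets into the per-type slice. A small userspace sketch of that indexing follows; the sizes and the plain int slots stand in for the kernel's list_head arrays:

#include <stdio.h>

enum chunk_type { CHUNK_ROOT, CHUNK_MEMCG, NR_CHUNK_TYPES };

#define NR_SLOTS 4

/*
 * Userspace model of the flat pcpu_chunk_lists layout: one allocation holding
 * NR_CHUNK_TYPES back-to-back arrays of NR_SLOTS slots each.
 */
static int chunk_lists[NR_SLOTS * NR_CHUNK_TYPES];

static int *chunk_list(enum chunk_type type)
{
	/* mirrors pcpu_chunk_list(): index into the per-type slice */
	return &chunk_lists[NR_SLOTS * (type == CHUNK_MEMCG)];
}

int main(void)
{
	chunk_list(CHUNK_ROOT)[1] = 10;		/* slot 1 of the root slice  */
	chunk_list(CHUNK_MEMCG)[1] = 20;	/* slot 1 of the memcg slice */

	for (int i = 0; i < NR_SLOTS * NR_CHUNK_TYPES; i++)
		printf("%d ", chunk_lists[i]);
	printf("\n");	/* prints: 0 10 0 0 0 20 0 0 */
	return 0;
}
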
index 20d2b69..35c9941 100644 (file)
@@ -44,7 +44,8 @@ static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
        /* nada */
 }
 
-static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp)
+static struct pcpu_chunk *pcpu_create_chunk(enum pcpu_chunk_type type,
+                                           gfp_t gfp)
 {
        const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT;
        struct pcpu_chunk *chunk;
@@ -52,7 +53,7 @@ static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp)
        unsigned long flags;
        int i;
 
-       chunk = pcpu_alloc_chunk(gfp);
+       chunk = pcpu_alloc_chunk(type, gfp);
        if (!chunk)
                return NULL;
 
index 3255806..c8400a2 100644 (file)
@@ -34,11 +34,15 @@ static int find_max_nr_alloc(void)
 {
        struct pcpu_chunk *chunk;
        int slot, max_nr_alloc;
+       enum pcpu_chunk_type type;
 
        max_nr_alloc = 0;
-       for (slot = 0; slot < pcpu_nr_slots; slot++)
-               list_for_each_entry(chunk, &pcpu_slot[slot], list)
-                       max_nr_alloc = max(max_nr_alloc, chunk->nr_alloc);
+       for (type = 0; type < PCPU_NR_CHUNK_TYPES; type++)
+               for (slot = 0; slot < pcpu_nr_slots; slot++)
+                       list_for_each_entry(chunk, &pcpu_chunk_list(type)[slot],
+                                           list)
+                               max_nr_alloc = max(max_nr_alloc,
+                                                  chunk->nr_alloc);
 
        return max_nr_alloc;
 }
@@ -129,6 +133,9 @@ static void chunk_map_stats(struct seq_file *m, struct pcpu_chunk *chunk,
        P("cur_min_alloc", cur_min_alloc);
        P("cur_med_alloc", cur_med_alloc);
        P("cur_max_alloc", cur_max_alloc);
+#ifdef CONFIG_MEMCG_KMEM
+       P("memcg_aware", pcpu_is_memcg_chunk(pcpu_chunk_type(chunk)));
+#endif
        seq_putc(m, '\n');
 }
 
@@ -137,6 +144,7 @@ static int percpu_stats_show(struct seq_file *m, void *v)
        struct pcpu_chunk *chunk;
        int slot, max_nr_alloc;
        int *buffer;
+       enum pcpu_chunk_type type;
 
 alloc_buffer:
        spin_lock_irq(&pcpu_lock);
@@ -202,18 +210,18 @@ alloc_buffer:
                chunk_map_stats(m, pcpu_reserved_chunk, buffer);
        }
 
-       for (slot = 0; slot < pcpu_nr_slots; slot++) {
-               list_for_each_entry(chunk, &pcpu_slot[slot], list) {
-                       if (chunk == pcpu_first_chunk) {
-                               seq_puts(m, "Chunk: <- First Chunk\n");
-                               chunk_map_stats(m, chunk, buffer);
-
-
-                       } else {
-                               seq_puts(m, "Chunk:\n");
-                               chunk_map_stats(m, chunk, buffer);
+       for (type = 0; type < PCPU_NR_CHUNK_TYPES; type++) {
+               for (slot = 0; slot < pcpu_nr_slots; slot++) {
+                       list_for_each_entry(chunk, &pcpu_chunk_list(type)[slot],
+                                           list) {
+                               if (chunk == pcpu_first_chunk) {
+                                       seq_puts(m, "Chunk: <- First Chunk\n");
+                                       chunk_map_stats(m, chunk, buffer);
+                               } else {
+                                       seq_puts(m, "Chunk:\n");
+                                       chunk_map_stats(m, chunk, buffer);
+                               }
                        }
-
                }
        }
 
index a2b395a..e46f7a6 100644 (file)
@@ -328,12 +328,13 @@ static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
        pcpu_free_pages(chunk, pages, page_start, page_end);
 }
 
-static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp)
+static struct pcpu_chunk *pcpu_create_chunk(enum pcpu_chunk_type type,
+                                           gfp_t gfp)
 {
        struct pcpu_chunk *chunk;
        struct vm_struct **vms;
 
-       chunk = pcpu_alloc_chunk(gfp);
+       chunk = pcpu_alloc_chunk(type, gfp);
        if (!chunk)
                return NULL;
 
index b626766..f470962 100644 (file)
  * takes care of normal allocations.
  *
  * The allocator organizes chunks into lists according to free size and
- * tries to allocate from the fullest chunk first.  Each chunk is managed
- * by a bitmap with metadata blocks.  The allocation map is updated on
- * every allocation and free to reflect the current state while the boundary
+ * memcg-awareness.  To make a percpu allocation memcg-aware, the __GFP_ACCOUNT
+ * flag should be passed.  All memcg-aware allocations share one set of chunks,
+ * while unaccounted allocations and allocations performed by processes
+ * belonging to the root memory cgroup use the second set.
+ *
+ * The allocator tries to allocate from the fullest chunk first. Each chunk
+ * is managed by a bitmap with metadata blocks.  The allocation map is updated
+ * on every allocation and free to reflect the current state while the boundary
  * map is only updated on allocation.  Each metadata block contains
  * information to help mitigate the need to iterate over large portions
  * of the bitmap.  The reverse mapping from page to chunk is stored in
@@ -81,6 +86,7 @@
 #include <linux/kmemleak.h>
 #include <linux/sched.h>
 #include <linux/sched/mm.h>
+#include <linux/memcontrol.h>
 
 #include <asm/cacheflush.h>
 #include <asm/sections.h>
@@ -160,7 +166,7 @@ struct pcpu_chunk *pcpu_reserved_chunk __ro_after_init;
 DEFINE_SPINLOCK(pcpu_lock);    /* all internal data structures */
 static DEFINE_MUTEX(pcpu_alloc_mutex); /* chunk create/destroy, [de]pop, map ext */
 
-struct list_head *pcpu_slot __ro_after_init; /* chunk list slots */
+struct list_head *pcpu_chunk_lists __ro_after_init; /* chunk list slots */
 
 /* chunks which need their map areas extended, protected by pcpu_lock */
 static LIST_HEAD(pcpu_map_extend_chunks);
@@ -500,6 +506,9 @@ static void __pcpu_chunk_move(struct pcpu_chunk *chunk, int slot,
                              bool move_front)
 {
        if (chunk != pcpu_reserved_chunk) {
+               struct list_head *pcpu_slot;
+
+               pcpu_slot = pcpu_chunk_list(pcpu_chunk_type(chunk));
                if (move_front)
                        list_move(&chunk->list, &pcpu_slot[slot]);
                else
@@ -1211,11 +1220,14 @@ static int pcpu_alloc_area(struct pcpu_chunk *chunk, int alloc_bits,
  *
  * This function determines the size of an allocation to free using
  * the boundary bitmap and clears the allocation map.
+ *
+ * RETURNS:
+ * Number of freed bytes.
  */
-static void pcpu_free_area(struct pcpu_chunk *chunk, int off)
+static int pcpu_free_area(struct pcpu_chunk *chunk, int off)
 {
        struct pcpu_block_md *chunk_md = &chunk->chunk_md;
-       int bit_off, bits, end, oslot;
+       int bit_off, bits, end, oslot, freed;
 
        lockdep_assert_held(&pcpu_lock);
        pcpu_stats_area_dealloc(chunk);
@@ -1230,8 +1242,10 @@ static void pcpu_free_area(struct pcpu_chunk *chunk, int off)
        bits = end - bit_off;
        bitmap_clear(chunk->alloc_map, bit_off, bits);
 
+       freed = bits * PCPU_MIN_ALLOC_SIZE;
+
        /* update metadata */
-       chunk->free_bytes += bits * PCPU_MIN_ALLOC_SIZE;
+       chunk->free_bytes += freed;
 
        /* update first free bit */
        chunk_md->first_free = min(chunk_md->first_free, bit_off);
@@ -1239,6 +1253,8 @@ static void pcpu_free_area(struct pcpu_chunk *chunk, int off)
        pcpu_block_update_hint_free(chunk, bit_off, bits);
 
        pcpu_chunk_relocate(chunk, oslot);
+
+       return freed;
 }
 
 static void pcpu_init_md_block(struct pcpu_block_md *block, int nr_bits)
@@ -1334,6 +1350,10 @@ static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr,
                panic("%s: Failed to allocate %zu bytes\n", __func__,
                      alloc_size);
 
+#ifdef CONFIG_MEMCG_KMEM
+       /* first chunk isn't memcg-aware */
+       chunk->obj_cgroups = NULL;
+#endif
        pcpu_init_md_blocks(chunk);
 
        /* manage populated page bitmap */
@@ -1373,7 +1393,7 @@ static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr,
        return chunk;
 }
 
-static struct pcpu_chunk *pcpu_alloc_chunk(gfp_t gfp)
+static struct pcpu_chunk *pcpu_alloc_chunk(enum pcpu_chunk_type type, gfp_t gfp)
 {
        struct pcpu_chunk *chunk;
        int region_bits;
@@ -1401,6 +1421,16 @@ static struct pcpu_chunk *pcpu_alloc_chunk(gfp_t gfp)
        if (!chunk->md_blocks)
                goto md_blocks_fail;
 
+#ifdef CONFIG_MEMCG_KMEM
+       if (pcpu_is_memcg_chunk(type)) {
+               chunk->obj_cgroups =
+                       pcpu_mem_zalloc(pcpu_chunk_map_bits(chunk) *
+                                       sizeof(struct obj_cgroup *), gfp);
+               if (!chunk->obj_cgroups)
+                       goto objcg_fail;
+       }
+#endif
+
        pcpu_init_md_blocks(chunk);
 
        /* init metadata */
@@ -1408,6 +1438,10 @@ static struct pcpu_chunk *pcpu_alloc_chunk(gfp_t gfp)
 
        return chunk;
 
+#ifdef CONFIG_MEMCG_KMEM
+objcg_fail:
+       pcpu_mem_free(chunk->md_blocks);
+#endif
 md_blocks_fail:
        pcpu_mem_free(chunk->bound_map);
 bound_map_fail:
@@ -1422,6 +1456,9 @@ static void pcpu_free_chunk(struct pcpu_chunk *chunk)
 {
        if (!chunk)
                return;
+#ifdef CONFIG_MEMCG_KMEM
+       pcpu_mem_free(chunk->obj_cgroups);
+#endif
        pcpu_mem_free(chunk->md_blocks);
        pcpu_mem_free(chunk->bound_map);
        pcpu_mem_free(chunk->alloc_map);
@@ -1498,7 +1535,8 @@ static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
                               int page_start, int page_end, gfp_t gfp);
 static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
                                  int page_start, int page_end);
-static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp);
+static struct pcpu_chunk *pcpu_create_chunk(enum pcpu_chunk_type type,
+                                           gfp_t gfp);
 static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
 static struct page *pcpu_addr_to_page(void *addr);
 static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);
@@ -1540,6 +1578,87 @@ static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
        return pcpu_get_page_chunk(pcpu_addr_to_page(addr));
 }
 
+#ifdef CONFIG_MEMCG_KMEM
+static enum pcpu_chunk_type pcpu_memcg_pre_alloc_hook(size_t size, gfp_t gfp,
+                                                    struct obj_cgroup **objcgp)
+{
+       struct obj_cgroup *objcg;
+
+       if (!memcg_kmem_enabled() || !(gfp & __GFP_ACCOUNT) ||
+           memcg_kmem_bypass())
+               return PCPU_CHUNK_ROOT;
+
+       objcg = get_obj_cgroup_from_current();
+       if (!objcg)
+               return PCPU_CHUNK_ROOT;
+
+       if (obj_cgroup_charge(objcg, gfp, size * num_possible_cpus())) {
+               obj_cgroup_put(objcg);
+               return PCPU_FAIL_ALLOC;
+       }
+
+       *objcgp = objcg;
+       return PCPU_CHUNK_MEMCG;
+}
+
+static void pcpu_memcg_post_alloc_hook(struct obj_cgroup *objcg,
+                                      struct pcpu_chunk *chunk, int off,
+                                      size_t size)
+{
+       if (!objcg)
+               return;
+
+       if (chunk) {
+               chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT] = objcg;
+
+               rcu_read_lock();
+               mod_memcg_state(obj_cgroup_memcg(objcg), MEMCG_PERCPU_B,
+                               size * num_possible_cpus());
+               rcu_read_unlock();
+       } else {
+               obj_cgroup_uncharge(objcg, size * num_possible_cpus());
+               obj_cgroup_put(objcg);
+       }
+}
+
+static void pcpu_memcg_free_hook(struct pcpu_chunk *chunk, int off, size_t size)
+{
+       struct obj_cgroup *objcg;
+
+       if (!pcpu_is_memcg_chunk(pcpu_chunk_type(chunk)))
+               return;
+
+       objcg = chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT];
+       chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT] = NULL;
+
+       obj_cgroup_uncharge(objcg, size * num_possible_cpus());
+
+       rcu_read_lock();
+       mod_memcg_state(obj_cgroup_memcg(objcg), MEMCG_PERCPU_B,
+                       -(size * num_possible_cpus()));
+       rcu_read_unlock();
+
+       obj_cgroup_put(objcg);
+}
+
+#else /* CONFIG_MEMCG_KMEM */
+static enum pcpu_chunk_type
+pcpu_memcg_pre_alloc_hook(size_t size, gfp_t gfp, struct obj_cgroup **objcgp)
+{
+       return PCPU_CHUNK_ROOT;
+}
+
+static void pcpu_memcg_post_alloc_hook(struct obj_cgroup *objcg,
+                                      struct pcpu_chunk *chunk, int off,
+                                      size_t size)
+{
+}
+
+static void pcpu_memcg_free_hook(struct pcpu_chunk *chunk, int off, size_t size)
+{
+}
+#endif /* CONFIG_MEMCG_KMEM */
+
 /**
  * pcpu_alloc - the percpu allocator
  * @size: size of area to allocate in bytes
@@ -1561,6 +1680,9 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
        gfp_t pcpu_gfp;
        bool is_atomic;
        bool do_warn;
+       enum pcpu_chunk_type type;
+       struct list_head *pcpu_slot;
+       struct obj_cgroup *objcg = NULL;
        static int warn_limit = 10;
        struct pcpu_chunk *chunk, *next;
        const char *err;
@@ -1595,16 +1717,23 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
                return NULL;
        }
 
+       type = pcpu_memcg_pre_alloc_hook(size, gfp, &objcg);
+       if (unlikely(type == PCPU_FAIL_ALLOC))
+               return NULL;
+       pcpu_slot = pcpu_chunk_list(type);
+
        if (!is_atomic) {
                /*
                 * pcpu_balance_workfn() allocates memory under this mutex,
                 * and it may wait for memory reclaim. Allow current task
                 * to become OOM victim, in case of memory pressure.
                 */
-               if (gfp & __GFP_NOFAIL)
+               if (gfp & __GFP_NOFAIL) {
                        mutex_lock(&pcpu_alloc_mutex);
-               else if (mutex_lock_killable(&pcpu_alloc_mutex))
+               } else if (mutex_lock_killable(&pcpu_alloc_mutex)) {
+                       pcpu_memcg_post_alloc_hook(objcg, NULL, 0, size);
                        return NULL;
+               }
        }
 
        spin_lock_irqsave(&pcpu_lock, flags);
@@ -1659,7 +1788,7 @@ restart:
        }
 
        if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) {
-               chunk = pcpu_create_chunk(pcpu_gfp);
+               chunk = pcpu_create_chunk(type, pcpu_gfp);
                if (!chunk) {
                        err = "failed to allocate new chunk";
                        goto fail;
@@ -1716,6 +1845,8 @@ area_found:
        trace_percpu_alloc_percpu(reserved, is_atomic, size, align,
                        chunk->base_addr, off, ptr);
 
+       pcpu_memcg_post_alloc_hook(objcg, chunk, off, size);
+
        return ptr;
 
 fail_unlock:
@@ -1737,6 +1868,9 @@ fail:
        } else {
                mutex_unlock(&pcpu_alloc_mutex);
        }
+
+       pcpu_memcg_post_alloc_hook(objcg, NULL, 0, size);
+
        return NULL;
 }
 
@@ -1796,8 +1930,8 @@ void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
 }
 
 /**
- * pcpu_balance_workfn - manage the amount of free chunks and populated pages
- * @work: unused
+ * __pcpu_balance_workfn - manage the amount of free chunks and populated pages
+ * @type: chunk type
  *
  * Reclaim all fully free chunks except for the first one.  This is also
  * responsible for maintaining the pool of empty populated pages.  However,
@@ -1806,11 +1940,12 @@ void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
  * allocation causes the failure as it is possible that requests can be
  * serviced from already backed regions.
  */
-static void pcpu_balance_workfn(struct work_struct *work)
+static void __pcpu_balance_workfn(enum pcpu_chunk_type type)
 {
        /* gfp flags passed to underlying allocators */
        const gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;
        LIST_HEAD(to_free);
+       struct list_head *pcpu_slot = pcpu_chunk_list(type);
        struct list_head *free_head = &pcpu_slot[pcpu_nr_slots - 1];
        struct pcpu_chunk *chunk, *next;
        int slot, nr_to_pop, ret;
@@ -1908,7 +2043,7 @@ retry_pop:
 
        if (nr_to_pop) {
                /* ran out of chunks to populate, create a new one and retry */
-               chunk = pcpu_create_chunk(gfp);
+               chunk = pcpu_create_chunk(type, gfp);
                if (chunk) {
                        spin_lock_irq(&pcpu_lock);
                        pcpu_chunk_relocate(chunk, -1);
@@ -1921,6 +2056,20 @@ retry_pop:
 }
 
 /**
+ * pcpu_balance_workfn - manage the amount of free chunks and populated pages
+ * @work: unused
+ *
+ * Call __pcpu_balance_workfn() for each chunk type.
+ */
+static void pcpu_balance_workfn(struct work_struct *work)
+{
+       enum pcpu_chunk_type type;
+
+       for (type = 0; type < PCPU_NR_CHUNK_TYPES; type++)
+               __pcpu_balance_workfn(type);
+}
+
+/**
  * free_percpu - free percpu area
  * @ptr: pointer to area to free
  *
@@ -1934,8 +2083,9 @@ void free_percpu(void __percpu *ptr)
        void *addr;
        struct pcpu_chunk *chunk;
        unsigned long flags;
-       int off;
+       int size, off;
        bool need_balance = false;
+       struct list_head *pcpu_slot;
 
        if (!ptr)
                return;
@@ -1949,7 +2099,11 @@ void free_percpu(void __percpu *ptr)
        chunk = pcpu_chunk_addr_search(addr);
        off = addr - chunk->base_addr;
 
-       pcpu_free_area(chunk, off);
+       size = pcpu_free_area(chunk, off);
+
+       pcpu_slot = pcpu_chunk_list(pcpu_chunk_type(chunk));
+
+       pcpu_memcg_free_hook(chunk, off, size);
 
        /* if there are more than one fully free chunks, wake up grim reaper */
        if (chunk->free_bytes == pcpu_unit_size) {
@@ -2260,6 +2414,7 @@ void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
        int map_size;
        unsigned long tmp_addr;
        size_t alloc_size;
+       enum pcpu_chunk_type type;
 
 #define PCPU_SETUP_BUG_ON(cond)        do {                                    \
        if (unlikely(cond)) {                                           \
@@ -2377,13 +2532,18 @@ void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
         * empty chunks.
         */
        pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
-       pcpu_slot = memblock_alloc(pcpu_nr_slots * sizeof(pcpu_slot[0]),
-                                  SMP_CACHE_BYTES);
-       if (!pcpu_slot)
+       pcpu_chunk_lists = memblock_alloc(pcpu_nr_slots *
+                                         sizeof(pcpu_chunk_lists[0]) *
+                                         PCPU_NR_CHUNK_TYPES,
+                                         SMP_CACHE_BYTES);
+       if (!pcpu_chunk_lists)
                panic("%s: Failed to allocate %zu bytes\n", __func__,
-                     pcpu_nr_slots * sizeof(pcpu_slot[0]));
-       for (i = 0; i < pcpu_nr_slots; i++)
-               INIT_LIST_HEAD(&pcpu_slot[i]);
+                     pcpu_nr_slots * sizeof(pcpu_chunk_lists[0]) *
+                     PCPU_NR_CHUNK_TYPES);
+
+       for (type = 0; type < PCPU_NR_CHUNK_TYPES; type++)
+               for (i = 0; i < pcpu_nr_slots; i++)
+                       INIT_LIST_HEAD(&pcpu_chunk_list(type)[i]);
 
        /*
         * The end of the static region needs to be aligned with the
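
As the reworked percpu.c header comment states, an allocation becomes memcg-aware (and is charged through the new pcpu_memcg_* hooks) only when the caller passes __GFP_ACCOUNT. The kernel-style fragment below sketches the intended call pattern; the foo_* names are hypothetical and the snippet assumes a kernel build context rather than being standalone:

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/gfp.h>

struct foo_stats {
	u64 hits;
	u64 misses;
};

static struct foo_stats __percpu *foo_alloc_stats(void)
{
	/*
	 * __GFP_ACCOUNT routes this allocation to a memcg-aware chunk, so the
	 * charge is attributed to the calling task's memory cgroup.
	 */
	return alloc_percpu_gfp(struct foo_stats, GFP_KERNEL | __GFP_ACCOUNT);
}

static void foo_free_stats(struct foo_stats __percpu *stats)
{
	free_percpu(stats);	/* uncharged again via pcpu_memcg_free_hook() */
}
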
index cc85ce8..29c0520 100644 (file)
@@ -105,7 +105,7 @@ static int process_vm_rw_single_vec(unsigned long addr,
                 * current/current->mm
                 */
                mmap_read_lock(mm);
-               pinned_pages = pin_user_pages_remote(task, mm, pa, pinned_pages,
+               pinned_pages = pin_user_pages_remote(mm, pa, pinned_pages,
                                                     flags, process_pages,
                                                     NULL, &locked);
                if (locked)
index 5fe2ded..83cc459 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -672,7 +672,7 @@ static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
  */
 void flush_tlb_batched_pending(struct mm_struct *mm)
 {
-       if (mm->tlb_flush_batched) {
+       if (data_race(mm->tlb_flush_batched)) {
                flush_tlb_mm(mm);
 
                /*
@@ -1130,7 +1130,7 @@ void do_page_add_anon_rmap(struct page *page,
        }
 
        if (first) {
-               int nr = compound ? hpage_nr_pages(page) : 1;
+               int nr = compound ? thp_nr_pages(page) : 1;
                /*
                 * We use the irq-unsafe __{inc|mod}_zone_page_stat because
                 * these counters are not modified in interrupt context, and
@@ -1169,7 +1169,7 @@ void do_page_add_anon_rmap(struct page *page,
 void page_add_new_anon_rmap(struct page *page,
        struct vm_area_struct *vma, unsigned long address, bool compound)
 {
-       int nr = compound ? hpage_nr_pages(page) : 1;
+       int nr = compound ? thp_nr_pages(page) : 1;
 
        VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
        __SetPageSwapBacked(page);
@@ -1469,7 +1469,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                         * do this outside rmap routines.
                         */
                        VM_BUG_ON(!(flags & TTU_RMAP_LOCKED));
-                       if (huge_pmd_unshare(mm, &address, pvmw.pte)) {
+                       if (huge_pmd_unshare(mm, vma, &address, pvmw.pte)) {
                                /*
                                 * huge_pmd_unshare unmapped an entire PMD
                                 * page.  There is no way of knowing exactly
@@ -1860,7 +1860,7 @@ static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
                return;
 
        pgoff_start = page_to_pgoff(page);
-       pgoff_end = pgoff_start + hpage_nr_pages(page) - 1;
+       pgoff_end = pgoff_start + thp_nr_pages(page) - 1;
        anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
                        pgoff_start, pgoff_end) {
                struct vm_area_struct *vma = avc->vma;
@@ -1913,7 +1913,7 @@ static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
                return;
 
        pgoff_start = page_to_pgoff(page);
-       pgoff_end = pgoff_start + hpage_nr_pages(page) - 1;
+       pgoff_end = pgoff_start + thp_nr_pages(page) - 1;
        if (!locked)
                i_mmap_lock_read(mapping);
        vma_interval_tree_foreach(vma, &mapping->i_mmap,
index 2a99df7..2613371 100644 (file)
@@ -7,6 +7,7 @@
  */
 #define pr_fmt(fmt) "rodata_test: " fmt
 
+#include <linux/rodata_test.h>
 #include <linux/uaccess.h>
 #include <asm/sections.h>
 
index eb6b36d..271548c 100644 (file)
@@ -1434,7 +1434,8 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
                list_add(&info->swaplist, &shmem_swaplist);
 
        if (add_to_swap_cache(page, swap,
-                       __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN) == 0) {
+                       __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN,
+                       NULL) == 0) {
                spin_lock_irq(&info->lock);
                shmem_recalc_inode(inode);
                info->swapped++;
@@ -1685,7 +1686,7 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
  * Swap in the page pointed to by *pagep.
  * Caller has to make sure that *pagep contains a valid swapped page.
  * Returns 0 and the page in pagep if success. On failure, returns the
- * the error code and NULL in *pagep.
+ * error code and NULL in *pagep.
  */
 static int shmem_swapin_page(struct inode *inode, pgoff_t index,
                             struct page **pagep, enum sgp_type sgp,
index a513f32..f9ccd5d 100644 (file)
@@ -419,7 +419,7 @@ static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
        /*
         * On destruction, SLAB_TYPESAFE_BY_RCU kmem_caches are put on the
         * @slab_caches_to_rcu_destroy list.  The slab pages are freed
-        * through RCU and and the associated kmem_cache are dereferenced
+        * through RCU and the associated kmem_cache are dereferenced
         * while freeing the pages, so the kmem_caches should be freed only
         * after the pending RCU operations are finished.  As rcu_barrier()
         * is a pretty slow operation, we batch all pending destructions
index de257c0..d16d65d 100644 (file)
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -241,7 +241,7 @@ static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec,
                del_page_from_lru_list(page, lruvec, page_lru(page));
                ClearPageActive(page);
                add_page_to_lru_list_tail(page, lruvec, page_lru(page));
-               (*pgmoved) += hpage_nr_pages(page);
+               (*pgmoved) += thp_nr_pages(page);
        }
 }
 
@@ -312,7 +312,7 @@ void lru_note_cost(struct lruvec *lruvec, bool file, unsigned int nr_pages)
 void lru_note_cost_page(struct page *page)
 {
        lru_note_cost(mem_cgroup_page_lruvec(page, page_pgdat(page)),
-                     page_is_file_lru(page), hpage_nr_pages(page));
+                     page_is_file_lru(page), thp_nr_pages(page));
 }
 
 static void __activate_page(struct page *page, struct lruvec *lruvec,
@@ -320,7 +320,7 @@ static void __activate_page(struct page *page, struct lruvec *lruvec,
 {
        if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
                int lru = page_lru_base_type(page);
-               int nr_pages = hpage_nr_pages(page);
+               int nr_pages = thp_nr_pages(page);
 
                del_page_from_lru_list(page, lruvec, lru);
                SetPageActive(page);
@@ -476,30 +476,31 @@ void lru_cache_add(struct page *page)
 EXPORT_SYMBOL(lru_cache_add);
 
 /**
- * lru_cache_add_active_or_unevictable
+ * lru_cache_add_inactive_or_unevictable
  * @page:  the page to be added to LRU
  * @vma:   vma in which page is mapped for determining reclaimability
  *
- * Place @page on the active or unevictable LRU list, depending on its
+ * Place @page on the inactive or unevictable LRU list, depending on its
  * evictability.  Note that if the page is not evictable, it goes
  * directly back onto it's zone's unevictable list, it does NOT use a
  * per cpu pagevec.
  */
-void lru_cache_add_active_or_unevictable(struct page *page,
+void lru_cache_add_inactive_or_unevictable(struct page *page,
                                         struct vm_area_struct *vma)
 {
+       bool unevictable;
+
        VM_BUG_ON_PAGE(PageLRU(page), page);
 
-       if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED))
-               SetPageActive(page);
-       else if (!TestSetPageMlocked(page)) {
+       unevictable = (vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) == VM_LOCKED;
+       if (unlikely(unevictable) && !TestSetPageMlocked(page)) {
                /*
                 * We use the irq-unsafe __mod_zone_page_stat because this
                 * counter is not modified from interrupt context, and the pte
                 * lock is held(spinlock), which implies preemption disabled.
                 */
                __mod_zone_page_state(page_zone(page), NR_MLOCK,
-                                   hpage_nr_pages(page));
+                                   thp_nr_pages(page));
                count_vm_event(UNEVICTABLE_PGMLOCKED);
        }
        lru_cache_add(page);
@@ -531,7 +532,7 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
 {
        int lru;
        bool active;
-       int nr_pages = hpage_nr_pages(page);
+       int nr_pages = thp_nr_pages(page);
 
        if (!PageLRU(page))
                return;
@@ -579,7 +580,7 @@ static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
 {
        if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
                int lru = page_lru_base_type(page);
-               int nr_pages = hpage_nr_pages(page);
+               int nr_pages = thp_nr_pages(page);
 
                del_page_from_lru_list(page, lruvec, lru + LRU_ACTIVE);
                ClearPageActive(page);
@@ -598,7 +599,7 @@ static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec,
        if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
            !PageSwapCache(page) && !PageUnevictable(page)) {
                bool active = PageActive(page);
-               int nr_pages = hpage_nr_pages(page);
+               int nr_pages = thp_nr_pages(page);
 
                del_page_from_lru_list(page, lruvec,
                                       LRU_INACTIVE_ANON + active);
@@ -631,7 +632,8 @@ void lru_add_drain_cpu(int cpu)
                __pagevec_lru_add(pvec);
 
        pvec = &per_cpu(lru_rotate.pvec, cpu);
-       if (pagevec_count(pvec)) {
+       /* Disabling interrupts below acts as a compiler barrier. */
+       if (data_race(pagevec_count(pvec))) {
                unsigned long flags;
 
                /* No harm done if a racing interrupt already did this */
@@ -792,7 +794,7 @@ void lru_add_drain_all(void)
                struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
 
                if (pagevec_count(&per_cpu(lru_pvecs.lru_add, cpu)) ||
-                   pagevec_count(&per_cpu(lru_rotate.pvec, cpu)) ||
+                   data_race(pagevec_count(&per_cpu(lru_rotate.pvec, cpu))) ||
                    pagevec_count(&per_cpu(lru_pvecs.lru_deactivate_file, cpu)) ||
                    pagevec_count(&per_cpu(lru_pvecs.lru_deactivate, cpu)) ||
                    pagevec_count(&per_cpu(lru_pvecs.lru_lazyfree, cpu)) ||
@@ -971,7 +973,7 @@ static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
 {
        enum lru_list lru;
        int was_unevictable = TestClearPageUnevictable(page);
-       int nr_pages = hpage_nr_pages(page);
+       int nr_pages = thp_nr_pages(page);
 
        VM_BUG_ON_PAGE(PageLRU(page), page);
 
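
The mm/swap.c hunk renames lru_cache_add_active_or_unevictable() to lru_cache_add_inactive_or_unevictable() and changes its policy: new anonymous pages now start on the inactive list unless the VMA is mlocked, where "mlocked" means (vm_flags & (VM_LOCKED | VM_SPECIAL)) == VM_LOCKED. The userspace sketch below only exercises that predicate; the flag values are illustrative, not the kernel's:

#include <stdio.h>

#define VM_LOCKED	0x1
#define VM_SPECIAL	0x2	/* stand-in for the kernel's VM_SPECIAL mask */

/*
 * Userspace model of the check in lru_cache_add_inactive_or_unevictable():
 * a page is treated as unevictable only when its VMA is mlocked *and* not
 * special.
 */
static int unevictable(unsigned long vm_flags)
{
	return (vm_flags & (VM_LOCKED | VM_SPECIAL)) == VM_LOCKED;
}

int main(void)
{
	printf("plain VMA:           %d\n", unevictable(0));
	printf("mlocked VMA:         %d\n", unevictable(VM_LOCKED));
	printf("mlocked special VMA: %d\n", unevictable(VM_LOCKED | VM_SPECIAL));
	return 0;
}
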
index e82f4f8..c16eebb 100644 (file)
@@ -57,8 +57,8 @@ static bool enable_vma_readahead __read_mostly = true;
 #define GET_SWAP_RA_VAL(vma)                                   \
        (atomic_long_read(&(vma)->swap_readahead_info) ? : 4)
 
-#define INC_CACHE_INFO(x)      do { swap_cache_info.x++; } while (0)
-#define ADD_CACHE_INFO(x, nr)  do { swap_cache_info.x += (nr); } while (0)
+#define INC_CACHE_INFO(x)      data_race(swap_cache_info.x++)
+#define ADD_CACHE_INFO(x, nr)  data_race(swap_cache_info.x += (nr))
 
 static struct {
        unsigned long add_total;
@@ -106,16 +106,32 @@ void show_swap_cache_info(void)
        printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
 }
 
+void *get_shadow_from_swap_cache(swp_entry_t entry)
+{
+       struct address_space *address_space = swap_address_space(entry);
+       pgoff_t idx = swp_offset(entry);
+       struct page *page;
+
+       page = find_get_entry(address_space, idx);
+       if (xa_is_value(page))
+               return page;
+       if (page)
+               put_page(page);
+       return NULL;
+}
+
 /*
  * add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
  * but sets SwapCache flag and private instead of mapping and index.
  */
-int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp)
+int add_to_swap_cache(struct page *page, swp_entry_t entry,
+                       gfp_t gfp, void **shadowp)
 {
        struct address_space *address_space = swap_address_space(entry);
        pgoff_t idx = swp_offset(entry);
        XA_STATE_ORDER(xas, &address_space->i_pages, idx, compound_order(page));
-       unsigned long i, nr = hpage_nr_pages(page);
+       unsigned long i, nr = thp_nr_pages(page);
+       void *old;
 
        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(PageSwapCache(page), page);
@@ -125,16 +141,25 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp)
        SetPageSwapCache(page);
 
        do {
+               unsigned long nr_shadows = 0;
+
                xas_lock_irq(&xas);
                xas_create_range(&xas);
                if (xas_error(&xas))
                        goto unlock;
                for (i = 0; i < nr; i++) {
                        VM_BUG_ON_PAGE(xas.xa_index != idx + i, page);
+                       old = xas_load(&xas);
+                       if (xa_is_value(old)) {
+                               nr_shadows++;
+                               if (shadowp)
+                                       *shadowp = old;
+                       }
                        set_page_private(page + i, entry.val + i);
                        xas_store(&xas, page);
                        xas_next(&xas);
                }
+               address_space->nrexceptional -= nr_shadows;
                address_space->nrpages += nr;
                __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
                ADD_CACHE_INFO(add_total, nr);
@@ -154,10 +179,11 @@ unlock:
  * This must be called only on pages that have
  * been verified to be in the swap cache.
  */
-void __delete_from_swap_cache(struct page *page, swp_entry_t entry)
+void __delete_from_swap_cache(struct page *page,
+                       swp_entry_t entry, void *shadow)
 {
        struct address_space *address_space = swap_address_space(entry);
-       int i, nr = hpage_nr_pages(page);
+       int i, nr = thp_nr_pages(page);
        pgoff_t idx = swp_offset(entry);
        XA_STATE(xas, &address_space->i_pages, idx);
 
@@ -166,12 +192,14 @@ void __delete_from_swap_cache(struct page *page, swp_entry_t entry)
        VM_BUG_ON_PAGE(PageWriteback(page), page);
 
        for (i = 0; i < nr; i++) {
-               void *entry = xas_store(&xas, NULL);
+               void *entry = xas_store(&xas, shadow);
                VM_BUG_ON_PAGE(entry != page, entry);
                set_page_private(page + i, 0);
                xas_next(&xas);
        }
        ClearPageSwapCache(page);
+       if (shadow)
+               address_space->nrexceptional += nr;
        address_space->nrpages -= nr;
        __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
        ADD_CACHE_INFO(del_total, nr);
@@ -208,7 +236,7 @@ int add_to_swap(struct page *page)
         * Add it to the swap cache.
         */
        err = add_to_swap_cache(page, entry,
-                       __GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);
+                       __GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN, NULL);
        if (err)
                /*
                 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
@@ -246,11 +274,42 @@ void delete_from_swap_cache(struct page *page)
        struct address_space *address_space = swap_address_space(entry);
 
        xa_lock_irq(&address_space->i_pages);
-       __delete_from_swap_cache(page, entry);
+       __delete_from_swap_cache(page, entry, NULL);
        xa_unlock_irq(&address_space->i_pages);
 
        put_swap_page(page, entry);
-       page_ref_sub(page, hpage_nr_pages(page));
+       page_ref_sub(page, thp_nr_pages(page));
+}
+
+void clear_shadow_from_swap_cache(int type, unsigned long begin,
+                               unsigned long end)
+{
+       unsigned long curr = begin;
+       void *old;
+
+       for (;;) {
+               unsigned long nr_shadows = 0;
+               swp_entry_t entry = swp_entry(type, curr);
+               struct address_space *address_space = swap_address_space(entry);
+               XA_STATE(xas, &address_space->i_pages, curr);
+
+               xa_lock_irq(&address_space->i_pages);
+               xas_for_each(&xas, old, end) {
+                       if (!xa_is_value(old))
+                               continue;
+                       xas_store(&xas, NULL);
+                       nr_shadows++;
+               }
+               address_space->nrexceptional -= nr_shadows;
+               xa_unlock_irq(&address_space->i_pages);
+
+               /* search the next swapcache until we reach the end */
+               curr >>= SWAP_ADDRESS_SPACE_SHIFT;
+               curr++;
+               curr <<= SWAP_ADDRESS_SPACE_SHIFT;
+               if (curr > end)
+                       break;
+       }
 }
 
 /* 
@@ -361,6 +420,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 {
        struct swap_info_struct *si;
        struct page *page;
+       void *shadow = NULL;
 
        *new_page_allocated = false;
 
@@ -429,7 +489,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
        __SetPageSwapBacked(page);
 
        /* May fail (-ENOMEM) if XArray node allocation failed. */
-       if (add_to_swap_cache(page, entry, gfp_mask & GFP_RECLAIM_MASK)) {
+       if (add_to_swap_cache(page, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow)) {
                put_swap_page(page, entry);
                goto fail_unlock;
        }
@@ -439,10 +499,8 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                goto fail_unlock;
        }
 
-       /* XXX: Move to lru_cache_add() when it supports new vs putback */
-       spin_lock_irq(&page_pgdat(page)->lru_lock);
-       lru_note_cost_page(page);
-       spin_unlock_irq(&page_pgdat(page)->lru_lock);
+       if (shadow)
+               workingset_refault(page, shadow);
 
        /* Caller will initiate read into locked page */
        SetPageWorkingset(page);
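
The swap_state.c changes let the swap cache keep shadow entries, small value entries left in the XArray slot when a page is reclaimed, which workingset_refault() later consumes; clear_shadow_from_swap_cache() scrubs them when the underlying swap slots are freed. The userspace sketch below models only the idea that a slot holds either a page pointer or a tagged value; the bit-0 tagging mirrors the xarray value-entry convention but is purely illustrative:

#include <stdio.h>
#include <stdint.h>

/*
 * Userspace model of a shadow entry: a swap-cache slot either holds a real
 * page pointer or a small tagged value (eviction information) left behind
 * when the page is reclaimed.
 */
static void *make_shadow(unsigned long data)
{
	return (void *)((data << 1) | 1);	/* bit 0 marks a value entry */
}

static int is_shadow(const void *entry)
{
	return (uintptr_t)entry & 1;
}

int main(void)
{
	int page = 42;			/* stands in for a struct page */
	void *slot;

	slot = &page;			/* page resident in the swap cache */
	printf("resident: shadow=%d\n", is_shadow(slot));

	slot = make_shadow(7);		/* page evicted, shadow left behind */
	printf("evicted:  shadow=%d data=%lu\n", is_shadow(slot),
	       (unsigned long)((uintptr_t)slot >> 1));
	return 0;
}
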
index 6c26916..12f59e6 100644 (file)
@@ -672,7 +672,7 @@ static void swap_range_alloc(struct swap_info_struct *si, unsigned long offset,
        if (offset == si->lowest_bit)
                si->lowest_bit += nr_entries;
        if (end == si->highest_bit)
-               si->highest_bit -= nr_entries;
+               WRITE_ONCE(si->highest_bit, si->highest_bit - nr_entries);
        si->inuse_pages += nr_entries;
        if (si->inuse_pages == si->pages) {
                si->lowest_bit = si->max;
@@ -696,6 +696,7 @@ static void add_to_avail_list(struct swap_info_struct *p)
 static void swap_range_free(struct swap_info_struct *si, unsigned long offset,
                            unsigned int nr_entries)
 {
+       unsigned long begin = offset;
        unsigned long end = offset + nr_entries - 1;
        void (*swap_slot_free_notify)(struct block_device *, unsigned long);
 
@@ -704,7 +705,7 @@ static void swap_range_free(struct swap_info_struct *si, unsigned long offset,
        if (end > si->highest_bit) {
                bool was_full = !si->highest_bit;
 
-               si->highest_bit = end;
+               WRITE_ONCE(si->highest_bit, end);
                if (was_full && (si->flags & SWP_WRITEOK))
                        add_to_avail_list(si);
        }
@@ -721,6 +722,7 @@ static void swap_range_free(struct swap_info_struct *si, unsigned long offset,
                        swap_slot_free_notify(si->bdev, offset);
                offset++;
        }
+       clear_shadow_from_swap_cache(si->type, begin, end);
 }
 
 static void set_cluster_next(struct swap_info_struct *si, unsigned long next)
@@ -868,7 +870,7 @@ checks:
                else
                        goto done;
        }
-       si->swap_map[offset] = usage;
+       WRITE_ONCE(si->swap_map[offset], usage);
        inc_cluster_info_page(si, si->cluster_info, offset);
        unlock_cluster(ci);
 
@@ -927,12 +929,13 @@ done:
 
 scan:
        spin_unlock(&si->lock);
-       while (++offset <= si->highest_bit) {
-               if (!si->swap_map[offset]) {
+       while (++offset <= READ_ONCE(si->highest_bit)) {
+               if (data_race(!si->swap_map[offset])) {
                        spin_lock(&si->lock);
                        goto checks;
                }
-               if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
+               if (vm_swap_full() &&
+                   READ_ONCE(si->swap_map[offset]) == SWAP_HAS_CACHE) {
                        spin_lock(&si->lock);
                        goto checks;
                }
@@ -944,11 +947,12 @@ scan:
        }
        offset = si->lowest_bit;
        while (offset < scan_base) {
-               if (!si->swap_map[offset]) {
+               if (data_race(!si->swap_map[offset])) {
                        spin_lock(&si->lock);
                        goto checks;
                }
-               if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
+               if (vm_swap_full() &&
+                   READ_ONCE(si->swap_map[offset]) == SWAP_HAS_CACHE) {
                        spin_lock(&si->lock);
                        goto checks;
                }
@@ -1147,7 +1151,7 @@ static struct swap_info_struct *__swap_info_get(swp_entry_t entry)
        p = swp_swap_info(entry);
        if (!p)
                goto bad_nofile;
-       if (!(p->flags & SWP_USED))
+       if (data_race(!(p->flags & SWP_USED)))
                goto bad_device;
        offset = swp_offset(entry);
        if (offset >= p->max)
@@ -1173,7 +1177,7 @@ static struct swap_info_struct *_swap_info_get(swp_entry_t entry)
        p = __swap_info_get(entry);
        if (!p)
                goto out;
-       if (!p->swap_map[swp_offset(entry)])
+       if (data_race(!p->swap_map[swp_offset(entry)]))
                goto bad_free;
        return p;
 
@@ -1242,7 +1246,10 @@ static unsigned char __swap_entry_free_locked(struct swap_info_struct *p,
        }
 
        usage = count | has_cache;
-       p->swap_map[offset] = usage ? : SWAP_HAS_CACHE;
+       if (usage)
+               WRITE_ONCE(p->swap_map[offset], usage);
+       else
+               WRITE_ONCE(p->swap_map[offset], SWAP_HAS_CACHE);
 
        return usage;
 }
@@ -1294,7 +1301,7 @@ struct swap_info_struct *get_swap_device(swp_entry_t entry)
                goto bad_nofile;
 
        rcu_read_lock();
-       if (!(si->flags & SWP_VALID))
+       if (data_race(!(si->flags & SWP_VALID)))
                goto unlock_out;
        offset = swp_offset(entry);
        if (offset >= si->max)
@@ -1368,7 +1375,7 @@ void put_swap_page(struct page *page, swp_entry_t entry)
        unsigned char *map;
        unsigned int i, free_entries = 0;
        unsigned char val;
-       int size = swap_entry_size(hpage_nr_pages(page));
+       int size = swap_entry_size(thp_nr_pages(page));
 
        si = _swap_info_get(entry);
        if (!si)
@@ -1915,7 +1922,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
                page_add_anon_rmap(page, vma, addr, false);
        } else { /* ksm created a completely new copy */
                page_add_new_anon_rmap(page, vma, addr, false);
-               lru_cache_add_active_or_unevictable(page, vma);
+               lru_cache_add_inactive_or_unevictable(page, vma);
        }
        swap_free(entry);
        /*
@@ -3482,7 +3489,7 @@ static int __swap_duplicate(swp_entry_t entry, unsigned char usage)
        } else
                err = -ENOENT;                  /* unused swap entry */
 
-       p->swap_map[offset] = count | has_cache;
+       WRITE_ONCE(p->swap_map[offset], count | has_cache);
 
 unlock_out:
        unlock_cluster_or_swap_info(p, ci);
index 660717a..b3de3c4 100644 (file)
@@ -43,7 +43,7 @@ static noinline int check_stack_object(const void *obj, unsigned long len)
 
        /*
         * Reject: object partially overlaps the stack (passing the
-        * the check above means at least one end is within the stack,
+        * check above means at least one end is within the stack,
         * so if this check fails, the other end is outside the stack).
         */
        if (obj < stack || stackend < obj + len)
index b804193..9a3d451 100644 (file)
@@ -123,7 +123,7 @@ static int mcopy_atomic_pte(struct mm_struct *dst_mm,
 
        inc_mm_counter(dst_mm, MM_ANONPAGES);
        page_add_new_anon_rmap(page, dst_vma, dst_addr, false);
-       lru_cache_add_active_or_unevictable(page, dst_vma);
+       lru_cache_add_inactive_or_unevictable(page, dst_vma);
 
        set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
 
index b482d24..be4724b 100644 (file)
@@ -104,6 +104,8 @@ static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
                if (pmd_none_or_clear_bad(pmd))
                        continue;
                vunmap_pte_range(pmd, addr, next, mask);
+
+               cond_resched();
        } while (pmd++, addr = next, addr != end);
 }
 
index 72da290..99e1796 100644 (file)
@@ -854,6 +854,7 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
 {
        unsigned long flags;
        int refcount;
+       void *shadow = NULL;
 
        BUG_ON(!PageLocked(page));
        BUG_ON(mapping != page_mapping(page));
@@ -896,13 +897,13 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
        if (PageSwapCache(page)) {
                swp_entry_t swap = { .val = page_private(page) };
                mem_cgroup_swapout(page, swap);
-               __delete_from_swap_cache(page, swap);
+               if (reclaimed && !mapping_exiting(mapping))
+                       shadow = workingset_eviction(page, target_memcg);
+               __delete_from_swap_cache(page, swap, shadow);
                xa_unlock_irqrestore(&mapping->i_pages, flags);
                put_swap_page(page, swap);
-               workingset_eviction(page, target_memcg);
        } else {
                void (*freepage)(struct page *);
-               void *shadow = NULL;
 
                freepage = mapping->a_ops->freepage;
                /*
@@ -998,8 +999,6 @@ static enum page_references page_check_references(struct page *page,
                return PAGEREF_RECLAIM;
 
        if (referenced_ptes) {
-               if (PageSwapBacked(page))
-                       return PAGEREF_ACTIVATE;
                /*
                 * All mapped pages start out with page table
                 * references from the instantiating fault, so we need
@@ -1022,7 +1021,7 @@ static enum page_references page_check_references(struct page *page,
                /*
                 * Activate file-backed executable pages after first usage.
                 */
-               if (vm_flags & VM_EXEC)
+               if ((vm_flags & VM_EXEC) && !PageSwapBacked(page))
                        return PAGEREF_ACTIVATE;
 
                return PAGEREF_KEEP;
@@ -1355,7 +1354,7 @@ static unsigned int shrink_page_list(struct list_head *page_list,
                        case PAGE_ACTIVATE:
                                goto activate_locked;
                        case PAGE_SUCCESS:
-                               stat->nr_pageout += hpage_nr_pages(page);
+                               stat->nr_pageout += thp_nr_pages(page);
 
                                if (PageWriteback(page))
                                        goto keep;
@@ -1863,7 +1862,7 @@ static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec,
                SetPageLRU(page);
                lru = page_lru(page);
 
-               nr_pages = hpage_nr_pages(page);
+               nr_pages = thp_nr_pages(page);
                update_lru_size(lruvec, lru, page_zonenum(page), nr_pages);
                list_move(&page->lru, &lruvec->lists[lru]);
 
@@ -2066,7 +2065,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
                         * so we ignore them here.
                         */
                        if ((vm_flags & VM_EXEC) && page_is_file_lru(page)) {
-                               nr_rotated += hpage_nr_pages(page);
+                               nr_rotated += thp_nr_pages(page);
                                list_add(&page->lru, &l_active);
                                continue;
                        }
@@ -2685,7 +2684,10 @@ again:
        if (!sc->force_deactivate) {
                unsigned long refaults;
 
-               if (inactive_is_low(target_lruvec, LRU_INACTIVE_ANON))
+               refaults = lruvec_page_state(target_lruvec,
+                               WORKINGSET_ACTIVATE_ANON);
+               if (refaults != target_lruvec->refaults[0] ||
+                       inactive_is_low(target_lruvec, LRU_INACTIVE_ANON))
                        sc->may_deactivate |= DEACTIVATE_ANON;
                else
                        sc->may_deactivate &= ~DEACTIVATE_ANON;
@@ -2696,8 +2698,8 @@ again:
                 * rid of any stale active pages quickly.
                 */
                refaults = lruvec_page_state(target_lruvec,
-                                            WORKINGSET_ACTIVATE);
-               if (refaults != target_lruvec->refaults ||
+                               WORKINGSET_ACTIVATE_FILE);
+               if (refaults != target_lruvec->refaults[1] ||
                    inactive_is_low(target_lruvec, LRU_INACTIVE_FILE))
                        sc->may_deactivate |= DEACTIVATE_FILE;
                else
@@ -2796,7 +2798,7 @@ again:
                        set_bit(PGDAT_DIRTY, &pgdat->flags);
 
                /*
-                * If kswapd scans pages marked marked for immediate
+                * If kswapd scans pages marked for immediate
                 * reclaim and under writeback (nr_immediate), it
                 * implies that pages are cycling through the LRU
                 * faster than they are written so also forcibly stall.
@@ -2974,8 +2976,10 @@ static void snapshot_refaults(struct mem_cgroup *target_memcg, pg_data_t *pgdat)
        unsigned long refaults;
 
        target_lruvec = mem_cgroup_lruvec(target_memcg, pgdat);
-       refaults = lruvec_page_state(target_lruvec, WORKINGSET_ACTIVATE);
-       target_lruvec->refaults = refaults;
+       refaults = lruvec_page_state(target_lruvec, WORKINGSET_ACTIVATE_ANON);
+       target_lruvec->refaults[0] = refaults;
+       refaults = lruvec_page_state(target_lruvec, WORKINGSET_ACTIVATE_FILE);
+       target_lruvec->refaults[1] = refaults;
 }
 
 /*
@@ -3369,7 +3373,7 @@ static bool pgdat_watermark_boosted(pg_data_t *pgdat, int highest_zoneidx)
        /*
         * Check for watermark boosts top-down as the higher zones
         * are more likely to be boosted. Both watermarks and boosts
-        * should not be checked at the time time as reclaim would
+        * should not be checked at the same time as reclaim would
         * start prematurely when there is no boosting and a lower
         * zone is balanced.
         */
index 2b866cb..e670f91 100644 (file)
@@ -1096,6 +1096,24 @@ static int __fragmentation_index(unsigned int order, struct contig_page_info *in
        return 1000 - div_u64( (1000+(div_u64(info->free_pages * 1000ULL, requested))), info->free_blocks_total);
 }
 
+/*
+ * Calculates external fragmentation within a zone wrt the given order.
+ * It is defined as the percentage of pages found in blocks of size
+ * less than 1 << order. It returns values in range [0, 100].
+ */
+unsigned int extfrag_for_order(struct zone *zone, unsigned int order)
+{
+       struct contig_page_info info;
+
+       fill_contig_page_info(zone, order, &info);
+       if (info.free_pages == 0)
+               return 0;
+
+       return div_u64((info.free_pages -
+                       (info.free_blocks_suitable << order)) * 100,
+                       info.free_pages);
+}
+
 /* Same as __fragmentation index but allocs contig_page_info on stack */
 int fragmentation_index(struct zone *zone, unsigned int order)
 {
@@ -1167,9 +1185,12 @@ const char * const vmstat_text[] = {
        "nr_isolated_anon",
        "nr_isolated_file",
        "workingset_nodes",
-       "workingset_refault",
-       "workingset_activate",
-       "workingset_restore",
+       "workingset_refault_anon",
+       "workingset_refault_file",
+       "workingset_activate_anon",
+       "workingset_activate_file",
+       "workingset_restore_anon",
+       "workingset_restore_file",
        "workingset_nodereclaim",
        "nr_anon_pages",
        "nr_mapped",
@@ -1256,6 +1277,9 @@ const char * const vmstat_text[] = {
 #ifdef CONFIG_MIGRATION
        "pgmigrate_success",
        "pgmigrate_fail",
+       "thp_migration_success",
+       "thp_migration_fail",
+       "thp_migration_split",
 #endif
 #ifdef CONFIG_COMPACTION
        "compact_migrate_scanned",
@@ -1618,12 +1642,6 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
                   zone->present_pages,
                   zone_managed_pages(zone));
 
-       /* If unpopulated, no other information is useful */
-       if (!populated_zone(zone)) {
-               seq_putc(m, '\n');
-               return;
-       }
-
        seq_printf(m,
                   "\n        protection: (%ld",
                   zone->lowmem_reserve[0]);
@@ -1631,6 +1649,12 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
                seq_printf(m, ", %ld", zone->lowmem_reserve[i]);
        seq_putc(m, ')');
 
+       /* If unpopulated, no other information is useful */
+       if (!populated_zone(zone)) {
+               seq_putc(m, '\n');
+               return;
+       }
+
        for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
                seq_printf(m, "\n      %-12s %lu", zone_stat_name(i),
                           zone_page_state(zone, i));
index b199726..92e6611 100644 (file)
@@ -6,6 +6,7 @@
  */
 
 #include <linux/memcontrol.h>
+#include <linux/mm_inline.h>
 #include <linux/writeback.h>
 #include <linux/shmem_fs.h>
 #include <linux/pagemap.h>
@@ -262,7 +263,7 @@ void *workingset_eviction(struct page *page, struct mem_cgroup *target_memcg)
        VM_BUG_ON_PAGE(!PageLocked(page), page);
 
        lruvec = mem_cgroup_lruvec(target_memcg, pgdat);
-       workingset_age_nonresident(lruvec, hpage_nr_pages(page));
+       workingset_age_nonresident(lruvec, thp_nr_pages(page));
        /* XXX: target_memcg can be NULL, go through lruvec */
        memcgid = mem_cgroup_id(lruvec_memcg(lruvec));
        eviction = atomic_long_read(&lruvec->nonresident_age);
@@ -280,6 +281,7 @@ void *workingset_eviction(struct page *page, struct mem_cgroup *target_memcg)
  */
 void workingset_refault(struct page *page, void *shadow)
 {
+       bool file = page_is_file_lru(page);
        struct mem_cgroup *eviction_memcg;
        struct lruvec *eviction_lruvec;
        unsigned long refault_distance;
@@ -346,27 +348,34 @@ void workingset_refault(struct page *page, void *shadow)
        memcg = page_memcg(page);
        lruvec = mem_cgroup_lruvec(memcg, pgdat);
 
-       inc_lruvec_state(lruvec, WORKINGSET_REFAULT);
+       inc_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + file);
 
        /*
         * Compare the distance to the existing workingset size. We
         * don't activate pages that couldn't stay resident even if
-        * all the memory was available to the page cache. Whether
-        * cache can compete with anon or not depends on having swap.
+        * all the memory was available to the workingset. Whether
+        * workingset competition needs to consider anon or not depends
+        * on having swap.
         */
        workingset_size = lruvec_page_state(eviction_lruvec, NR_ACTIVE_FILE);
-       if (mem_cgroup_get_nr_swap_pages(memcg) > 0) {
+       if (!file) {
                workingset_size += lruvec_page_state(eviction_lruvec,
-                                                    NR_INACTIVE_ANON);
+                                                    NR_INACTIVE_FILE);
+       }
+       if (mem_cgroup_get_nr_swap_pages(memcg) > 0) {
                workingset_size += lruvec_page_state(eviction_lruvec,
                                                     NR_ACTIVE_ANON);
+               if (file) {
+                       workingset_size += lruvec_page_state(eviction_lruvec,
+                                                    NR_INACTIVE_ANON);
+               }
        }
        if (refault_distance > workingset_size)
                goto out;
 
        SetPageActive(page);
-       workingset_age_nonresident(lruvec, hpage_nr_pages(page));
-       inc_lruvec_state(lruvec, WORKINGSET_ACTIVATE);
+       workingset_age_nonresident(lruvec, thp_nr_pages(page));
+       inc_lruvec_state(lruvec, WORKINGSET_ACTIVATE_BASE + file);
 
        /* Page was active prior to eviction */
        if (workingset) {
@@ -375,7 +384,7 @@ void workingset_refault(struct page *page, void *shadow)
                spin_lock_irq(&page_pgdat(page)->lru_lock);
                lru_note_cost_page(page);
                spin_unlock_irq(&page_pgdat(page)->lru_lock);
-               inc_lruvec_state(lruvec, WORKINGSET_RESTORE);
+               inc_lruvec_state(lruvec, WORKINGSET_RESTORE_BASE + file);
        }
 out:
        rcu_read_unlock();
@@ -402,7 +411,7 @@ void workingset_activation(struct page *page)
        if (!mem_cgroup_disabled() && !memcg)
                goto out;
        lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
-       workingset_age_nonresident(lruvec, hpage_nr_pages(page));
+       workingset_age_nonresident(lruvec, thp_nr_pages(page));
 out:
        rcu_read_unlock();
 }
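
The refault hunks above grow the comparison window depending on whether the refaulting page is anon or file backed and whether swap is available. The sketch below is a loose userspace model of that activation rule, using hypothetical counter names in place of the lruvec statistics; it is meant to show the shape of the comparison, not to mirror the kernel code.

#include <stdbool.h>
#include <stdio.h>

struct lru_counts {
	unsigned long active_file, inactive_file;
	unsigned long active_anon, inactive_anon;
};

/* Decide whether a refaulting page deserves activation (simplified model). */
static bool should_activate(const struct lru_counts *c,
			    unsigned long refault_distance,
			    bool refaulting_file, bool have_swap)
{
	unsigned long workingset = c->active_file;

	if (!refaulting_file)		/* anon refaults also compete with inactive file */
		workingset += c->inactive_file;
	if (have_swap) {
		workingset += c->active_anon;
		if (refaulting_file)	/* file refaults also compete with inactive anon */
			workingset += c->inactive_anon;
	}
	return refault_distance <= workingset;
}

int main(void)
{
	struct lru_counts c = { 4000, 6000, 3000, 2000 };

	printf("file refault, distance 8000: %d\n",
	       should_activate(&c, 8000, true, true));	/* 1: 4000+3000+2000 >= 8000 */
	return 0;
}
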
index 8636692..3744a2d 100644 (file)
@@ -239,15 +239,15 @@ const char *zpool_get_type(struct zpool *zpool)
 }
 
 /**
- * zpool_malloc_support_movable() - Check if the zpool support
- * allocate movable memory
+ * zpool_malloc_support_movable() - Check if the zpool supports
+ *     allocating movable memory
  * @zpool:     The zpool to check
  *
- * This returns if the zpool support allocate movable memory.
+ * This returns if the zpool supports allocating movable memory.
  *
  * Implementations must guarantee this to be thread-safe.
  *
- * Returns: true if if the zpool support allocate movable memory, false if not
+ * Returns: true if the zpool supports allocating movable memory, false if not
  */
 bool zpool_malloc_support_movable(struct zpool *zpool)
 {
index 952a01e..c36fdff 100644 (file)
@@ -79,7 +79,7 @@
 
 /*
  * Object location (<PFN>, <obj_idx>) is encoded as
- * as single (unsigned long) handle value.
+ * a single (unsigned long) handle value.
  *
  * Note that object index <obj_idx> starts from 0.
  *
index 12ecacf..c0762a3 100644 (file)
@@ -950,7 +950,7 @@ static int p9_bind_privport(struct socket *sock)
 
        memset(&cl, 0, sizeof(cl));
        cl.sin_family = AF_INET;
-       cl.sin_addr.s_addr = INADDR_ANY;
+       cl.sin_addr.s_addr = htonl(INADDR_ANY);
        for (port = p9_ipport_resv_max; port >= p9_ipport_resv_min; port--) {
                cl.sin_port = htons((ushort)port);
                err = kernel_bind(sock, (struct sockaddr *)&cl, sizeof(cl));
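
The p9_bind_privport change above is a byte-order fix: the address fields of struct sockaddr_in are defined to hold network byte order, so INADDR_ANY should pass through htonl() even though its numeric value (zero) happens to be the same either way. A minimal userspace bind() following the same convention (the port number is an arbitrary example):

#include <arpa/inet.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_in addr;
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0)
		return 1;

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_addr.s_addr = htonl(INADDR_ANY);  /* network byte order, as in the fix */
	addr.sin_port = htons(50000);              /* arbitrary example port */

	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		perror("bind");

	close(fd);
	return 0;
}
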
index 1641f41..ebe33b6 100644 (file)
@@ -2238,6 +2238,10 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
        struct ebt_table *t;
        struct net *net = sock_net(sk);
 
+       if ((cmd == EBT_SO_GET_INFO || cmd == EBT_SO_GET_INIT_INFO) &&
+           *len != sizeof(struct compat_ebt_replace))
+               return -EINVAL;
+
        if (copy_from_user(&tmp, user, sizeof(tmp)))
                return -EFAULT;
 
index 8096732..8d033a7 100644 (file)
@@ -168,6 +168,7 @@ static unsigned int nf_ct_br_defrag4(struct sk_buff *skb,
 static unsigned int nf_ct_br_defrag6(struct sk_buff *skb,
                                     const struct nf_hook_state *state)
 {
+#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
        u16 zone_id = NF_CT_DEFAULT_ZONE_ID;
        enum ip_conntrack_info ctinfo;
        struct br_input_skb_cb cb;
@@ -180,14 +181,17 @@ static unsigned int nf_ct_br_defrag6(struct sk_buff *skb,
 
        br_skb_cb_save(skb, &cb, sizeof(struct inet6_skb_parm));
 
-       err = nf_ipv6_br_defrag(state->net, skb,
-                               IP_DEFRAG_CONNTRACK_BRIDGE_IN + zone_id);
+       err = nf_ct_frag6_gather(state->net, skb,
+                                IP_DEFRAG_CONNTRACK_BRIDGE_IN + zone_id);
        /* queued */
        if (err == -EINPROGRESS)
                return NF_STOLEN;
 
        br_skb_cb_restore(skb, &cb, IP6CB(skb)->frag_max_size);
        return err == 0 ? NF_ACCEPT : NF_DROP;
+#else
+       return NF_ACCEPT;
+#endif
 }
 
 static int nf_ct_br_ip_check(const struct sk_buff *skb)
index 78ff9b3..b93876c 100644 (file)
@@ -398,6 +398,7 @@ static int j1939_sk_init(struct sock *sk)
        spin_lock_init(&jsk->sk_session_queue_lock);
        INIT_LIST_HEAD(&jsk->sk_session_queue);
        sk->sk_destruct = j1939_sk_sock_destruct;
+       sk->sk_protocol = CAN_J1939;
 
        return 0;
 }
@@ -466,6 +467,14 @@ static int j1939_sk_bind(struct socket *sock, struct sockaddr *uaddr, int len)
                        goto out_release_sock;
                }
 
+               if (!ndev->ml_priv) {
+                       netdev_warn_once(ndev,
+                                        "No CAN mid layer private allocated, please fix your driver and use alloc_candev()!\n");
+                       dev_put(ndev);
+                       ret = -ENODEV;
+                       goto out_release_sock;
+               }
+
                priv = j1939_netdev_start(ndev);
                dev_put(ndev);
                if (IS_ERR(priv)) {
@@ -553,6 +562,11 @@ static int j1939_sk_connect(struct socket *sock, struct sockaddr *uaddr,
 static void j1939_sk_sock2sockaddr_can(struct sockaddr_can *addr,
                                       const struct j1939_sock *jsk, int peer)
 {
+       /* There are two holes (2 bytes and 3 bytes) to clear to avoid
+        * leaking kernel information to user space.
+        */
+       memset(addr, 0, J1939_MIN_NAMELEN);
+
        addr->can_family = AF_CAN;
        addr->can_ifindex = jsk->ifindex;
        addr->can_addr.j1939.pgn = jsk->addr.pgn;
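
The memset() added above exists because the sockaddr layout contains padding holes, and copying the struct to user space without clearing them would leak stale kernel stack bytes. The sketch below shows the same pattern on a hypothetical struct with padding of its own:

#include <stdio.h>
#include <string.h>

/* Hypothetical output struct: 2 bytes of padding follow 'family'. */
struct addr_out {
	short family;
	int ifindex;
	long long name;
};

/* Zero the whole struct first so the padding never carries old data. */
static void fill_addr(struct addr_out *out, int ifindex, long long name)
{
	memset(out, 0, sizeof(*out));
	out->family = 1;
	out->ifindex = ifindex;
	out->name = name;
}

int main(void)
{
	struct addr_out a;

	fill_addr(&a, 3, 0x1234);
	printf("family=%d ifindex=%d\n", a.family, a.ifindex);
	return 0;
}
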
index 9f99af5..a8dd956 100644 (file)
@@ -352,17 +352,16 @@ void j1939_session_skb_queue(struct j1939_session *session,
        skb_queue_tail(&session->skb_queue, skb);
 }
 
-static struct sk_buff *j1939_session_skb_find(struct j1939_session *session)
+static struct
+sk_buff *j1939_session_skb_find_by_offset(struct j1939_session *session,
+                                         unsigned int offset_start)
 {
        struct j1939_priv *priv = session->priv;
+       struct j1939_sk_buff_cb *do_skcb;
        struct sk_buff *skb = NULL;
        struct sk_buff *do_skb;
-       struct j1939_sk_buff_cb *do_skcb;
-       unsigned int offset_start;
        unsigned long flags;
 
-       offset_start = session->pkt.dpo * 7;
-
        spin_lock_irqsave(&session->skb_queue.lock, flags);
        skb_queue_walk(&session->skb_queue, do_skb) {
                do_skcb = j1939_skb_to_cb(do_skb);
@@ -382,6 +381,14 @@ static struct sk_buff *j1939_session_skb_find(struct j1939_session *session)
        return skb;
 }
 
+static struct sk_buff *j1939_session_skb_find(struct j1939_session *session)
+{
+       unsigned int offset_start;
+
+       offset_start = session->pkt.dpo * 7;
+       return j1939_session_skb_find_by_offset(session, offset_start);
+}
+
 /* see if we are receiver
  * returns 0 for broadcasts, although we will receive them
  */
@@ -716,10 +723,12 @@ static int j1939_session_tx_rts(struct j1939_session *session)
                return ret;
 
        session->last_txcmd = dat[0];
-       if (dat[0] == J1939_TP_CMD_BAM)
+       if (dat[0] == J1939_TP_CMD_BAM) {
                j1939_tp_schedule_txtimer(session, 50);
-
-       j1939_tp_set_rxtimeout(session, 1250);
+               j1939_tp_set_rxtimeout(session, 250);
+       } else {
+               j1939_tp_set_rxtimeout(session, 1250);
+       }
 
        netdev_dbg(session->priv->ndev, "%s: 0x%p\n", __func__, session);
 
@@ -766,7 +775,7 @@ static int j1939_session_tx_dat(struct j1939_session *session)
        int ret = 0;
        u8 dat[8];
 
-       se_skb = j1939_session_skb_find(session);
+       se_skb = j1939_session_skb_find_by_offset(session, session->pkt.tx * 7);
        if (!se_skb)
                return -ENOBUFS;
 
@@ -787,6 +796,18 @@ static int j1939_session_tx_dat(struct j1939_session *session)
                if (len > 7)
                        len = 7;
 
+               if (offset + len > se_skb->len) {
+                       netdev_err_once(priv->ndev,
+                                       "%s: 0x%p: requested data outside of queued buffer: offset %i, len %i, pkt.tx: %i\n",
+                                       __func__, session, skcb->offset, se_skb->len , session->pkt.tx);
+                       return -EOVERFLOW;
+               }
+
+               if (!len) {
+                       ret = -ENOBUFS;
+                       break;
+               }
+
                memcpy(&dat[1], &tpdat[offset], len);
                ret = j1939_tp_tx_dat(session, dat, len + 1);
                if (ret < 0) {
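
The new length checks above refuse to read past the end of the queued buffer instead of trusting the computed offset. A generic userspace version of the same guard, with hypothetical names and an error convention borrowed from errno:

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Copy at most 7 bytes from buf at 'offset', rejecting out-of-range requests. */
static int copy_chunk(unsigned char *dst, const unsigned char *buf,
		      size_t buf_len, size_t offset, size_t len)
{
	if (len > 7)
		len = 7;
	if (offset + len > buf_len)	/* requested data lies outside the buffer */
		return -EOVERFLOW;
	if (len == 0)
		return -ENOBUFS;

	memcpy(dst, buf + offset, len);
	return (int)len;
}

int main(void)
{
	unsigned char buf[16] = { 0 };
	unsigned char out[8];

	printf("in range: %d\n", copy_chunk(out, buf, sizeof(buf), 7, 7));
	printf("too far:  %d\n", copy_chunk(out, buf, sizeof(buf), 14, 7));
	return 0;
}
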
@@ -1055,9 +1076,9 @@ static void __j1939_session_cancel(struct j1939_session *session,
        lockdep_assert_held(&session->priv->active_session_list_lock);
 
        session->err = j1939_xtp_abort_to_errno(priv, err);
+       session->state = J1939_SESSION_WAITING_ABORT;
        /* do not send aborts on incoming broadcasts */
        if (!j1939_cb_is_broadcast(&session->skcb)) {
-               session->state = J1939_SESSION_WAITING_ABORT;
                j1939_xtp_tx_abort(priv, &session->skcb,
                                   !session->transmission,
                                   err, session->skcb.addr.pgn);
@@ -1120,6 +1141,9 @@ static enum hrtimer_restart j1939_tp_txtimer(struct hrtimer *hrtimer)
                 * cleanup including propagation of the error to user space.
                 */
                break;
+       case -EOVERFLOW:
+               j1939_session_cancel(session, J1939_XTP_ABORT_ECTS_TOO_BIG);
+               break;
        case 0:
                session->tx_retry = 0;
                break;
@@ -1651,8 +1675,12 @@ static void j1939_xtp_rx_rts(struct j1939_priv *priv, struct sk_buff *skb,
                        return;
                }
                session = j1939_xtp_rx_rts_session_new(priv, skb);
-               if (!session)
+               if (!session) {
+                       if (cmd == J1939_TP_CMD_BAM && j1939_sk_recv_match(priv, skcb))
+                               netdev_info(priv->ndev, "%s: failed to create TP BAM session\n",
+                                           __func__);
                        return;
+               }
        } else {
                if (j1939_xtp_rx_rts_session_active(session, skb)) {
                        j1939_session_put(session);
@@ -1661,11 +1689,15 @@ static void j1939_xtp_rx_rts(struct j1939_priv *priv, struct sk_buff *skb,
        }
        session->last_cmd = cmd;
 
-       j1939_tp_set_rxtimeout(session, 1250);
-
-       if (cmd != J1939_TP_CMD_BAM && !session->transmission) {
-               j1939_session_txtimer_cancel(session);
-               j1939_tp_schedule_txtimer(session, 0);
+       if (cmd == J1939_TP_CMD_BAM) {
+               if (!session->transmission)
+                       j1939_tp_set_rxtimeout(session, 750);
+       } else {
+               if (!session->transmission) {
+                       j1939_session_txtimer_cancel(session);
+                       j1939_tp_schedule_txtimer(session, 0);
+               }
+               j1939_tp_set_rxtimeout(session, 1250);
        }
 
        j1939_session_put(session);
@@ -1716,6 +1748,7 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
        int offset;
        int nbytes;
        bool final = false;
+       bool remain = false;
        bool do_cts_eoma = false;
        int packet;
 
@@ -1750,7 +1783,8 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
                            __func__, session);
                goto out_session_cancel;
        }
-       se_skb = j1939_session_skb_find(session);
+
+       se_skb = j1939_session_skb_find_by_offset(session, packet * 7);
        if (!se_skb) {
                netdev_warn(priv->ndev, "%s: 0x%p: no skb found\n", __func__,
                            session);
@@ -1769,7 +1803,20 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
        }
 
        tpdat = se_skb->data;
-       memcpy(&tpdat[offset], &dat[1], nbytes);
+       if (!session->transmission) {
+               memcpy(&tpdat[offset], &dat[1], nbytes);
+       } else {
+               int err;
+
+               err = memcmp(&tpdat[offset], &dat[1], nbytes);
+               if (err)
+                       netdev_err_once(priv->ndev,
+                                       "%s: 0x%p: Data of RX-looped back packet (%*ph) doesn't match TX data (%*ph)!\n",
+                                       __func__, session,
+                                       nbytes, &dat[1],
+                                       nbytes, &tpdat[offset]);
+       }
+
        if (packet == session->pkt.rx)
                session->pkt.rx++;
 
@@ -1777,6 +1824,8 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
            j1939_cb_is_broadcast(&session->skcb)) {
                if (session->pkt.rx >= session->pkt.total)
                        final = true;
+               else
+                       remain = true;
        } else {
                /* never final, an EOMA must follow */
                if (session->pkt.rx >= session->pkt.last)
@@ -1784,7 +1833,11 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
        }
 
        if (final) {
+               j1939_session_timers_cancel(session);
                j1939_session_completed(session);
+       } else if (remain) {
+               if (!session->transmission)
+                       j1939_tp_set_rxtimeout(session, 750);
        } else if (do_cts_eoma) {
                j1939_tp_set_rxtimeout(session, 1250);
                if (!session->transmission)
@@ -1829,6 +1882,13 @@ static void j1939_xtp_rx_dat(struct j1939_priv *priv, struct sk_buff *skb)
                else
                        j1939_xtp_rx_dat_one(session, skb);
        }
+
+       if (j1939_cb_is_broadcast(skcb)) {
+               session = j1939_session_get_by_addr(priv, &skcb->addr, false,
+                                                   false);
+               if (session)
+                       j1939_xtp_rx_dat_one(session, skb);
+       }
 }
 
 /* j1939 main intf */
@@ -1920,7 +1980,7 @@ static void j1939_tp_cmd_recv(struct j1939_priv *priv, struct sk_buff *skb)
                if (j1939_tp_im_transmitter(skcb))
                        j1939_xtp_rx_rts(priv, skb, true);
 
-               if (j1939_tp_im_receiver(skcb))
+               if (j1939_tp_im_receiver(skcb) || j1939_cb_is_broadcast(skcb))
                        j1939_xtp_rx_rts(priv, skb, false);
 
                break;
@@ -1984,7 +2044,7 @@ int j1939_tp_recv(struct j1939_priv *priv, struct sk_buff *skb)
 {
        struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb);
 
-       if (!j1939_tp_im_involved_anydir(skcb))
+       if (!j1939_tp_im_involved_anydir(skcb) && !j1939_cb_is_broadcast(skcb))
                return 0;
 
        switch (skcb->addr.pgn) {
@@ -2017,6 +2077,10 @@ void j1939_simple_recv(struct j1939_priv *priv, struct sk_buff *skb)
        if (!skb->sk)
                return;
 
+       if (skb->sk->sk_family != AF_CAN ||
+           skb->sk->sk_protocol != CAN_J1939)
+               return;
+
        j1939_session_list_lock(priv);
        session = j1939_session_get_simple(priv, skb);
        j1939_session_list_unlock(priv);
index d7bec7a..f36f9a3 100644 (file)
@@ -13,7 +13,7 @@ config CEPH_LIB
          common functionality to both the Ceph filesystem and
          to the rados block device (rbd).
 
-         More information at http://ceph.newdream.net/.
+         More information at https://ceph.io/.
 
          If unsure, say N.
 
index 9a5850f..81e1e00 100644 (file)
@@ -4,7 +4,7 @@
 
 /*
  * Robert Jenkin's hash function.
- * http://burtleburtle.net/bob/hash/evahash.html
+ * https://burtleburtle.net/bob/hash/evahash.html
  * This is in the public domain.
  */
 #define mix(a, b, c)                                           \
index e5cc603..fe79f6d 100644 (file)
@@ -7,7 +7,7 @@
 
 /*
  * Robert Jenkins' function for mixing 32-bit values
- * http://burtleburtle.net/bob/hash/evahash.html
+ * https://burtleburtle.net/bob/hash/evahash.html
  * a, b = random bits, c = input and output
  */
 #define crush_hashmix(a, b, c) do {                    \
index 3f323ed..07e5614 100644 (file)
@@ -298,7 +298,7 @@ static __u64 crush_ln(unsigned int xin)
  *
  * for reference, see:
  *
- * http://en.wikipedia.org/wiki/Exponential_distribution#Distribution_of_the_minimum_of_exponential_random_variables
+ * https://en.wikipedia.org/wiki/Exponential_distribution#Distribution_of_the_minimum_of_exponential_random_variables
  *
  */
 
index 409d505..2110439 100644 (file)
@@ -223,6 +223,9 @@ static void dump_request(struct seq_file *s, struct ceph_osd_request *req)
                if (op->op == CEPH_OSD_OP_WATCH)
                        seq_printf(s, "-%s",
                                   ceph_osd_watch_op_name(op->watch.op));
+               else if (op->op == CEPH_OSD_OP_CALL)
+                       seq_printf(s, "-%s/%s", op->cls.class_name,
+                                  op->cls.method_name);
        }
 
        seq_putc(s, '\n');
index 2db8b44..e4fbcad 100644 (file)
@@ -525,7 +525,7 @@ EXPORT_SYMBOL(ceph_osdc_put_request);
 
 static void request_init(struct ceph_osd_request *req)
 {
-       /* req only, each op is zeroed in _osd_req_op_init() */
+       /* req only, each op is zeroed in osd_req_op_init() */
        memset(req, 0, sizeof(*req));
 
        kref_init(&req->r_kref);
@@ -746,8 +746,8 @@ EXPORT_SYMBOL(ceph_osdc_alloc_messages);
  * other information associated with them.  It also serves as a
  * common init routine for all the other init functions, below.
  */
-static struct ceph_osd_req_op *
-_osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which,
+struct ceph_osd_req_op *
+osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which,
                 u16 opcode, u32 flags)
 {
        struct ceph_osd_req_op *op;
@@ -762,12 +762,6 @@ _osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which,
 
        return op;
 }
-
-void osd_req_op_init(struct ceph_osd_request *osd_req,
-                    unsigned int which, u16 opcode, u32 flags)
-{
-       (void)_osd_req_op_init(osd_req, which, opcode, flags);
-}
 EXPORT_SYMBOL(osd_req_op_init);
 
 void osd_req_op_extent_init(struct ceph_osd_request *osd_req,
@@ -775,8 +769,8 @@ void osd_req_op_extent_init(struct ceph_osd_request *osd_req,
                                u64 offset, u64 length,
                                u64 truncate_size, u32 truncate_seq)
 {
-       struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
-                                                     opcode, 0);
+       struct ceph_osd_req_op *op = osd_req_op_init(osd_req, which,
+                                                    opcode, 0);
        size_t payload_len = 0;
 
        BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
@@ -822,7 +816,7 @@ void osd_req_op_extent_dup_last(struct ceph_osd_request *osd_req,
        BUG_ON(which + 1 >= osd_req->r_num_ops);
 
        prev_op = &osd_req->r_ops[which];
-       op = _osd_req_op_init(osd_req, which + 1, prev_op->op, prev_op->flags);
+       op = osd_req_op_init(osd_req, which + 1, prev_op->op, prev_op->flags);
        /* dup previous one */
        op->indata_len = prev_op->indata_len;
        op->outdata_len = prev_op->outdata_len;
@@ -845,7 +839,7 @@ int osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which,
        size_t size;
        int ret;
 
-       op = _osd_req_op_init(osd_req, which, CEPH_OSD_OP_CALL, 0);
+       op = osd_req_op_init(osd_req, which, CEPH_OSD_OP_CALL, 0);
 
        pagelist = ceph_pagelist_alloc(GFP_NOFS);
        if (!pagelist)
@@ -883,8 +877,8 @@ int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which,
                          u16 opcode, const char *name, const void *value,
                          size_t size, u8 cmp_op, u8 cmp_mode)
 {
-       struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
-                                                     opcode, 0);
+       struct ceph_osd_req_op *op = osd_req_op_init(osd_req, which,
+                                                    opcode, 0);
        struct ceph_pagelist *pagelist;
        size_t payload_len;
        int ret;
@@ -928,7 +922,7 @@ static void osd_req_op_watch_init(struct ceph_osd_request *req, int which,
 {
        struct ceph_osd_req_op *op;
 
-       op = _osd_req_op_init(req, which, CEPH_OSD_OP_WATCH, 0);
+       op = osd_req_op_init(req, which, CEPH_OSD_OP_WATCH, 0);
        op->watch.cookie = cookie;
        op->watch.op = watch_opcode;
        op->watch.gen = 0;
@@ -943,10 +937,9 @@ void osd_req_op_alloc_hint_init(struct ceph_osd_request *osd_req,
                                u64 expected_write_size,
                                u32 flags)
 {
-       struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
-                                                     CEPH_OSD_OP_SETALLOCHINT,
-                                                     0);
+       struct ceph_osd_req_op *op;
 
+       op = osd_req_op_init(osd_req, which, CEPH_OSD_OP_SETALLOCHINT, 0);
        op->alloc_hint.expected_object_size = expected_object_size;
        op->alloc_hint.expected_write_size = expected_write_size;
        op->alloc_hint.flags = flags;
@@ -3076,9 +3069,7 @@ static void send_linger(struct ceph_osd_linger_request *lreq)
                cancel_linger_request(req);
 
        request_reinit(req);
-       ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
-       ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
-       req->r_flags = lreq->t.flags;
+       target_copy(&req->r_t, &lreq->t);
        req->r_mtime = lreq->mtime;
 
        mutex_lock(&lreq->lock);
@@ -4801,7 +4792,7 @@ static int osd_req_op_notify_ack_init(struct ceph_osd_request *req, int which,
        struct ceph_pagelist *pl;
        int ret;
 
-       op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY_ACK, 0);
+       op = osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY_ACK, 0);
 
        pl = ceph_pagelist_alloc(GFP_NOIO);
        if (!pl)
@@ -4870,7 +4861,7 @@ static int osd_req_op_notify_init(struct ceph_osd_request *req, int which,
        struct ceph_pagelist *pl;
        int ret;
 
-       op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY, 0);
+       op = osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY, 0);
        op->notify.cookie = cookie;
 
        pl = ceph_pagelist_alloc(GFP_NOIO);
@@ -5334,8 +5325,8 @@ static int osd_req_op_copy_from_init(struct ceph_osd_request *req,
        if (IS_ERR(pages))
                return PTR_ERR(pages);
 
-       op = _osd_req_op_init(req, 0, CEPH_OSD_OP_COPY_FROM2,
-                             dst_fadvise_flags);
+       op = osd_req_op_init(req, 0, CEPH_OSD_OP_COPY_FROM2,
+                            dst_fadvise_flags);
        op->copy_from.snapid = src_snapid;
        op->copy_from.src_version = src_version;
        op->copy_from.flags = copy_from_flags;
index d3377c9..b988f48 100644 (file)
@@ -1384,18 +1384,39 @@ static int bpf_iter_init_sk_storage_map(void *priv_data,
        return 0;
 }
 
-static int bpf_iter_check_map(struct bpf_prog *prog,
-                             struct bpf_iter_aux_info *aux)
+static int bpf_iter_attach_map(struct bpf_prog *prog,
+                              union bpf_iter_link_info *linfo,
+                              struct bpf_iter_aux_info *aux)
 {
-       struct bpf_map *map = aux->map;
+       struct bpf_map *map;
+       int err = -EINVAL;
+
+       if (!linfo->map.map_fd)
+               return -EBADF;
+
+       map = bpf_map_get_with_uref(linfo->map.map_fd);
+       if (IS_ERR(map))
+               return PTR_ERR(map);
 
        if (map->map_type != BPF_MAP_TYPE_SK_STORAGE)
-               return -EINVAL;
+               goto put_map;
 
-       if (prog->aux->max_rdonly_access > map->value_size)
-               return -EACCES;
+       if (prog->aux->max_rdonly_access > map->value_size) {
+               err = -EACCES;
+               goto put_map;
+       }
 
+       aux->map = map;
        return 0;
+
+put_map:
+       bpf_map_put_with_uref(map);
+       return err;
+}
+
+static void bpf_iter_detach_map(struct bpf_iter_aux_info *aux)
+{
+       bpf_map_put_with_uref(aux->map);
 }
 
 static const struct seq_operations bpf_sk_storage_map_seq_ops = {
@@ -1414,8 +1435,8 @@ static const struct bpf_iter_seq_info iter_seq_info = {
 
 static struct bpf_iter_reg bpf_sk_storage_map_reg_info = {
        .target                 = "bpf_sk_storage_map",
-       .check_target           = bpf_iter_check_map,
-       .req_linfo              = BPF_ITER_LINK_MAP_FD,
+       .attach_target          = bpf_iter_attach_map,
+       .detach_target          = bpf_iter_detach_map,
        .ctx_arg_info_size      = 2,
        .ctx_arg_info           = {
                { offsetof(struct bpf_iter__bpf_sk_storage_map, sk),
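
bpf_iter_attach_map() above takes a reference on the map first and routes every failure through a single put_map label so the reference is always dropped exactly once. The same acquire/validate/release-on-error shape, reduced to a self-contained userspace sketch where the resource is just a FILE handle:

#include <errno.h>
#include <stdio.h>

/* Open and validate a file; hand ownership to the caller only on success. */
static int attach_file(const char *path, FILE **out)
{
	int err = -EINVAL;
	FILE *f = fopen(path, "r");

	if (!f)
		return -ENOENT;

	if (fgetc(f) == EOF)		/* example check: reject empty files */
		goto put_file;

	rewind(f);
	*out = f;			/* success path keeps the resource */
	return 0;

put_file:
	fclose(f);			/* single cleanup point for all failures */
	return err;
}

int main(void)
{
	FILE *f = NULL;

	if (attach_file("/etc/hostname", &f) == 0) {	/* path chosen only as an example */
		puts("attached");
		fclose(f);
	}
	return 0;
}
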
index 7df6c96..b5d1129 100644 (file)
@@ -8913,10 +8913,6 @@ static int dev_xdp_attach(struct net_device *dev, struct netlink_ext_ack *extack
                NL_SET_ERR_MSG(extack, "Active program does not match expected");
                return -EEXIST;
        }
-       if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) && cur_prog) {
-               NL_SET_ERR_MSG(extack, "XDP program already attached");
-               return -EBUSY;
-       }
 
        /* put effective new program into new_prog */
        if (link)
@@ -8927,6 +8923,10 @@ static int dev_xdp_attach(struct net_device *dev, struct netlink_ext_ack *extack
                enum bpf_xdp_mode other_mode = mode == XDP_MODE_SKB
                                               ? XDP_MODE_DRV : XDP_MODE_SKB;
 
+               if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) && cur_prog) {
+                       NL_SET_ERR_MSG(extack, "XDP program already attached");
+                       return -EBUSY;
+               }
                if (!offload && dev_xdp_prog(dev, other_mode)) {
                        NL_SET_ERR_MSG(extack, "Native and generic XDP can't be active at the same time");
                        return -EEXIST;
index e674f0f..e5feb87 100644 (file)
@@ -4063,7 +4063,7 @@ static int __devlink_snapshot_id_insert(struct devlink *devlink, u32 id)
 {
        lockdep_assert_held(&devlink->lock);
 
-       if (WARN_ON(xa_load(&devlink->snapshot_ids, id)))
+       if (xa_load(&devlink->snapshot_ids, id))
                return -EEXIST;
 
        return xa_err(xa_store(&devlink->snapshot_ids, id, xa_mk_value(0),
index 7124f0f..b2df520 100644 (file)
@@ -8317,15 +8317,31 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
 /* Helper macro for adding read access to tcp_sock or sock fields. */
 #define SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ)                        \
        do {                                                                  \
+               int fullsock_reg = si->dst_reg, reg = BPF_REG_9, jmp = 2;     \
                BUILD_BUG_ON(sizeof_field(OBJ, OBJ_FIELD) >                   \
                             sizeof_field(struct bpf_sock_ops, BPF_FIELD));   \
+               if (si->dst_reg == reg || si->src_reg == reg)                 \
+                       reg--;                                                \
+               if (si->dst_reg == reg || si->src_reg == reg)                 \
+                       reg--;                                                \
+               if (si->dst_reg == si->src_reg) {                             \
+                       *insn++ = BPF_STX_MEM(BPF_DW, si->src_reg, reg,       \
+                                         offsetof(struct bpf_sock_ops_kern,  \
+                                         temp));                             \
+                       fullsock_reg = reg;                                   \
+                       jmp += 2;                                             \
+               }                                                             \
                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(                       \
                                                struct bpf_sock_ops_kern,     \
                                                is_fullsock),                 \
-                                     si->dst_reg, si->src_reg,               \
+                                     fullsock_reg, si->src_reg,              \
                                      offsetof(struct bpf_sock_ops_kern,      \
                                               is_fullsock));                 \
-               *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 2);            \
+               *insn++ = BPF_JMP_IMM(BPF_JEQ, fullsock_reg, 0, jmp);         \
+               if (si->dst_reg == si->src_reg)                               \
+                       *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg,       \
+                                     offsetof(struct bpf_sock_ops_kern,      \
+                                     temp));                                 \
                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(                       \
                                                struct bpf_sock_ops_kern, sk),\
                                      si->dst_reg, si->src_reg,               \
@@ -8334,6 +8350,49 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
                                                       OBJ_FIELD),            \
                                      si->dst_reg, si->dst_reg,               \
                                      offsetof(OBJ, OBJ_FIELD));              \
+               if (si->dst_reg == si->src_reg) {                             \
+                       *insn++ = BPF_JMP_A(1);                               \
+                       *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg,       \
+                                     offsetof(struct bpf_sock_ops_kern,      \
+                                     temp));                                 \
+               }                                                             \
+       } while (0)
+
+#define SOCK_OPS_GET_SK()                                                            \
+       do {                                                                  \
+               int fullsock_reg = si->dst_reg, reg = BPF_REG_9, jmp = 1;     \
+               if (si->dst_reg == reg || si->src_reg == reg)                 \
+                       reg--;                                                \
+               if (si->dst_reg == reg || si->src_reg == reg)                 \
+                       reg--;                                                \
+               if (si->dst_reg == si->src_reg) {                             \
+                       *insn++ = BPF_STX_MEM(BPF_DW, si->src_reg, reg,       \
+                                         offsetof(struct bpf_sock_ops_kern,  \
+                                         temp));                             \
+                       fullsock_reg = reg;                                   \
+                       jmp += 2;                                             \
+               }                                                             \
+               *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(                       \
+                                               struct bpf_sock_ops_kern,     \
+                                               is_fullsock),                 \
+                                     fullsock_reg, si->src_reg,              \
+                                     offsetof(struct bpf_sock_ops_kern,      \
+                                              is_fullsock));                 \
+               *insn++ = BPF_JMP_IMM(BPF_JEQ, fullsock_reg, 0, jmp);         \
+               if (si->dst_reg == si->src_reg)                               \
+                       *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg,       \
+                                     offsetof(struct bpf_sock_ops_kern,      \
+                                     temp));                                 \
+               *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(                       \
+                                               struct bpf_sock_ops_kern, sk),\
+                                     si->dst_reg, si->src_reg,               \
+                                     offsetof(struct bpf_sock_ops_kern, sk));\
+               if (si->dst_reg == si->src_reg) {                             \
+                       *insn++ = BPF_JMP_A(1);                               \
+                       *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg,       \
+                                     offsetof(struct bpf_sock_ops_kern,      \
+                                     temp));                                 \
+               }                                                             \
        } while (0)
 
 #define SOCK_OPS_GET_TCP_SOCK_FIELD(FIELD) \
@@ -8620,17 +8679,7 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
                SOCK_OPS_GET_TCP_SOCK_FIELD(bytes_acked);
                break;
        case offsetof(struct bpf_sock_ops, sk):
-               *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
-                                               struct bpf_sock_ops_kern,
-                                               is_fullsock),
-                                     si->dst_reg, si->src_reg,
-                                     offsetof(struct bpf_sock_ops_kern,
-                                              is_fullsock));
-               *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1);
-               *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
-                                               struct bpf_sock_ops_kern, sk),
-                                     si->dst_reg, si->src_reg,
-                                     offsetof(struct bpf_sock_ops_kern, sk));
+               SOCK_OPS_GET_SK();
                break;
        }
        return insn - insn_buf;
index 9de33b5..efec66f 100644 (file)
@@ -757,11 +757,13 @@ static ssize_t store_rps_map(struct netdev_rx_queue *queue,
                return err;
        }
 
-       hk_flags = HK_FLAG_DOMAIN | HK_FLAG_WQ;
-       cpumask_and(mask, mask, housekeeping_cpumask(hk_flags));
-       if (cpumask_empty(mask)) {
-               free_cpumask_var(mask);
-               return -EINVAL;
+       if (!cpumask_empty(mask)) {
+               hk_flags = HK_FLAG_DOMAIN | HK_FLAG_WQ;
+               cpumask_and(mask, mask, housekeeping_cpumask(hk_flags));
+               if (cpumask_empty(mask)) {
+                       free_cpumask_var(mask);
+                       return -EINVAL;
+               }
        }
 
        map = kzalloc(max_t(unsigned int,
index 2828f6d..5c3b906 100644 (file)
@@ -4853,7 +4853,7 @@ static int skb_checksum_setup_ipv4(struct sk_buff *skb, bool recalculate)
        if (err < 0)
                goto out;
 
-       if (ip_hdr(skb)->frag_off & htons(IP_OFFSET | IP_MF))
+       if (ip_is_fragment(ip_hdr(skb)))
                fragment = true;
 
        off = ip_hdrlen(skb);
@@ -5418,8 +5418,8 @@ struct sk_buff *skb_vlan_untag(struct sk_buff *skb)
        skb = skb_share_check(skb, GFP_ATOMIC);
        if (unlikely(!skb))
                goto err_free;
-
-       if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
+       /* We may access the two bytes after vlan_hdr in vlan_set_encap_proto(). */
+       if (unlikely(!pskb_may_pull(skb, VLAN_HLEN + sizeof(unsigned short))))
                goto err_free;
 
        vhdr = (struct vlan_hdr *)skb->data;
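
The pskb_may_pull() change above pulls two extra bytes because the code that follows reads past the VLAN header. The general rule, shown in a tiny standalone form below, is to verify that every byte about to be dereferenced is actually inside the buffer before touching it:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* True if [off, off + need) lies entirely inside a buffer of buf_len bytes. */
static bool have_bytes(size_t buf_len, size_t off, size_t need)
{
	return off <= buf_len && need <= buf_len - off;
}

int main(void)
{
	printf("%d %d\n", have_bytes(18, 14, 4), have_bytes(18, 16, 4));  /* 1 0 */
	return 0;
}
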
index a2044b4..e4f40b1 100644 (file)
@@ -3414,6 +3414,16 @@ static void sock_inuse_add(struct net *net, int val)
 }
 #endif
 
+static void tw_prot_cleanup(struct timewait_sock_ops *twsk_prot)
+{
+       if (!twsk_prot)
+               return;
+       kfree(twsk_prot->twsk_slab_name);
+       twsk_prot->twsk_slab_name = NULL;
+       kmem_cache_destroy(twsk_prot->twsk_slab);
+       twsk_prot->twsk_slab = NULL;
+}
+
 static void req_prot_cleanup(struct request_sock_ops *rsk_prot)
 {
        if (!rsk_prot)
@@ -3484,7 +3494,7 @@ int proto_register(struct proto *prot, int alloc_slab)
                                                  prot->slab_flags,
                                                  NULL);
                        if (prot->twsk_prot->twsk_slab == NULL)
-                               goto out_free_timewait_sock_slab_name;
+                               goto out_free_timewait_sock_slab;
                }
        }
 
@@ -3492,15 +3502,15 @@ int proto_register(struct proto *prot, int alloc_slab)
        ret = assign_proto_idx(prot);
        if (ret) {
                mutex_unlock(&proto_list_mutex);
-               goto out_free_timewait_sock_slab_name;
+               goto out_free_timewait_sock_slab;
        }
        list_add(&prot->node, &proto_list);
        mutex_unlock(&proto_list_mutex);
        return ret;
 
-out_free_timewait_sock_slab_name:
+out_free_timewait_sock_slab:
        if (alloc_slab && prot->twsk_prot)
-               kfree(prot->twsk_prot->twsk_slab_name);
+               tw_prot_cleanup(prot->twsk_prot);
 out_free_request_sock_slab:
        if (alloc_slab) {
                req_prot_cleanup(prot->rsk_prot);
@@ -3524,12 +3534,7 @@ void proto_unregister(struct proto *prot)
        prot->slab = NULL;
 
        req_prot_cleanup(prot->rsk_prot);
-
-       if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
-               kmem_cache_destroy(prot->twsk_prot->twsk_slab);
-               kfree(prot->twsk_prot->twsk_slab_name);
-               prot->twsk_prot->twsk_slab = NULL;
-       }
+       tw_prot_cleanup(prot->twsk_prot);
 }
 EXPORT_SYMBOL(proto_unregister);
 
index 545b264..1b34cb9 100644 (file)
@@ -57,18 +57,16 @@ int bpfilter_ip_set_sockopt(struct sock *sk, int optname, sockptr_t optval,
        return bpfilter_mbox_request(sk, optname, optval, optlen, true);
 }
 
-int bpfilter_ip_get_sockopt(struct sock *sk, int optname,
-                           char __user *user_optval, int __user *optlen)
+int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char __user *optval,
+                           int __user *optlen)
 {
-       sockptr_t optval;
-       int err, len;
+       int len;
 
        if (get_user(len, optlen))
                return -EFAULT;
-       err = init_user_sockptr(&optval, user_optval, len);
-       if (err)
-               return err;
-       return bpfilter_mbox_request(sk, optname, optval, len, false);
+
+       return bpfilter_mbox_request(sk, optname, USER_SOCKPTR(optval), len,
+                                    false);
 }
 
 static int __init bpfilter_sockopt_init(void)
index d1a3913..b457dd2 100644 (file)
@@ -296,6 +296,57 @@ static inline int sk_reuseport_match(struct inet_bind_bucket *tb,
                                    ipv6_only_sock(sk), true, false);
 }
 
+void inet_csk_update_fastreuse(struct inet_bind_bucket *tb,
+                              struct sock *sk)
+{
+       kuid_t uid = sock_i_uid(sk);
+       bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;
+
+       if (hlist_empty(&tb->owners)) {
+               tb->fastreuse = reuse;
+               if (sk->sk_reuseport) {
+                       tb->fastreuseport = FASTREUSEPORT_ANY;
+                       tb->fastuid = uid;
+                       tb->fast_rcv_saddr = sk->sk_rcv_saddr;
+                       tb->fast_ipv6_only = ipv6_only_sock(sk);
+                       tb->fast_sk_family = sk->sk_family;
+#if IS_ENABLED(CONFIG_IPV6)
+                       tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
+#endif
+               } else {
+                       tb->fastreuseport = 0;
+               }
+       } else {
+               if (!reuse)
+                       tb->fastreuse = 0;
+               if (sk->sk_reuseport) {
+                       /* We didn't match or we don't have fastreuseport set on
+                        * the tb, but we have sk_reuseport set on this socket
+                        * and we know that there are no bind conflicts with
+                        * this socket in this tb, so reset our tb's reuseport
+                        * settings so that any subsequent sockets that match
+                        * our current socket will be put on the fast path.
+                        *
+                        * If we reset we need to set FASTREUSEPORT_STRICT so we
+                        * do extra checking for all subsequent sk_reuseport
+                        * socks.
+                        */
+                       if (!sk_reuseport_match(tb, sk)) {
+                               tb->fastreuseport = FASTREUSEPORT_STRICT;
+                               tb->fastuid = uid;
+                               tb->fast_rcv_saddr = sk->sk_rcv_saddr;
+                               tb->fast_ipv6_only = ipv6_only_sock(sk);
+                               tb->fast_sk_family = sk->sk_family;
+#if IS_ENABLED(CONFIG_IPV6)
+                               tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
+#endif
+                       }
+               } else {
+                       tb->fastreuseport = 0;
+               }
+       }
+}
+
 /* Obtain a reference to a local port for the given sock,
  * if snum is zero it means select any available local port.
  * We try to allocate an odd port (and leave even ports for connect())
@@ -308,7 +359,6 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum)
        struct inet_bind_hashbucket *head;
        struct net *net = sock_net(sk);
        struct inet_bind_bucket *tb = NULL;
-       kuid_t uid = sock_i_uid(sk);
        int l3mdev;
 
        l3mdev = inet_sk_bound_l3mdev(sk);
@@ -345,49 +395,8 @@ tb_found:
                        goto fail_unlock;
        }
 success:
-       if (hlist_empty(&tb->owners)) {
-               tb->fastreuse = reuse;
-               if (sk->sk_reuseport) {
-                       tb->fastreuseport = FASTREUSEPORT_ANY;
-                       tb->fastuid = uid;
-                       tb->fast_rcv_saddr = sk->sk_rcv_saddr;
-                       tb->fast_ipv6_only = ipv6_only_sock(sk);
-                       tb->fast_sk_family = sk->sk_family;
-#if IS_ENABLED(CONFIG_IPV6)
-                       tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
-#endif
-               } else {
-                       tb->fastreuseport = 0;
-               }
-       } else {
-               if (!reuse)
-                       tb->fastreuse = 0;
-               if (sk->sk_reuseport) {
-                       /* We didn't match or we don't have fastreuseport set on
-                        * the tb, but we have sk_reuseport set on this socket
-                        * and we know that there are no bind conflicts with
-                        * this socket in this tb, so reset our tb's reuseport
-                        * settings so that any subsequent sockets that match
-                        * our current socket will be put on the fast path.
-                        *
-                        * If we reset we need to set FASTREUSEPORT_STRICT so we
-                        * do extra checking for all subsequent sk_reuseport
-                        * socks.
-                        */
-                       if (!sk_reuseport_match(tb, sk)) {
-                               tb->fastreuseport = FASTREUSEPORT_STRICT;
-                               tb->fastuid = uid;
-                               tb->fast_rcv_saddr = sk->sk_rcv_saddr;
-                               tb->fast_ipv6_only = ipv6_only_sock(sk);
-                               tb->fast_sk_family = sk->sk_family;
-#if IS_ENABLED(CONFIG_IPV6)
-                               tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
-#endif
-                       }
-               } else {
-                       tb->fastreuseport = 0;
-               }
-       }
+       inet_csk_update_fastreuse(tb, sk);
+
        if (!inet_csk(sk)->icsk_bind_hash)
                inet_bind_hash(sk, tb, port);
        WARN_ON(inet_csk(sk)->icsk_bind_hash != tb);
index 4eb4cd8..239e544 100644 (file)
@@ -163,6 +163,7 @@ int __inet_inherit_port(const struct sock *sk, struct sock *child)
                                return -ENOMEM;
                        }
                }
+               inet_csk_update_fastreuse(tb, child);
        }
        inet_bind_hash(child, tb, port);
        spin_unlock(&head->lock);
index 5653e3b..54023a4 100644 (file)
@@ -301,24 +301,16 @@ static int proc_tcp_fastopen_key(struct ctl_table *table, int write,
        struct ctl_table tbl = { .maxlen = ((TCP_FASTOPEN_KEY_LENGTH *
                                            2 * TCP_FASTOPEN_KEY_MAX) +
                                            (TCP_FASTOPEN_KEY_MAX * 5)) };
-       struct tcp_fastopen_context *ctx;
-       u32 user_key[TCP_FASTOPEN_KEY_MAX * 4];
-       __le32 key[TCP_FASTOPEN_KEY_MAX * 4];
+       u32 user_key[TCP_FASTOPEN_KEY_BUF_LENGTH / sizeof(u32)];
+       __le32 key[TCP_FASTOPEN_KEY_BUF_LENGTH / sizeof(__le32)];
        char *backup_data;
-       int ret, i = 0, off = 0, n_keys = 0;
+       int ret, i = 0, off = 0, n_keys;
 
        tbl.data = kmalloc(tbl.maxlen, GFP_KERNEL);
        if (!tbl.data)
                return -ENOMEM;
 
-       rcu_read_lock();
-       ctx = rcu_dereference(net->ipv4.tcp_fastopen_ctx);
-       if (ctx) {
-               n_keys = tcp_fastopen_context_len(ctx);
-               memcpy(&key[0], &ctx->key[0], TCP_FASTOPEN_KEY_LENGTH * n_keys);
-       }
-       rcu_read_unlock();
-
+       n_keys = tcp_fastopen_get_cipher(net, NULL, (u64 *)key);
        if (!n_keys) {
                memset(&key[0], 0, TCP_FASTOPEN_KEY_LENGTH);
                n_keys = 1;
index c06d2bf..31f3b85 100644 (file)
@@ -3685,22 +3685,14 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
                return 0;
 
        case TCP_FASTOPEN_KEY: {
-               __u8 key[TCP_FASTOPEN_KEY_BUF_LENGTH];
-               struct tcp_fastopen_context *ctx;
-               unsigned int key_len = 0;
+               u64 key[TCP_FASTOPEN_KEY_BUF_LENGTH / sizeof(u64)];
+               unsigned int key_len;
 
                if (get_user(len, optlen))
                        return -EFAULT;
 
-               rcu_read_lock();
-               ctx = rcu_dereference(icsk->icsk_accept_queue.fastopenq.ctx);
-               if (ctx) {
-                       key_len = tcp_fastopen_context_len(ctx) *
-                                       TCP_FASTOPEN_KEY_LENGTH;
-                       memcpy(&key[0], &ctx->key[0], key_len);
-               }
-               rcu_read_unlock();
-
+               key_len = tcp_fastopen_get_cipher(net, icsk, key) *
+                               TCP_FASTOPEN_KEY_LENGTH;
                len = min_t(unsigned int, len, key_len);
                if (put_user(len, optlen))
                        return -EFAULT;
index c1a54f3..09b62de 100644 (file)
@@ -108,6 +108,29 @@ out:
        return err;
 }
 
+int tcp_fastopen_get_cipher(struct net *net, struct inet_connection_sock *icsk,
+                           u64 *key)
+{
+       struct tcp_fastopen_context *ctx;
+       int n_keys = 0, i;
+
+       rcu_read_lock();
+       if (icsk)
+               ctx = rcu_dereference(icsk->icsk_accept_queue.fastopenq.ctx);
+       else
+               ctx = rcu_dereference(net->ipv4.tcp_fastopen_ctx);
+       if (ctx) {
+               n_keys = tcp_fastopen_context_len(ctx);
+               for (i = 0; i < n_keys; i++) {
+                       put_unaligned_le64(ctx->key[i].key[0], key + (i * 2));
+                       put_unaligned_le64(ctx->key[i].key[1], key + (i * 2) + 1);
+               }
+       }
+       rcu_read_unlock();
+
+       return n_keys;
+}
+
 static bool __tcp_fastopen_cookie_gen_cipher(struct request_sock *req,
                                             struct sk_buff *syn,
                                             const siphash_key_t *key,
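With tcp_fastopen_get_cipher() defined above, both readers of the TFO keys (the proc_tcp_fastopen_key sysctl handler and the TCP_FASTOPEN_KEY getsockopt path) collapse to a single call. A minimal usage sketch, assuming only the signature shown in this hunk; the function and variable names here are illustrative:

static int sketch_dump_fastopen_keys(struct net *net,
                                     struct inet_connection_sock *icsk)
{
        u64 key[TCP_FASTOPEN_KEY_BUF_LENGTH / sizeof(u64)];
        int n_keys;

        /* icsk == NULL reads the per-netns keys; a non-NULL icsk prefers
         * the listener's own key context. */
        n_keys = tcp_fastopen_get_cipher(net, icsk, key);

        /* Each key occupies TCP_FASTOPEN_KEY_LENGTH bytes of @key, stored
         * little-endian by the helper; n_keys == 0 means no key installed. */
        return n_keys;
}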
index 409e79b..6d0e942 100644 (file)
@@ -245,9 +245,6 @@ static const struct nf_ipv6_ops ipv6ops = {
        .route_input            = ip6_route_input,
        .fragment               = ip6_fragment,
        .reroute                = nf_ip6_reroute,
-#if IS_MODULE(CONFIG_IPV6) && IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
-       .br_defrag              = nf_ct_frag6_gather,
-#endif
 #if IS_MODULE(CONFIG_IPV6)
        .br_fragment            = br_ip6_fragment,
 #endif
index 8c1d1a5..1aad411 100644 (file)
@@ -725,8 +725,10 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
                if (!psize)
                        return -EINVAL;
 
-               if (!sk_wmem_schedule(sk, psize + dfrag->overhead))
+               if (!sk_wmem_schedule(sk, psize + dfrag->overhead)) {
+                       iov_iter_revert(&msg->msg_iter, psize);
                        return -ENOMEM;
+               }
        } else {
                offset = dfrag->offset;
                psize = min_t(size_t, dfrag->data_len, avail_size);
@@ -737,8 +739,11 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
         */
        ret = do_tcp_sendpages(ssk, page, offset, psize,
                               msg->msg_flags | MSG_SENDPAGE_NOTLAST | MSG_DONTWAIT);
-       if (ret <= 0)
+       if (ret <= 0) {
+               if (!retransmission)
+                       iov_iter_revert(&msg->msg_iter, psize);
                return ret;
+       }
 
        frag_truesize += ret;
        if (!retransmission) {
@@ -1388,7 +1393,9 @@ static void mptcp_worker(struct work_struct *work)
        struct mptcp_data_frag *dfrag;
        u64 orig_write_seq;
        size_t copied = 0;
-       struct msghdr msg;
+       struct msghdr msg = {
+               .msg_flags = MSG_DONTWAIT,
+       };
        long timeo = 0;
 
        lock_sock(sk);
@@ -1421,7 +1428,6 @@ static void mptcp_worker(struct work_struct *work)
 
        lock_sock(ssk);
 
-       msg.msg_flags = MSG_DONTWAIT;
        orig_len = dfrag->data_len;
        orig_offset = dfrag->offset;
        orig_write_seq = dfrag->data_seq;
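Both mptcp_sendmsg_frag() fixes above enforce the same rule: any bytes already copied out of msg->msg_iter that cannot be handed to the subflow must be given back with iov_iter_revert() so the caller can retry with the same data. A generic, MPTCP-independent sketch of the pattern, assuming @psize bytes were copied from msg->msg_iter into @page earlier:

static ssize_t sketch_send_once(struct sock *ssk, struct msghdr *msg,
                                struct page *page, int offset, size_t psize)
{
        ssize_t ret;

        ret = do_tcp_sendpages(ssk, page, offset, psize,
                               msg->msg_flags | MSG_DONTWAIT);
        if (ret <= 0)
                /* Nothing was sent: rewind the iterator so the bytes
                 * consumed from it are not silently lost. */
                iov_iter_revert(&msg->msg_iter, psize);
        return ret;
}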
index 96f4f2f..e8cac26 100644 (file)
@@ -423,12 +423,12 @@ static void mptcp_sock_destruct(struct sock *sk)
         * also remove the mptcp socket, via
         * sock_put(ctx->conn).
         *
-        * Problem is that the mptcp socket will not be in
-        * SYN_RECV state and doesn't have SOCK_DEAD flag.
+        * Problem is that the mptcp socket will be in
+        * ESTABLISHED state and will not have the SOCK_DEAD flag.
         * Both result in warnings from inet_sock_destruct.
         */
 
-       if (sk->sk_state == TCP_SYN_RECV) {
+       if (sk->sk_state == TCP_ESTABLISHED) {
                sk->sk_state = TCP_CLOSE;
                WARN_ON_ONCE(sk->sk_socket);
                sock_orphan(sk);
index e38b60f..5b97d23 100644 (file)
@@ -180,7 +180,7 @@ EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);
 
 unsigned int nf_conntrack_max __read_mostly;
 EXPORT_SYMBOL_GPL(nf_conntrack_max);
-seqcount_t nf_conntrack_generation __read_mostly;
+seqcount_spinlock_t nf_conntrack_generation __read_mostly;
 static unsigned int nf_conntrack_hash_rnd __read_mostly;
 
 static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple,
@@ -2588,7 +2588,8 @@ int nf_conntrack_init_start(void)
        /* struct nf_ct_ext uses u8 to store offsets/size */
        BUILD_BUG_ON(total_extension_size() > 255u);
 
-       seqcount_init(&nf_conntrack_generation);
+       seqcount_spinlock_init(&nf_conntrack_generation,
+                              &nf_conntrack_locks_all_lock);
 
        for (i = 0; i < CONNTRACK_LOCKS; i++)
                spin_lock_init(&nf_conntrack_locks[i]);
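The conversion above is part of the seqlock rework that ties a seqcount to the lock serializing its writers (seqcount_spinlock_t, seqcount_rwlock_t, ...), so lockdep can verify that write sections really hold nf_conntrack_locks_all_lock. A small self-contained sketch of the API, with hypothetical names:

static seqcount_spinlock_t demo_seq;
static DEFINE_SPINLOCK(demo_lock);

static void demo_init(void)
{
        /* Associate the seqcount with the lock that guards its writers. */
        seqcount_spinlock_init(&demo_seq, &demo_lock);
}

static void demo_writer(void)
{
        spin_lock(&demo_lock);
        write_seqcount_begin(&demo_seq);
        /* ... update the protected state ... */
        write_seqcount_end(&demo_seq);
        spin_unlock(&demo_lock);
}

static void demo_reader(void)
{
        unsigned int seq;

        do {
                seq = read_seqcount_begin(&demo_seq);
                /* ... read the protected state ... */
        } while (read_seqcount_retry(&demo_seq, seq));
}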
index d878e34..fd814e5 100644 (file)
@@ -2018,8 +2018,10 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
        if (nla[NFTA_CHAIN_NAME]) {
                chain->name = nla_strdup(nla[NFTA_CHAIN_NAME], GFP_KERNEL);
        } else {
-               if (!(flags & NFT_CHAIN_BINDING))
-                       return -EINVAL;
+               if (!(flags & NFT_CHAIN_BINDING)) {
+                       err = -EINVAL;
+                       goto err1;
+               }
 
                snprintf(name, sizeof(name), "__chain%llu", ++chain_id);
                chain->name = kstrdup(name, GFP_KERNEL);
index 6428856..8e56f35 100644 (file)
@@ -27,8 +27,6 @@ struct nft_xt_match_priv {
        void *info;
 };
 
-static refcount_t nft_compat_pending_destroy = REFCOUNT_INIT(1);
-
 static int nft_compat_chain_validate_dependency(const struct nft_ctx *ctx,
                                                const char *tablename)
 {
@@ -215,6 +213,17 @@ static int nft_parse_compat(const struct nlattr *attr, u16 *proto, bool *inv)
        return 0;
 }
 
+static void nft_compat_wait_for_destructors(void)
+{
+       /* xtables matches or targets can have side effects, e.g.
+        * creation/destruction of /proc files.
+        * The xt ->destroy functions are run asynchronously from
+        * work queue.  If we have pending invocations we thus
+        * need to wait for those to finish.
+        */
+       nf_tables_trans_destroy_flush_work();
+}
+
 static int
 nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
                const struct nlattr * const tb[])
@@ -238,14 +247,7 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
 
        nft_target_set_tgchk_param(&par, ctx, target, info, &e, proto, inv);
 
-       /* xtables matches or targets can have side effects, e.g.
-        * creation/destruction of /proc files.
-        * The xt ->destroy functions are run asynchronously from
-        * work queue.  If we have pending invocations we thus
-        * need to wait for those to finish.
-        */
-       if (refcount_read(&nft_compat_pending_destroy) > 1)
-               nf_tables_trans_destroy_flush_work();
+       nft_compat_wait_for_destructors();
 
        ret = xt_check_target(&par, size, proto, inv);
        if (ret < 0)
@@ -260,7 +262,6 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
 
 static void __nft_mt_tg_destroy(struct module *me, const struct nft_expr *expr)
 {
-       refcount_dec(&nft_compat_pending_destroy);
        module_put(me);
        kfree(expr->ops);
 }
@@ -468,6 +469,8 @@ __nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
 
        nft_match_set_mtchk_param(&par, ctx, match, info, &e, proto, inv);
 
+       nft_compat_wait_for_destructors();
+
        return xt_check_match(&par, size, proto, inv);
 }
 
@@ -716,14 +719,6 @@ static const struct nfnetlink_subsystem nfnl_compat_subsys = {
 
 static struct nft_expr_type nft_match_type;
 
-static void nft_mt_tg_deactivate(const struct nft_ctx *ctx,
-                                const struct nft_expr *expr,
-                                enum nft_trans_phase phase)
-{
-       if (phase == NFT_TRANS_COMMIT)
-               refcount_inc(&nft_compat_pending_destroy);
-}
-
 static const struct nft_expr_ops *
 nft_match_select_ops(const struct nft_ctx *ctx,
                     const struct nlattr * const tb[])
@@ -762,7 +757,6 @@ nft_match_select_ops(const struct nft_ctx *ctx,
        ops->type = &nft_match_type;
        ops->eval = nft_match_eval;
        ops->init = nft_match_init;
-       ops->deactivate = nft_mt_tg_deactivate,
        ops->destroy = nft_match_destroy;
        ops->dump = nft_match_dump;
        ops->validate = nft_match_validate;
@@ -853,7 +847,6 @@ nft_target_select_ops(const struct nft_ctx *ctx,
        ops->size = NFT_EXPR_SIZE(XT_ALIGN(target->targetsize));
        ops->init = nft_target_init;
        ops->destroy = nft_target_destroy;
-       ops->deactivate = nft_mt_tg_deactivate,
        ops->dump = nft_target_dump;
        ops->validate = nft_target_validate;
        ops->data = target;
@@ -917,8 +910,6 @@ static void __exit nft_compat_module_exit(void)
        nfnetlink_subsys_unregister(&nfnl_compat_subsys);
        nft_unregister_expr(&nft_target_type);
        nft_unregister_expr(&nft_match_type);
-
-       WARN_ON_ONCE(refcount_read(&nft_compat_pending_destroy) != 1);
 }
 
 MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_NFT_COMPAT);
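The rework above drops the nft_compat_pending_destroy refcount and the ->deactivate hooks that maintained it; instead, both the target and match init paths unconditionally flush the transaction-destroy work queue before creating anything with side effects. The general pattern, sketched here with a hypothetical work item rather than the real nf_tables machinery:

static struct work_struct sketch_destroy_work;

static void sketch_wait_for_destructors(void)
{
        /* Run any queued destructor work to completion before we
         * recreate resources (e.g. /proc entries) that it may still
         * be tearing down. */
        flush_work(&sketch_destroy_work);
}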
index 0778283..3c48cdc 100644 (file)
@@ -44,7 +44,7 @@ static void nft_exthdr_ipv6_eval(const struct nft_expr *expr,
 
        err = ipv6_find_hdr(pkt->skb, &offset, priv->type, NULL, NULL);
        if (priv->flags & NFT_EXTHDR_F_PRESENT) {
-               *dest = (err >= 0);
+               nft_reg_store8(dest, err >= 0);
                return;
        } else if (err < 0) {
                goto err;
@@ -141,7 +141,7 @@ static void nft_exthdr_ipv4_eval(const struct nft_expr *expr,
 
        err = ipv4_find_option(nft_net(pkt), skb, &offset, priv->type);
        if (priv->flags & NFT_EXTHDR_F_PRESENT) {
-               *dest = (err >= 0);
+               nft_reg_store8(dest, err >= 0);
                return;
        } else if (err < 0) {
                goto err;
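Both exthdr fixes switch from a raw 32-bit store to nft_reg_store8(), so that a later 8-bit compare sees the flag at the same byte on little- and big-endian machines. The helper's shape, shown as an illustrative sketch (the real definition lives in include/net/netfilter/nf_tables.h):

static inline void sketch_reg_store8(u32 *dreg, u8 val)
{
        /* Zero the whole register, then store the value as one byte. */
        *dreg = 0;
        *(u8 *)dreg = val;
}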
index b6aad3f..4b2834f 100644 (file)
@@ -18,7 +18,7 @@
 struct nft_rbtree {
        struct rb_root          root;
        rwlock_t                lock;
-       seqcount_t              count;
+       seqcount_rwlock_t       count;
        struct delayed_work     gc_work;
 };
 
@@ -523,7 +523,7 @@ static int nft_rbtree_init(const struct nft_set *set,
        struct nft_rbtree *priv = nft_set_priv(set);
 
        rwlock_init(&priv->lock);
-       seqcount_init(&priv->count);
+       seqcount_rwlock_init(&priv->count, &priv->lock);
        priv->root = RB_ROOT;
 
        INIT_DEFERRABLE_WORK(&priv->gc_work, nft_rbtree_gc);
index b2061b6..955c195 100644 (file)
@@ -328,10 +328,13 @@ static int rawsock_create(struct net *net, struct socket *sock,
        if ((sock->type != SOCK_SEQPACKET) && (sock->type != SOCK_RAW))
                return -ESOCKTNOSUPPORT;
 
-       if (sock->type == SOCK_RAW)
+       if (sock->type == SOCK_RAW) {
+               if (!capable(CAP_NET_RAW))
+                       return -EPERM;
                sock->ops = &rawsock_raw_ops;
-       else
+       } else {
                sock->ops = &rawsock_ops;
+       }
 
        sk = sk_alloc(net, PF_NFC, GFP_ATOMIC, nfc_proto->proto, kern);
        if (!sk)
index 42f8cc7..6e47ef7 100644 (file)
@@ -1756,6 +1756,7 @@ err:
 /* Called with ovs_mutex. */
 static void __dp_destroy(struct datapath *dp)
 {
+       struct flow_table *table = &dp->table;
        int i;
 
        for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
@@ -1774,7 +1775,14 @@ static void __dp_destroy(struct datapath *dp)
         */
        ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL));
 
-       /* RCU destroy the flow table */
+       /* Flush the sw_flows in the tables. The RCU callback then only
+        * has to release resources such as the dp, ports and tables,
+        * which avoids RCU usage warnings.
+        */
+       table_instance_flow_flush(table, ovsl_dereference(table->ti),
+                                 ovsl_dereference(table->ufid_ti));
+
+       /* RCU destroy the ports, meters and flow tables. */
        call_rcu(&dp->rcu, destroy_dp_rcu);
 }
 
index 8c12675..e223584 100644 (file)
@@ -473,19 +473,15 @@ static void table_instance_flow_free(struct flow_table *table,
        flow_mask_remove(table, flow->mask);
 }
 
-static void table_instance_destroy(struct flow_table *table,
-                                  struct table_instance *ti,
-                                  struct table_instance *ufid_ti,
-                                  bool deferred)
+/* Must be called with OVS mutex held. */
+void table_instance_flow_flush(struct flow_table *table,
+                              struct table_instance *ti,
+                              struct table_instance *ufid_ti)
 {
        int i;
 
-       if (!ti)
-               return;
-
-       BUG_ON(!ufid_ti);
        if (ti->keep_flows)
-               goto skip_flows;
+               return;
 
        for (i = 0; i < ti->n_buckets; i++) {
                struct sw_flow *flow;
@@ -497,18 +493,16 @@ static void table_instance_destroy(struct flow_table *table,
 
                        table_instance_flow_free(table, ti, ufid_ti,
                                                 flow, false);
-                       ovs_flow_free(flow, deferred);
+                       ovs_flow_free(flow, true);
                }
        }
+}
 
-skip_flows:
-       if (deferred) {
-               call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
-               call_rcu(&ufid_ti->rcu, flow_tbl_destroy_rcu_cb);
-       } else {
-               __table_instance_destroy(ti);
-               __table_instance_destroy(ufid_ti);
-       }
+static void table_instance_destroy(struct table_instance *ti,
+                                  struct table_instance *ufid_ti)
+{
+       call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
+       call_rcu(&ufid_ti->rcu, flow_tbl_destroy_rcu_cb);
 }
 
 /* No need for locking this function is called from RCU callback or
@@ -523,7 +517,7 @@ void ovs_flow_tbl_destroy(struct flow_table *table)
 
        call_rcu(&mc->rcu, mask_cache_rcu_cb);
        call_rcu(&ma->rcu, mask_array_rcu_cb);
-       table_instance_destroy(table, ti, ufid_ti, false);
+       table_instance_destroy(ti, ufid_ti);
 }
 
 struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
@@ -641,7 +635,8 @@ int ovs_flow_tbl_flush(struct flow_table *flow_table)
        flow_table->count = 0;
        flow_table->ufid_count = 0;
 
-       table_instance_destroy(flow_table, old_ti, old_ufid_ti, true);
+       table_instance_flow_flush(flow_table, old_ti, old_ufid_ti);
+       table_instance_destroy(old_ti, old_ufid_ti);
        return 0;
 
 err_free_ti:
index 74ce48f..6e7d4ac 100644 (file)
@@ -105,5 +105,8 @@ void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
                       bool full, const struct sw_flow_mask *mask);
 
 void ovs_flow_masks_rebalance(struct flow_table *table);
+void table_instance_flow_flush(struct flow_table *table,
+                              struct table_instance *ti,
+                              struct table_instance *ufid_ti);
 
 #endif /* flow_table.h */
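Exporting table_instance_flow_flush() lets __dp_destroy() free the sw_flows synchronously under ovs_mutex, leaving the deferred RCU callback with only the empty containers to release. A sketch of the resulting teardown order, assuming the caller holds ovs_mutex; the function name is hypothetical:

static void sketch_flow_tbl_teardown(struct flow_table *table)
{
        struct table_instance *ti = ovsl_dereference(table->ti);
        struct table_instance *ufid_ti = ovsl_dereference(table->ufid_ti);

        /* 1. Free every sw_flow now, while ovs_mutex is held. */
        table_instance_flow_flush(table, ti, ufid_ti);

        /* 2. Defer freeing of the (now empty) table instances to RCU. */
        call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
        call_rcu(&ufid_ti->rcu, flow_tbl_destroy_rcu_cb);
}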
index 0b8160d..479c257 100644 (file)
@@ -941,6 +941,7 @@ static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
 }
 
 static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
+       __releases(&pkc->blk_fill_in_prog_lock)
 {
        struct tpacket_kbdq_core *pkc  = GET_PBDQC_FROM_RB(rb);
 
@@ -989,6 +990,7 @@ static void prb_fill_curr_block(char *curr,
                                struct tpacket_kbdq_core *pkc,
                                struct tpacket_block_desc *pbd,
                                unsigned int len)
+       __acquires(&pkc->blk_fill_in_prog_lock)
 {
        struct tpacket3_hdr *ppd;
 
@@ -2286,8 +2288,11 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
        if (do_vnet &&
            virtio_net_hdr_from_skb(skb, h.raw + macoff -
                                    sizeof(struct virtio_net_hdr),
-                                   vio_le(), true, 0))
+                                   vio_le(), true, 0)) {
+               if (po->tp_version == TPACKET_V3)
+                       prb_clear_blk_fill_status(&po->rx_ring);
                goto drop_n_account;
+       }
 
        if (po->tp_version <= TPACKET_V2) {
                packet_increment_rx_head(po, &po->rx_ring);
@@ -2393,7 +2398,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
                __clear_bit(slot_id, po->rx_ring.rx_owner_map);
                spin_unlock(&sk->sk_receive_queue.lock);
                sk->sk_data_ready(sk);
-       } else {
+       } else if (po->tp_version == TPACKET_V3) {
                prb_clear_blk_fill_status(&po->rx_ring);
        }
 
index b4c0db0..90c558f 100644 (file)
@@ -692,23 +692,25 @@ static void qrtr_port_remove(struct qrtr_sock *ipc)
  */
 static int qrtr_port_assign(struct qrtr_sock *ipc, int *port)
 {
+       u32 min_port;
        int rc;
 
        mutex_lock(&qrtr_port_lock);
        if (!*port) {
-               rc = idr_alloc(&qrtr_ports, ipc,
-                              QRTR_MIN_EPH_SOCKET, QRTR_MAX_EPH_SOCKET + 1,
-                              GFP_ATOMIC);
-               if (rc >= 0)
-                       *port = rc;
+               min_port = QRTR_MIN_EPH_SOCKET;
+               rc = idr_alloc_u32(&qrtr_ports, ipc, &min_port, QRTR_MAX_EPH_SOCKET, GFP_ATOMIC);
+               if (!rc)
+                       *port = min_port;
        } else if (*port < QRTR_MIN_EPH_SOCKET && !capable(CAP_NET_ADMIN)) {
                rc = -EACCES;
        } else if (*port == QRTR_PORT_CTRL) {
-               rc = idr_alloc(&qrtr_ports, ipc, 0, 1, GFP_ATOMIC);
+               min_port = 0;
+               rc = idr_alloc_u32(&qrtr_ports, ipc, &min_port, 0, GFP_ATOMIC);
        } else {
-               rc = idr_alloc(&qrtr_ports, ipc, *port, *port + 1, GFP_ATOMIC);
-               if (rc >= 0)
-                       *port = rc;
+               min_port = *port;
+               rc = idr_alloc_u32(&qrtr_ports, ipc, &min_port, *port, GFP_ATOMIC);
+               if (!rc)
+                       *port = min_port;
        }
        mutex_unlock(&qrtr_port_lock);
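qrtr_port_assign() now uses idr_alloc_u32(), whose calling convention differs from idr_alloc(): the requested starting ID is passed in *id, the allocated ID comes back through the same pointer, @max is inclusive, and the return value is 0 on success rather than the ID itself (which is what allows QRTR_PORT_CTRL, i.e. ID 0, to be allocated unambiguously). A small usage sketch with hypothetical names:

static int sketch_assign_port(struct idr *ports, void *ipc,
                              u32 first, u32 last, u32 *assigned)
{
        u32 id = first;
        int rc;

        rc = idr_alloc_u32(ports, ipc, &id, last, GFP_ATOMIC);
        if (rc)
                return rc;      /* -ENOSPC, -ENOMEM, ... */
        *assigned = id;         /* the ID actually allocated */
        return 0;
}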
 
index aff52e8..dbbe8ea 100644 (file)
@@ -500,7 +500,7 @@ static struct socket *sockfd_lookup_light(int fd, int *err, int *fput_needed)
        if (f.file) {
                sock = sock_from_file(f.file, err);
                if (likely(sock)) {
-                       *fput_needed = f.flags;
+                       *fput_needed = f.flags & FDPUT_FPUT;
                        return sock;
                }
                fdput(f);
@@ -1325,7 +1325,7 @@ int sock_wake_async(struct socket_wq *wq, int how, int band)
        case SOCK_WAKE_SPACE:
                if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags))
                        break;
-               /* fall through */
+               fallthrough;
        case SOCK_WAKE_IO:
 call_kill:
                kill_fasync(&wq->fasync_list, SIGIO, band);
@@ -1804,8 +1804,7 @@ int __sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr,
                ret = __sys_accept4_file(f.file, 0, upeer_sockaddr,
                                                upeer_addrlen, flags,
                                                rlimit(RLIMIT_NOFILE));
-               if (f.flags)
-                       fput(f.file);
+               fdput(f);
        }
 
        return ret;
@@ -1868,8 +1867,7 @@ int __sys_connect(int fd, struct sockaddr __user *uservaddr, int addrlen)
                ret = move_addr_to_kernel(uservaddr, addrlen, &address);
                if (!ret)
                        ret = __sys_connect_file(f.file, &address, addrlen, 0);
-               if (f.flags)
-                       fput(f.file);
+               fdput(f);
        }
 
        return ret;
@@ -2097,7 +2095,7 @@ static bool sock_use_custom_sol_socket(const struct socket *sock)
 int __sys_setsockopt(int fd, int level, int optname, char __user *user_optval,
                int optlen)
 {
-       sockptr_t optval;
+       sockptr_t optval = USER_SOCKPTR(user_optval);
        char *kernel_optval = NULL;
        int err, fput_needed;
        struct socket *sock;
@@ -2105,10 +2103,6 @@ int __sys_setsockopt(int fd, int level, int optname, char __user *user_optval,
        if (optlen < 0)
                return -EINVAL;
 
-       err = init_user_sockptr(&optval, user_optval, optlen);
-       if (err)
-               return err;
-
        sock = sockfd_lookup_light(fd, &err, &fput_needed);
        if (!sock)
                return err;
@@ -3065,7 +3059,7 @@ static int __init sock_init(void)
 
        err = register_filesystem(&sock_fs_type);
        if (err)
-               goto out_fs;
+               goto out;
        sock_mnt = kern_mount(&sock_fs_type);
        if (IS_ERR(sock_mnt)) {
                err = PTR_ERR(sock_mnt);
@@ -3088,7 +3082,6 @@ out:
 
 out_mount:
        unregister_filesystem(&sock_fs_type);
-out_fs:
        goto out;
 }
 
@@ -3161,13 +3154,13 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
                if (rule_cnt > KMALLOC_MAX_SIZE / sizeof(u32))
                        return -ENOMEM;
                buf_size += rule_cnt * sizeof(u32);
-               /* fall through */
+               fallthrough;
        case ETHTOOL_GRXRINGS:
        case ETHTOOL_GRXCLSRLCNT:
        case ETHTOOL_GRXCLSRULE:
        case ETHTOOL_SRXCLSRLINS:
                convert_out = true;
-               /* fall through */
+               fallthrough;
        case ETHTOOL_SRXCLSRLDEL:
                buf_size += sizeof(struct ethtool_rxnfc);
                convert_in = true;
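The __sys_setsockopt() change relies on the sockptr_t abstraction: USER_SOCKPTR() wraps the user pointer directly, and helpers such as copy_from_sockptr() hide whether the wrapped pointer is a user or kernel address. A brief sketch of how an option handler consumes one, with hypothetical names:

static int sketch_get_int_opt(sockptr_t optval, unsigned int optlen, int *val)
{
        if (optlen < sizeof(*val))
                return -EINVAL;
        if (copy_from_sockptr(val, optval, sizeof(*val)))
                return -EFAULT;
        return 0;
}

Callers build the wrapper with USER_SOCKPTR(user_ptr) or KERNEL_SOCKPTR(kernel_ptr), as the hunk above does for user_optval.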
index cf0fd17..90b8329 100644 (file)
@@ -584,7 +584,7 @@ gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, int len,
                                                        buf->head[0].iov_len);
        memmove(ptr, ptr + GSS_KRB5_TOK_HDR_LEN + headskip, movelen);
        buf->head[0].iov_len -= GSS_KRB5_TOK_HDR_LEN + headskip;
-       buf->len = len - GSS_KRB5_TOK_HDR_LEN + headskip;
+       buf->len = len - (GSS_KRB5_TOK_HDR_LEN + headskip);
 
        /* Trim off the trailing "extra count" and checksum blob */
        xdr_buf_trim(buf, ec + GSS_KRB5_TOK_HDR_LEN + tailskip);
index 46027d0..258b043 100644 (file)
@@ -332,7 +332,7 @@ static struct rsi *rsi_update(struct cache_detail *cd, struct rsi *new, struct r
 
 struct gss_svc_seq_data {
        /* highest seq number seen so far: */
-       int                     sd_max;
+       u32                     sd_max;
        /* for i such that sd_max-GSS_SEQ_WIN < i <= sd_max, the i-th bit of
         * sd_win is nonzero iff sequence number i has been seen already: */
        unsigned long           sd_win[GSS_SEQ_WIN/BITS_PER_LONG];
@@ -613,16 +613,29 @@ gss_svc_searchbyctx(struct cache_detail *cd, struct xdr_netobj *handle)
        return found;
 }
 
-/* Implements sequence number algorithm as specified in RFC 2203. */
-static int
-gss_check_seq_num(struct rsc *rsci, int seq_num)
+/**
+ * gss_check_seq_num - GSS sequence number window check
+ * @rqstp: RPC Call to use when reporting errors
+ * @rsci: cached GSS context state (updated on return)
+ * @seq_num: sequence number to check
+ *
+ * Implements sequence number algorithm as specified in
+ * RFC 2203, Section 5.3.3.1. "Context Management".
+ *
+ * Return values:
+ *   %true: @rqstp's GSS sequence number is inside the window
+ *   %false: @rqstp's GSS sequence number is outside the window
+ */
+static bool gss_check_seq_num(const struct svc_rqst *rqstp, struct rsc *rsci,
+                             u32 seq_num)
 {
        struct gss_svc_seq_data *sd = &rsci->seqdata;
+       bool result = false;
 
        spin_lock(&sd->sd_lock);
        if (seq_num > sd->sd_max) {
                if (seq_num >= sd->sd_max + GSS_SEQ_WIN) {
-                       memset(sd->sd_win,0,sizeof(sd->sd_win));
+                       memset(sd->sd_win, 0, sizeof(sd->sd_win));
                        sd->sd_max = seq_num;
                } else while (sd->sd_max < seq_num) {
                        sd->sd_max++;
@@ -631,17 +644,25 @@ gss_check_seq_num(struct rsc *rsci, int seq_num)
                __set_bit(seq_num % GSS_SEQ_WIN, sd->sd_win);
                goto ok;
        } else if (seq_num <= sd->sd_max - GSS_SEQ_WIN) {
-               goto drop;
+               goto toolow;
        }
-       /* sd_max - GSS_SEQ_WIN < seq_num <= sd_max */
        if (__test_and_set_bit(seq_num % GSS_SEQ_WIN, sd->sd_win))
-               goto drop;
+               goto alreadyseen;
+
 ok:
+       result = true;
+out:
        spin_unlock(&sd->sd_lock);
-       return 1;
-drop:
-       spin_unlock(&sd->sd_lock);
-       return 0;
+       return result;
+
+toolow:
+       trace_rpcgss_svc_seqno_low(rqstp, seq_num,
+                                  sd->sd_max - GSS_SEQ_WIN,
+                                  sd->sd_max);
+       goto out;
+alreadyseen:
+       trace_rpcgss_svc_seqno_seen(rqstp, seq_num);
+       goto out;
 }
 
 static inline u32 round_up_to_quad(u32 i)
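gss_check_seq_num() implements the RFC 2203 replay window reworked above: sequence numbers ahead of the window slide it forward, numbers behind it are dropped as too old, and numbers inside it are accepted only the first time they appear. A self-contained sketch of the algorithm using a single 64-bit bitmap; the real code uses a GSS_SEQ_WIN-bit array and takes sd_lock around the update, and all names below are hypothetical:

#define SKETCH_WINDOW 64U

struct sketch_seq_window {
        u32           max_seen;  /* highest sequence number seen so far */
        unsigned long bits;      /* bit i set: (max_seen - i) was seen */
};

static bool sketch_seq_check(struct sketch_seq_window *w, u32 seq)
{
        if (seq > w->max_seen) {
                u32 shift = seq - w->max_seen;

                /* Slide the window forward and record @seq. */
                w->bits = shift >= SKETCH_WINDOW ? 0 : w->bits << shift;
                w->max_seen = seq;
                w->bits |= 1UL;
                return true;
        }
        if (w->max_seen - seq >= SKETCH_WINDOW)
                return false;                           /* too old: drop */
        if (w->bits & (1UL << (w->max_seen - seq)))
                return false;                           /* replay: drop */
        w->bits |= 1UL << (w->max_seen - seq);          /* first sighting */
        return true;
}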
@@ -721,14 +742,12 @@ gss_verify_header(struct svc_rqst *rqstp, struct rsc *rsci,
        }
 
        if (gc->gc_seq > MAXSEQ) {
-               trace_rpcgss_svc_large_seqno(rqstp->rq_xid, gc->gc_seq);
+               trace_rpcgss_svc_seqno_large(rqstp, gc->gc_seq);
                *authp = rpcsec_gsserr_ctxproblem;
                return SVC_DENIED;
        }
-       if (!gss_check_seq_num(rsci, gc->gc_seq)) {
-               trace_rpcgss_svc_old_seqno(rqstp->rq_xid, gc->gc_seq);
+       if (!gss_check_seq_num(rqstp, rsci, gc->gc_seq))
                return SVC_DROP;
-       }
        return SVC_OK;
 }
 
@@ -866,11 +885,13 @@ read_u32_from_xdr_buf(struct xdr_buf *buf, int base, u32 *obj)
 static int
 unwrap_integ_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct gss_ctx *ctx)
 {
+       u32 integ_len, rseqno, maj_stat;
        int stat = -EINVAL;
-       u32 integ_len, maj_stat;
        struct xdr_netobj mic;
        struct xdr_buf integ_buf;
 
+       mic.data = NULL;
+
        /* NFS READ normally uses splice to send data in-place. However
         * the data in cache can change after the reply's MIC is computed
         * but before the RPC reply is sent. To prevent the client from
@@ -885,34 +906,44 @@ unwrap_integ_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct g
 
        integ_len = svc_getnl(&buf->head[0]);
        if (integ_len & 3)
-               return stat;
+               goto unwrap_failed;
        if (integ_len > buf->len)
-               return stat;
-       if (xdr_buf_subsegment(buf, &integ_buf, 0, integ_len)) {
-               WARN_ON_ONCE(1);
-               return stat;
-       }
+               goto unwrap_failed;
+       if (xdr_buf_subsegment(buf, &integ_buf, 0, integ_len))
+               goto unwrap_failed;
+
        /* copy out mic... */
        if (read_u32_from_xdr_buf(buf, integ_len, &mic.len))
-               return stat;
+               goto unwrap_failed;
        if (mic.len > RPC_MAX_AUTH_SIZE)
-               return stat;
+               goto unwrap_failed;
        mic.data = kmalloc(mic.len, GFP_KERNEL);
        if (!mic.data)
-               return stat;
+               goto unwrap_failed;
        if (read_bytes_from_xdr_buf(buf, integ_len + 4, mic.data, mic.len))
-               goto out;
+               goto unwrap_failed;
        maj_stat = gss_verify_mic(ctx, &integ_buf, &mic);
        if (maj_stat != GSS_S_COMPLETE)
-               goto out;
-       if (svc_getnl(&buf->head[0]) != seq)
-               goto out;
+               goto bad_mic;
+       rseqno = svc_getnl(&buf->head[0]);
+       if (rseqno != seq)
+               goto bad_seqno;
        /* trim off the mic and padding at the end before returning */
        xdr_buf_trim(buf, round_up_to_quad(mic.len) + 4);
        stat = 0;
 out:
        kfree(mic.data);
        return stat;
+
+unwrap_failed:
+       trace_rpcgss_svc_unwrap_failed(rqstp);
+       goto out;
+bad_seqno:
+       trace_rpcgss_svc_seqno_bad(rqstp, seq, rseqno);
+       goto out;
+bad_mic:
+       trace_rpcgss_svc_mic(rqstp, maj_stat);
+       goto out;
 }
 
 static inline int
@@ -937,6 +968,7 @@ unwrap_priv_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct gs
 {
        u32 priv_len, maj_stat;
        int pad, remaining_len, offset;
+       u32 rseqno;
 
        clear_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
 
@@ -951,14 +983,13 @@ unwrap_priv_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct gs
         * not yet read from the head, so these two values are different: */
        remaining_len = total_buf_len(buf);
        if (priv_len > remaining_len)
-               return -EINVAL;
+               goto unwrap_failed;
        pad = remaining_len - priv_len;
        buf->len -= pad;
        fix_priv_head(buf, pad);
 
        maj_stat = gss_unwrap(ctx, 0, priv_len, buf);
        pad = priv_len - buf->len;
-       buf->len -= pad;
        /* The upper layers assume the buffer is aligned on 4-byte boundaries.
         * In the krb5p case, at least, the data ends up offset, so we need to
         * move it around. */
@@ -972,11 +1003,22 @@ unwrap_priv_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct gs
                fix_priv_head(buf, pad);
        }
        if (maj_stat != GSS_S_COMPLETE)
-               return -EINVAL;
+               goto bad_unwrap;
 out_seq:
-       if (svc_getnl(&buf->head[0]) != seq)
-               return -EINVAL;
+       rseqno = svc_getnl(&buf->head[0]);
+       if (rseqno != seq)
+               goto bad_seqno;
        return 0;
+
+unwrap_failed:
+       trace_rpcgss_svc_unwrap_failed(rqstp);
+       return -EINVAL;
+bad_seqno:
+       trace_rpcgss_svc_seqno_bad(rqstp, seq, rseqno);
+       return -EINVAL;
+bad_unwrap:
+       trace_rpcgss_svc_unwrap(rqstp, maj_stat);
+       return -EINVAL;
 }
 
 struct gss_svc_data {
@@ -1314,8 +1356,7 @@ static int svcauth_gss_proxy_init(struct svc_rqst *rqstp,
        if (status)
                goto out;
 
-       trace_rpcgss_svc_accept_upcall(rqstp->rq_xid, ud.major_status,
-                                      ud.minor_status);
+       trace_rpcgss_svc_accept_upcall(rqstp, ud.major_status, ud.minor_status);
 
        switch (ud.major_status) {
        case GSS_S_CONTINUE_NEEDED:
@@ -1490,8 +1531,6 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp)
        int             ret;
        struct sunrpc_net *sn = net_generic(SVC_NET(rqstp), sunrpc_net_id);
 
-       trace_rpcgss_svc_accept(rqstp->rq_xid, argv->iov_len);
-
        *authp = rpc_autherr_badcred;
        if (!svcdata)
                svcdata = kmalloc(sizeof(*svcdata), GFP_KERNEL);
@@ -1608,6 +1647,7 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp)
                                        GSS_C_QOP_DEFAULT,
                                        gc->gc_svc);
                ret = SVC_OK;
+               trace_rpcgss_svc_authenticate(rqstp, gc);
                goto out;
        }
 garbage_args:
index 49fa583..d26036a 100644 (file)
@@ -5,6 +5,9 @@
 
 #include <linux/sunrpc/clnt.h>
 #include <linux/sunrpc/sched.h>
+#include <linux/sunrpc/svc.h>
+#include <linux/sunrpc/svc_xprt.h>
+#include <linux/sunrpc/auth_gss.h>
 #include <linux/sunrpc/gss_err.h>
 #include <linux/sunrpc/auth_gss.h>
 
index e9d0953..eadc0ed 100644 (file)
@@ -1510,6 +1510,6 @@ err_notifier:
 void unregister_rpc_pipefs(void)
 {
        rpc_clients_notifier_unregister();
-       kmem_cache_destroy(rpc_inode_cachep);
        unregister_filesystem(&rpc_pipe_fs_type);
+       kmem_cache_destroy(rpc_inode_cachep);
 }
index d5cc5db..6ba9d58 100644 (file)
@@ -607,6 +607,11 @@ static void xprt_reset_majortimeo(struct rpc_rqst *req)
        req->rq_majortimeo += xprt_calc_majortimeo(req);
 }
 
+static void xprt_reset_minortimeo(struct rpc_rqst *req)
+{
+       req->rq_minortimeo += req->rq_timeout;
+}
+
 static void xprt_init_majortimeo(struct rpc_task *task, struct rpc_rqst *req)
 {
        unsigned long time_init;
@@ -618,6 +623,7 @@ static void xprt_init_majortimeo(struct rpc_task *task, struct rpc_rqst *req)
                time_init = xprt_abs_ktime_to_jiffies(task->tk_start);
        req->rq_timeout = task->tk_client->cl_timeout->to_initval;
        req->rq_majortimeo = time_init + xprt_calc_majortimeo(req);
+       req->rq_minortimeo = time_init + req->rq_timeout;
 }
 
 /**
@@ -631,6 +637,8 @@ int xprt_adjust_timeout(struct rpc_rqst *req)
        const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
        int status = 0;
 
+       if (time_before(jiffies, req->rq_minortimeo))
+               return status;
        if (time_before(jiffies, req->rq_majortimeo)) {
                if (to->to_exponential)
                        req->rq_timeout <<= 1;
@@ -649,6 +657,7 @@ int xprt_adjust_timeout(struct rpc_rqst *req)
                spin_unlock(&xprt->transport_lock);
                status = -ETIMEDOUT;
        }
+       xprt_reset_minortimeo(req);
 
        if (req->rq_timeout == 0) {
                printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n");
index b647562..7f94c9a 100644 (file)
@@ -40,7 +40,6 @@
  * New MRs are created on demand.
  */
 
-#include <linux/sunrpc/rpc_rdma.h>
 #include <linux/sunrpc/svc_rdma.h>
 
 #include "xprt_rdma.h"
index 453bacc..0f5120c 100644 (file)
@@ -275,14 +275,6 @@ out:
        return n;
 }
 
-static void
-xdr_encode_rdma_segment(__be32 *iptr, struct rpcrdma_mr *mr)
-{
-       *iptr++ = cpu_to_be32(mr->mr_handle);
-       *iptr++ = cpu_to_be32(mr->mr_length);
-       xdr_encode_hyper(iptr, mr->mr_offset);
-}
-
 static int
 encode_rdma_segment(struct xdr_stream *xdr, struct rpcrdma_mr *mr)
 {
@@ -292,7 +284,7 @@ encode_rdma_segment(struct xdr_stream *xdr, struct rpcrdma_mr *mr)
        if (unlikely(!p))
                return -EMSGSIZE;
 
-       xdr_encode_rdma_segment(p, mr);
+       xdr_encode_rdma_segment(p, mr->mr_handle, mr->mr_length, mr->mr_offset);
        return 0;
 }
 
@@ -307,8 +299,8 @@ encode_read_segment(struct xdr_stream *xdr, struct rpcrdma_mr *mr,
                return -EMSGSIZE;
 
        *p++ = xdr_one;                 /* Item present */
-       *p++ = cpu_to_be32(position);
-       xdr_encode_rdma_segment(p, mr);
+       xdr_encode_read_segment(p, position, mr->mr_handle, mr->mr_length,
+                               mr->mr_offset);
        return 0;
 }
 
@@ -1133,11 +1125,11 @@ rpcrdma_is_bcall(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep)
        p = xdr_inline_decode(xdr, 0);
 
        /* Chunk lists */
-       if (*p++ != xdr_zero)
+       if (xdr_item_is_present(p++))
                return false;
-       if (*p++ != xdr_zero)
+       if (xdr_item_is_present(p++))
                return false;
-       if (*p++ != xdr_zero)
+       if (xdr_item_is_present(p++))
                return false;
 
        /* RPC header */
@@ -1176,10 +1168,7 @@ static int decode_rdma_segment(struct xdr_stream *xdr, u32 *length)
        if (unlikely(!p))
                return -EIO;
 
-       handle = be32_to_cpup(p++);
-       *length = be32_to_cpup(p++);
-       xdr_decode_hyper(p, &offset);
-
+       xdr_decode_rdma_segment(p, &handle, length, &offset);
        trace_xprtrdma_decode_seg(handle, *length, offset);
        return 0;
 }
@@ -1215,7 +1204,7 @@ static int decode_read_list(struct xdr_stream *xdr)
        p = xdr_inline_decode(xdr, sizeof(*p));
        if (unlikely(!p))
                return -EIO;
-       if (unlikely(*p != xdr_zero))
+       if (unlikely(xdr_item_is_present(p)))
                return -EIO;
        return 0;
 }
@@ -1234,7 +1223,7 @@ static int decode_write_list(struct xdr_stream *xdr, u32 *length)
                p = xdr_inline_decode(xdr, sizeof(*p));
                if (unlikely(!p))
                        return -EIO;
-               if (*p == xdr_zero)
+               if (xdr_item_is_absent(p))
                        break;
                if (!first)
                        return -EIO;
@@ -1256,7 +1245,7 @@ static int decode_reply_chunk(struct xdr_stream *xdr, u32 *length)
                return -EIO;
 
        *length = 0;
-       if (*p != xdr_zero)
+       if (xdr_item_is_present(p))
                if (decode_write_chunk(xdr, length))
                        return -EIO;
        return 0;
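Several hunks in this series replace open-coded handle/length/offset marshalling with shared helpers (xdr_encode_rdma_segment(), xdr_decode_rdma_segment(), xdr_item_is_present()/xdr_item_is_absent()). Judging from the call sites and the removed open-coded versions above, their shape is roughly the following sketch; treat the exact definitions as living in the sunrpc headers, not here:

static inline __be32 *sketch_encode_rdma_segment(__be32 *p, u32 handle,
                                                 u32 length, u64 offset)
{
        *p++ = cpu_to_be32(handle);
        *p++ = cpu_to_be32(length);
        return xdr_encode_hyper(p, offset);
}

static inline __be32 *sketch_decode_rdma_segment(__be32 *p, u32 *handle,
                                                 u32 *length, u64 *offset)
{
        *handle = be32_to_cpup(p++);
        *length = be32_to_cpup(p++);
        return xdr_decode_hyper(p, offset);
}

static inline bool sketch_item_is_present(const __be32 *p)
{
        return *p != xdr_zero;
}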
index 1ee73f7..5e7c4ba 100644 (file)
@@ -87,7 +87,7 @@ static int svc_rdma_bc_sendto(struct svcxprt_rdma *rdma,
         */
        get_page(virt_to_page(rqst->rq_buffer));
        ctxt->sc_send_wr.opcode = IB_WR_SEND;
-       return svc_rdma_send(rdma, &ctxt->sc_send_wr);
+       return svc_rdma_send(rdma, ctxt);
 }
 
 /* Server-side transport endpoint wants a whole page for its send
index e426fed..c6ea290 100644 (file)
@@ -117,6 +117,13 @@ svc_rdma_next_recv_ctxt(struct list_head *list)
                                        rc_list);
 }
 
+static void svc_rdma_recv_cid_init(struct svcxprt_rdma *rdma,
+                                  struct rpc_rdma_cid *cid)
+{
+       cid->ci_queue_id = rdma->sc_rq_cq->res.id;
+       cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids);
+}
+
 static struct svc_rdma_recv_ctxt *
 svc_rdma_recv_ctxt_alloc(struct svcxprt_rdma *rdma)
 {
@@ -135,6 +142,8 @@ svc_rdma_recv_ctxt_alloc(struct svcxprt_rdma *rdma)
        if (ib_dma_mapping_error(rdma->sc_pd->device, addr))
                goto fail2;
 
+       svc_rdma_recv_cid_init(rdma, &ctxt->rc_cid);
+
        ctxt->rc_recv_wr.next = NULL;
        ctxt->rc_recv_wr.wr_cqe = &ctxt->rc_cqe;
        ctxt->rc_recv_wr.sg_list = &ctxt->rc_recv_sge;
@@ -248,16 +257,15 @@ static int __svc_rdma_post_recv(struct svcxprt_rdma *rdma,
 {
        int ret;
 
-       svc_xprt_get(&rdma->sc_xprt);
+       trace_svcrdma_post_recv(ctxt);
        ret = ib_post_recv(rdma->sc_qp, &ctxt->rc_recv_wr, NULL);
-       trace_svcrdma_post_recv(&ctxt->rc_recv_wr, ret);
        if (ret)
                goto err_post;
        return 0;
 
 err_post:
+       trace_svcrdma_rq_post_err(rdma, ret);
        svc_rdma_recv_ctxt_put(rdma, ctxt);
-       svc_xprt_put(&rdma->sc_xprt);
        return ret;
 }
 
@@ -265,6 +273,8 @@ static int svc_rdma_post_recv(struct svcxprt_rdma *rdma)
 {
        struct svc_rdma_recv_ctxt *ctxt;
 
+       if (test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags))
+               return 0;
        ctxt = svc_rdma_recv_ctxt_get(rdma);
        if (!ctxt)
                return -ENOMEM;
@@ -309,11 +319,10 @@ static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
        struct ib_cqe *cqe = wc->wr_cqe;
        struct svc_rdma_recv_ctxt *ctxt;
 
-       trace_svcrdma_wc_receive(wc);
-
        /* WARNING: Only wc->wr_cqe and wc->status are reliable */
        ctxt = container_of(cqe, struct svc_rdma_recv_ctxt, rc_cqe);
 
+       trace_svcrdma_wc_receive(wc, &ctxt->rc_cid);
        if (wc->status != IB_WC_SUCCESS)
                goto flushed;
 
@@ -333,15 +342,13 @@ static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
        spin_unlock(&rdma->sc_rq_dto_lock);
        if (!test_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags))
                svc_xprt_enqueue(&rdma->sc_xprt);
-       goto out;
+       return;
 
 flushed:
 post_err:
        svc_rdma_recv_ctxt_put(rdma, ctxt);
        set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
        svc_xprt_enqueue(&rdma->sc_xprt);
-out:
-       svc_xprt_put(&rdma->sc_xprt);
 }
 
 /**
@@ -419,7 +426,7 @@ static bool xdr_check_read_list(struct svc_rdma_recv_ctxt *rctxt)
 
        len = 0;
        first = true;
-       while (*p != xdr_zero) {
+       while (xdr_item_is_present(p)) {
                p = xdr_inline_decode(&rctxt->rc_stream,
                                      rpcrdma_readseg_maxsz * sizeof(*p));
                if (!p)
@@ -466,9 +473,7 @@ static bool xdr_check_write_chunk(struct svc_rdma_recv_ctxt *rctxt, u32 maxlen)
                if (!p)
                        return false;
 
-               handle = be32_to_cpup(p++);
-               length = be32_to_cpup(p++);
-               xdr_decode_hyper(p, &offset);
+               xdr_decode_rdma_segment(p, &handle, &length, &offset);
                trace_svcrdma_decode_wseg(handle, length, offset);
 
                total += length;
@@ -500,7 +505,7 @@ static bool xdr_check_write_list(struct svc_rdma_recv_ctxt *rctxt)
        if (!p)
                return false;
        rctxt->rc_write_list = p;
-       while (*p != xdr_zero) {
+       while (xdr_item_is_present(p)) {
                if (!xdr_check_write_chunk(rctxt, MAX_BYTES_WRITE_CHUNK))
                        return false;
                ++chcount;
@@ -532,12 +537,11 @@ static bool xdr_check_reply_chunk(struct svc_rdma_recv_ctxt *rctxt)
        p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p));
        if (!p)
                return false;
-       rctxt->rc_reply_chunk = p;
-       if (*p != xdr_zero) {
+       rctxt->rc_reply_chunk = NULL;
+       if (xdr_item_is_present(p)) {
                if (!xdr_check_write_chunk(rctxt, MAX_BYTES_SPECIAL_CHUNK))
                        return false;
-       } else {
-               rctxt->rc_reply_chunk = NULL;
+               rctxt->rc_reply_chunk = p;
        }
        return true;
 }
@@ -568,7 +572,7 @@ static void svc_rdma_get_inv_rkey(struct svcxprt_rdma *rdma,
        p += rpcrdma_fixed_maxsz;
 
        /* Read list */
-       while (*p++ != xdr_zero) {
+       while (xdr_item_is_present(p++)) {
                p++;    /* position */
                if (inv_rkey == xdr_zero)
                        inv_rkey = *p;
@@ -578,7 +582,7 @@ static void svc_rdma_get_inv_rkey(struct svcxprt_rdma *rdma,
        }
 
        /* Write list */
-       while (*p++ != xdr_zero) {
+       while (xdr_item_is_present(p++)) {
                segcount = be32_to_cpup(p++);
                for (i = 0; i < segcount; i++) {
                        if (inv_rkey == xdr_zero)
@@ -590,7 +594,7 @@ static void svc_rdma_get_inv_rkey(struct svcxprt_rdma *rdma,
        }
 
        /* Reply chunk */
-       if (*p++ != xdr_zero) {
+       if (xdr_item_is_present(p++)) {
                segcount = be32_to_cpup(p++);
                for (i = 0; i < segcount; i++) {
                        if (inv_rkey == xdr_zero)
@@ -661,27 +665,27 @@ static int svc_rdma_xdr_decode_req(struct xdr_buf *rq_arg,
        hdr_len = xdr_stream_pos(&rctxt->rc_stream);
        rq_arg->head[0].iov_len -= hdr_len;
        rq_arg->len -= hdr_len;
-       trace_svcrdma_decode_rqst(rdma_argp, hdr_len);
+       trace_svcrdma_decode_rqst(rctxt, rdma_argp, hdr_len);
        return hdr_len;
 
 out_short:
-       trace_svcrdma_decode_short_err(rq_arg->len);
+       trace_svcrdma_decode_short_err(rctxt, rq_arg->len);
        return -EINVAL;
 
 out_version:
-       trace_svcrdma_decode_badvers_err(rdma_argp);
+       trace_svcrdma_decode_badvers_err(rctxt, rdma_argp);
        return -EPROTONOSUPPORT;
 
 out_drop:
-       trace_svcrdma_decode_drop_err(rdma_argp);
+       trace_svcrdma_decode_drop_err(rctxt, rdma_argp);
        return 0;
 
 out_proc:
-       trace_svcrdma_decode_badproc_err(rdma_argp);
+       trace_svcrdma_decode_badproc_err(rctxt, rdma_argp);
        return -EINVAL;
 
 out_inval:
-       trace_svcrdma_decode_parse_err(rdma_argp);
+       trace_svcrdma_decode_parse_err(rctxt, rdma_argp);
        return -EINVAL;
 }
 
@@ -714,57 +718,16 @@ static void rdma_read_complete(struct svc_rqst *rqstp,
        rqstp->rq_arg.buflen = head->rc_arg.buflen;
 }
 
-static void svc_rdma_send_error(struct svcxprt_rdma *xprt,
-                               __be32 *rdma_argp, int status)
+static void svc_rdma_send_error(struct svcxprt_rdma *rdma,
+                               struct svc_rdma_recv_ctxt *rctxt,
+                               int status)
 {
-       struct svc_rdma_send_ctxt *ctxt;
-       __be32 *p;
-       int ret;
+       struct svc_rdma_send_ctxt *sctxt;
 
-       ctxt = svc_rdma_send_ctxt_get(xprt);
-       if (!ctxt)
+       sctxt = svc_rdma_send_ctxt_get(rdma);
+       if (!sctxt)
                return;
-
-       p = xdr_reserve_space(&ctxt->sc_stream,
-                             rpcrdma_fixed_maxsz * sizeof(*p));
-       if (!p)
-               goto put_ctxt;
-
-       *p++ = *rdma_argp;
-       *p++ = *(rdma_argp + 1);
-       *p++ = xprt->sc_fc_credits;
-       *p = rdma_error;
-
-       switch (status) {
-       case -EPROTONOSUPPORT:
-               p = xdr_reserve_space(&ctxt->sc_stream, 3 * sizeof(*p));
-               if (!p)
-                       goto put_ctxt;
-
-               *p++ = err_vers;
-               *p++ = rpcrdma_version;
-               *p = rpcrdma_version;
-               trace_svcrdma_err_vers(*rdma_argp);
-               break;
-       default:
-               p = xdr_reserve_space(&ctxt->sc_stream, sizeof(*p));
-               if (!p)
-                       goto put_ctxt;
-
-               *p = err_chunk;
-               trace_svcrdma_err_chunk(*rdma_argp);
-       }
-
-       ctxt->sc_send_wr.num_sge = 1;
-       ctxt->sc_send_wr.opcode = IB_WR_SEND;
-       ctxt->sc_sges[0].length = ctxt->sc_hdrbuf.len;
-       ret = svc_rdma_send(xprt, &ctxt->sc_send_wr);
-       if (ret)
-               goto put_ctxt;
-       return;
-
-put_ctxt:
-       svc_rdma_send_ctxt_put(xprt, ctxt);
+       svc_rdma_send_error_msg(rdma, sctxt, rctxt, status);
 }
 
 /* By convention, backchannel calls arrive via rdma_msg type
@@ -900,13 +863,13 @@ out_readchunk:
        return 0;
 
 out_err:
-       svc_rdma_send_error(rdma_xprt, p, ret);
+       svc_rdma_send_error(rdma_xprt, ctxt, ret);
        svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
        return 0;
 
 out_postfail:
        if (ret == -EINVAL)
-               svc_rdma_send_error(rdma_xprt, p, ret);
+               svc_rdma_send_error(rdma_xprt, ctxt, ret);
        svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
        return ret;
 
index 5eb3530..fe54cbe 100644 (file)
@@ -7,6 +7,7 @@
 
 #include <rdma/rw.h>
 
+#include <linux/sunrpc/xdr.h>
 #include <linux/sunrpc/rpc_rdma.h>
 #include <linux/sunrpc/svc_rdma.h>
 
@@ -144,17 +145,25 @@ static int svc_rdma_rw_ctx_init(struct svcxprt_rdma *rdma,
  * demand, and not cached.
  */
 struct svc_rdma_chunk_ctxt {
+       struct rpc_rdma_cid     cc_cid;
        struct ib_cqe           cc_cqe;
        struct svcxprt_rdma     *cc_rdma;
        struct list_head        cc_rwctxts;
        int                     cc_sqecount;
 };
 
+static void svc_rdma_cc_cid_init(struct svcxprt_rdma *rdma,
+                                struct rpc_rdma_cid *cid)
+{
+       cid->ci_queue_id = rdma->sc_sq_cq->res.id;
+       cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids);
+}
+
 static void svc_rdma_cc_init(struct svcxprt_rdma *rdma,
                             struct svc_rdma_chunk_ctxt *cc)
 {
+       svc_rdma_cc_cid_init(rdma, &cc->cc_cid);
        cc->cc_rdma = rdma;
-       svc_xprt_get(&rdma->sc_xprt);
 
        INIT_LIST_HEAD(&cc->cc_rwctxts);
        cc->cc_sqecount = 0;
@@ -174,7 +183,6 @@ static void svc_rdma_cc_release(struct svc_rdma_chunk_ctxt *cc,
                                    ctxt->rw_nents, dir);
                svc_rdma_put_rw_ctxt(rdma, ctxt);
        }
-       svc_xprt_put(&rdma->sc_xprt);
 }
 
 /* State for sending a Write or Reply chunk.
@@ -236,7 +244,7 @@ static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
        struct svc_rdma_write_info *info =
                        container_of(cc, struct svc_rdma_write_info, wi_cc);
 
-       trace_svcrdma_wc_write(wc);
+       trace_svcrdma_wc_write(wc, &cc->cc_cid);
 
        atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
        wake_up(&rdma->sc_send_wait);
@@ -294,7 +302,7 @@ static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc)
        struct svc_rdma_read_info *info =
                        container_of(cc, struct svc_rdma_read_info, ri_cc);
 
-       trace_svcrdma_wc_read(wc);
+       trace_svcrdma_wc_read(wc, &cc->cc_cid);
 
        atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
        wake_up(&rdma->sc_send_wait);
@@ -350,6 +358,7 @@ static int svc_rdma_post_chunk_ctxt(struct svc_rdma_chunk_ctxt *cc)
        do {
                if (atomic_sub_return(cc->cc_sqecount,
                                      &rdma->sc_sq_avail) > 0) {
+                       trace_svcrdma_post_chunk(&cc->cc_cid, cc->cc_sqecount);
                        ret = ib_post_send(rdma->sc_qp, first_wr, &bad_wr);
                        if (ret)
                                break;
@@ -441,34 +450,32 @@ svc_rdma_build_writes(struct svc_rdma_write_info *info,
        seg = info->wi_segs + info->wi_seg_no * rpcrdma_segment_maxsz;
        do {
                unsigned int write_len;
-               u32 seg_length, seg_handle;
-               u64 seg_offset;
+               u32 handle, length;
+               u64 offset;
 
                if (info->wi_seg_no >= info->wi_nsegs)
                        goto out_overflow;
 
-               seg_handle = be32_to_cpup(seg);
-               seg_length = be32_to_cpup(seg + 1);
-               xdr_decode_hyper(seg + 2, &seg_offset);
-               seg_offset += info->wi_seg_off;
+               xdr_decode_rdma_segment(seg, &handle, &length, &offset);
+               offset += info->wi_seg_off;
 
-               write_len = min(remaining, seg_length - info->wi_seg_off);
+               write_len = min(remaining, length - info->wi_seg_off);
                ctxt = svc_rdma_get_rw_ctxt(rdma,
                                            (write_len >> PAGE_SHIFT) + 2);
                if (!ctxt)
                        return -ENOMEM;
 
                constructor(info, write_len, ctxt);
-               ret = svc_rdma_rw_ctx_init(rdma, ctxt, seg_offset, seg_handle,
+               ret = svc_rdma_rw_ctx_init(rdma, ctxt, offset, handle,
                                           DMA_TO_DEVICE);
                if (ret < 0)
                        return -EIO;
 
-               trace_svcrdma_send_wseg(seg_handle, write_len, seg_offset);
+               trace_svcrdma_send_wseg(handle, write_len, offset);
 
                list_add(&ctxt->rw_list, &cc->cc_rwctxts);
                cc->cc_sqecount += ret;
-               if (write_len == seg_length - info->wi_seg_off) {
+               if (write_len == length - info->wi_seg_off) {
                        seg += 4;
                        info->wi_seg_no++;
                        info->wi_seg_off = 0;
@@ -684,35 +691,24 @@ static int svc_rdma_build_read_chunk(struct svc_rqst *rqstp,
                                     struct svc_rdma_read_info *info,
                                     __be32 *p)
 {
-       unsigned int i;
        int ret;
 
        ret = -EINVAL;
        info->ri_chunklen = 0;
        while (*p++ != xdr_zero && be32_to_cpup(p++) == info->ri_position) {
-               u32 rs_handle, rs_length;
-               u64 rs_offset;
-
-               rs_handle = be32_to_cpup(p++);
-               rs_length = be32_to_cpup(p++);
-               p = xdr_decode_hyper(p, &rs_offset);
+               u32 handle, length;
+               u64 offset;
 
-               ret = svc_rdma_build_read_segment(info, rqstp,
-                                                 rs_handle, rs_length,
-                                                 rs_offset);
+               p = xdr_decode_rdma_segment(p, &handle, &length, &offset);
+               ret = svc_rdma_build_read_segment(info, rqstp, handle, length,
+                                                 offset);
                if (ret < 0)
                        break;
 
-               trace_svcrdma_send_rseg(rs_handle, rs_length, rs_offset);
-               info->ri_chunklen += rs_length;
+               trace_svcrdma_send_rseg(handle, length, offset);
+               info->ri_chunklen += length;
        }
 
-       /* Pages under I/O have been copied to head->rc_pages.
-        * Prevent their premature release by svc_xprt_release() .
-        */
-       for (i = 0; i < info->ri_readctxt->rc_page_count; i++)
-               rqstp->rq_pages[i] = NULL;
-
        return ret;
 }
 
@@ -807,6 +803,26 @@ out:
        return ret;
 }
 
+/* Pages under I/O have been copied to head->rc_pages. Ensure they
+ * are not released by svc_xprt_release() until the I/O is complete.
+ *
+ * This has to be done after all Read WRs are constructed to properly
+ * handle a page that is part of I/O on behalf of two different RDMA
+ * segments.
+ *
+ * Do this only if I/O has been posted. Otherwise, we do indeed want
+ * svc_xprt_release() to clean things up properly.
+ */
+static void svc_rdma_save_io_pages(struct svc_rqst *rqstp,
+                                  const unsigned int start,
+                                  const unsigned int num_pages)
+{
+       unsigned int i;
+
+       for (i = start; i < num_pages + start; i++)
+               rqstp->rq_pages[i] = NULL;
+}
+
 /**
  * svc_rdma_recv_read_chunk - Pull a Read chunk from the client
  * @rdma: controlling RDMA transport
@@ -860,6 +876,7 @@ int svc_rdma_recv_read_chunk(struct svcxprt_rdma *rdma, struct svc_rqst *rqstp,
        ret = svc_rdma_post_chunk_ctxt(&info->ri_cc);
        if (ret < 0)
                goto out_err;
+       svc_rdma_save_io_pages(rqstp, 0, head->rc_page_count);
        return 0;
 
 out_err:
index 38e7c3c..7b94d97 100644 (file)
 #include <rdma/rdma_cm.h>
 
 #include <linux/sunrpc/debug.h>
-#include <linux/sunrpc/rpc_rdma.h>
 #include <linux/sunrpc/svc_rdma.h>
 
 #include "xprt_rdma.h"
@@ -123,6 +122,13 @@ svc_rdma_next_send_ctxt(struct list_head *list)
                                        sc_list);
 }
 
+static void svc_rdma_send_cid_init(struct svcxprt_rdma *rdma,
+                                  struct rpc_rdma_cid *cid)
+{
+       cid->ci_queue_id = rdma->sc_sq_cq->res.id;
+       cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids);
+}
+
 static struct svc_rdma_send_ctxt *
 svc_rdma_send_ctxt_alloc(struct svcxprt_rdma *rdma)
 {
@@ -145,6 +151,8 @@ svc_rdma_send_ctxt_alloc(struct svcxprt_rdma *rdma)
        if (ib_dma_mapping_error(rdma->sc_pd->device, addr))
                goto fail2;
 
+       svc_rdma_send_cid_init(rdma, &ctxt->sc_cid);
+
        ctxt->sc_send_wr.next = NULL;
        ctxt->sc_send_wr.wr_cqe = &ctxt->sc_cqe;
        ctxt->sc_send_wr.sg_list = ctxt->sc_sges;
@@ -269,34 +277,33 @@ static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
 {
        struct svcxprt_rdma *rdma = cq->cq_context;
        struct ib_cqe *cqe = wc->wr_cqe;
-       struct svc_rdma_send_ctxt *ctxt;
+       struct svc_rdma_send_ctxt *ctxt =
+               container_of(cqe, struct svc_rdma_send_ctxt, sc_cqe);
 
-       trace_svcrdma_wc_send(wc);
+       trace_svcrdma_wc_send(wc, &ctxt->sc_cid);
 
        atomic_inc(&rdma->sc_sq_avail);
        wake_up(&rdma->sc_send_wait);
 
-       ctxt = container_of(cqe, struct svc_rdma_send_ctxt, sc_cqe);
        svc_rdma_send_ctxt_put(rdma, ctxt);
 
        if (unlikely(wc->status != IB_WC_SUCCESS)) {
                set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
                svc_xprt_enqueue(&rdma->sc_xprt);
        }
-
-       svc_xprt_put(&rdma->sc_xprt);
 }
 
 /**
  * svc_rdma_send - Post a single Send WR
  * @rdma: transport on which to post the WR
- * @wr: prepared Send WR to post
+ * @ctxt: send ctxt with a Send WR ready to post
  *
  * Returns zero if the Send WR was posted successfully. Otherwise, a
  * negative errno is returned.
  */
-int svc_rdma_send(struct svcxprt_rdma *rdma, struct ib_send_wr *wr)
+int svc_rdma_send(struct svcxprt_rdma *rdma, struct svc_rdma_send_ctxt *ctxt)
 {
+       struct ib_send_wr *wr = &ctxt->sc_send_wr;
        int ret;
 
        might_sleep();
@@ -321,8 +328,7 @@ int svc_rdma_send(struct svcxprt_rdma *rdma, struct ib_send_wr *wr)
                        continue;
                }
 
-               svc_xprt_get(&rdma->sc_xprt);
-               trace_svcrdma_post_send(wr);
+               trace_svcrdma_post_send(ctxt);
                ret = ib_post_send(rdma->sc_qp, wr, NULL);
                if (ret)
                        break;
@@ -331,7 +337,6 @@ int svc_rdma_send(struct svcxprt_rdma *rdma, struct ib_send_wr *wr)
 
        trace_svcrdma_sq_post_err(rdma, ret);
        set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
-       svc_xprt_put(&rdma->sc_xprt);
        wake_up(&rdma->sc_send_wait);
        return ret;
 }
@@ -375,11 +380,8 @@ static ssize_t svc_rdma_encode_write_segment(__be32 *src,
        if (!p)
                return -EMSGSIZE;
 
-       handle = be32_to_cpup(src++);
-       length = be32_to_cpup(src++);
-       xdr_decode_hyper(src, &offset);
+       xdr_decode_rdma_segment(src, &handle, &length, &offset);
 
-       *p++ = cpu_to_be32(handle);
        if (*remaining < length) {
                /* segment only partly filled */
                length = *remaining;
@@ -388,8 +390,7 @@ static ssize_t svc_rdma_encode_write_segment(__be32 *src,
                /* entire segment was consumed */
                *remaining -= length;
        }
-       *p++ = cpu_to_be32(length);
-       xdr_encode_hyper(p, offset);
+       xdr_encode_rdma_segment(p, handle, length, offset);
 
        trace_svcrdma_encode_wseg(handle, length, offset);
        return len;
@@ -801,45 +802,76 @@ static int svc_rdma_send_reply_msg(struct svcxprt_rdma *rdma,
        } else {
                sctxt->sc_send_wr.opcode = IB_WR_SEND;
        }
-       return svc_rdma_send(rdma, &sctxt->sc_send_wr);
+       return svc_rdma_send(rdma, sctxt);
 }
 
-/* Given the client-provided Write and Reply chunks, the server was not
- * able to form a complete reply. Return an RDMA_ERROR message so the
- * client can retire this RPC transaction. As above, the Send completion
- * routine releases payload pages that were part of a previous RDMA Write.
- *
- * Remote Invalidation is skipped for simplicity.
+/**
+ * svc_rdma_send_error_msg - Send an RPC/RDMA v1 error response
+ * @rdma: controlling transport context
+ * @sctxt: Send context for the response
+ * @rctxt: Receive context for incoming bad message
+ * @status: negative errno indicating error that occurred
+ *
+ * Given the client-provided Read, Write, and Reply chunks, the
+ * server was not able to parse the Call or form a complete Reply.
+ * Return an RDMA_ERROR message so the client can retire the RPC
+ * transaction.
+ *
+ * The caller does not have to release @sctxt. It is released by
+ * Send completion, or by this function on error.
  */
-static int svc_rdma_send_error_msg(struct svcxprt_rdma *rdma,
-                                  struct svc_rdma_send_ctxt *ctxt,
-                                  struct svc_rqst *rqstp)
+void svc_rdma_send_error_msg(struct svcxprt_rdma *rdma,
+                            struct svc_rdma_send_ctxt *sctxt,
+                            struct svc_rdma_recv_ctxt *rctxt,
+                            int status)
 {
-       struct svc_rdma_recv_ctxt *rctxt = rqstp->rq_xprt_ctxt;
        __be32 *rdma_argp = rctxt->rc_recv_buf;
        __be32 *p;
 
-       rpcrdma_set_xdrlen(&ctxt->sc_hdrbuf, 0);
-       xdr_init_encode(&ctxt->sc_stream, &ctxt->sc_hdrbuf, ctxt->sc_xprt_buf,
-                       NULL);
+       rpcrdma_set_xdrlen(&sctxt->sc_hdrbuf, 0);
+       xdr_init_encode(&sctxt->sc_stream, &sctxt->sc_hdrbuf,
+                       sctxt->sc_xprt_buf, NULL);
 
-       p = xdr_reserve_space(&ctxt->sc_stream, RPCRDMA_HDRLEN_ERR);
+       p = xdr_reserve_space(&sctxt->sc_stream,
+                             rpcrdma_fixed_maxsz * sizeof(*p));
        if (!p)
-               return -ENOMSG;
+               goto put_ctxt;
 
        *p++ = *rdma_argp;
        *p++ = *(rdma_argp + 1);
        *p++ = rdma->sc_fc_credits;
-       *p++ = rdma_error;
-       *p   = err_chunk;
-       trace_svcrdma_err_chunk(*rdma_argp);
+       *p = rdma_error;
+
+       switch (status) {
+       case -EPROTONOSUPPORT:
+               p = xdr_reserve_space(&sctxt->sc_stream, 3 * sizeof(*p));
+               if (!p)
+                       goto put_ctxt;
+
+               *p++ = err_vers;
+               *p++ = rpcrdma_version;
+               *p = rpcrdma_version;
+               trace_svcrdma_err_vers(*rdma_argp);
+               break;
+       default:
+               p = xdr_reserve_space(&sctxt->sc_stream, sizeof(*p));
+               if (!p)
+                       goto put_ctxt;
+
+               *p = err_chunk;
+               trace_svcrdma_err_chunk(*rdma_argp);
+       }
 
-       svc_rdma_save_io_pages(rqstp, ctxt);
+       /* Remote Invalidation is skipped for simplicity. */
+       sctxt->sc_send_wr.num_sge = 1;
+       sctxt->sc_send_wr.opcode = IB_WR_SEND;
+       sctxt->sc_sges[0].length = sctxt->sc_hdrbuf.len;
+       if (svc_rdma_send(rdma, sctxt))
+               goto put_ctxt;
+       return;
 
-       ctxt->sc_send_wr.num_sge = 1;
-       ctxt->sc_send_wr.opcode = IB_WR_SEND;
-       ctxt->sc_sges[0].length = ctxt->sc_hdrbuf.len;
-       return svc_rdma_send(rdma, &ctxt->sc_send_wr);
+put_ctxt:
+       svc_rdma_send_ctxt_put(rdma, sctxt);
 }
 
 /**
@@ -930,15 +962,17 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
        if (ret != -E2BIG && ret != -EINVAL)
                goto err1;
 
-       ret = svc_rdma_send_error_msg(rdma, sctxt, rqstp);
-       if (ret < 0)
-               goto err1;
+       /* Send completion releases payload pages that were part
+        * of previously posted RDMA Writes.
+        */
+       svc_rdma_save_io_pages(rqstp, sctxt);
+       svc_rdma_send_error_msg(rdma, sctxt, rctxt, ret);
        return 0;
 
  err1:
        svc_rdma_send_ctxt_put(rdma, sctxt);
  err0:
-       trace_svcrdma_send_failed(rqstp, ret);
+       trace_svcrdma_send_err(rqstp, ret);
        set_bit(XPT_CLOSE, &xprt->xpt_flags);
        return -ENOTCONN;
 }
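
The reworked error path above makes ownership explicit: svc_rdma_sendto() saves the response pages and hands the Send context to svc_rdma_send_error_msg(), which either posts it (so Send completion releases it) or releases it itself. On the wire, the function now distinguishes a version mismatch (ERR_VERS plus the supported version range) from every other parse failure (ERR_CHUNK). A hedged sketch of the resulting RPC/RDMA v1 RDMA_ERROR encoding; example_encode_rdma_error() is illustrative, and the numeric values follow RFC 8166 and the kernel's rpcrdma enums:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Illustrative only: the caller must have reserved up to seven XDR
 * words at @p. The real code reserves space with xdr_reserve_space().
 */
static void example_encode_rdma_error(__be32 *p, __be32 xid,
				      __be32 credits, bool bad_version)
{
	*p++ = xid;			/* copied from the failing Call */
	*p++ = cpu_to_be32(1);		/* rdma_vers */
	*p++ = credits;			/* flow-control credit grant */
	*p++ = cpu_to_be32(4);		/* proc = RDMA_ERROR */
	if (bad_version) {
		*p++ = cpu_to_be32(1);	/* ERR_VERS */
		*p++ = cpu_to_be32(1);	/* lowest supported version */
		*p   = cpu_to_be32(1);	/* highest supported version */
	} else {
		*p   = cpu_to_be32(2);	/* ERR_CHUNK */
	}
}
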
index d38be57..fb04479 100644 (file)
@@ -55,7 +55,6 @@
 
 #include <linux/sunrpc/addr.h>
 #include <linux/sunrpc/debug.h>
-#include <linux/sunrpc/rpc_rdma.h>
 #include <linux/sunrpc/svc_xprt.h>
 #include <linux/sunrpc/svc_rdma.h>
 
@@ -238,65 +237,56 @@ static void handle_connect_req(struct rdma_cm_id *new_cma_id,
        svc_xprt_enqueue(&listen_xprt->sc_xprt);
 }
 
-/*
- * Handles events generated on the listening endpoint. These events will be
- * either be incoming connect requests or adapter removal  events.
+/**
+ * svc_rdma_listen_handler - Handle CM events generated on a listening endpoint
+ * @cma_id: the server's listener rdma_cm_id
+ * @event: details of the event
+ *
+ * Return values:
+ *     %0: Do not destroy @cma_id
+ *     %1: Destroy @cma_id (never returned here)
+ *
+ * NB: There is never a DEVICE_REMOVAL event for INADDR_ANY listeners.
  */
-static int rdma_listen_handler(struct rdma_cm_id *cma_id,
-                              struct rdma_cm_event *event)
+static int svc_rdma_listen_handler(struct rdma_cm_id *cma_id,
+                                  struct rdma_cm_event *event)
 {
        switch (event->event) {
        case RDMA_CM_EVENT_CONNECT_REQUEST:
-               dprintk("svcrdma: Connect request on cma_id=%p, xprt = %p, "
-                       "event = %s (%d)\n", cma_id, cma_id->context,
-                       rdma_event_msg(event->event), event->event);
                handle_connect_req(cma_id, &event->param.conn);
                break;
        default:
-               /* NB: No device removal upcall for INADDR_ANY listeners */
-               dprintk("svcrdma: Unexpected event on listening endpoint %p, "
-                       "event = %s (%d)\n", cma_id,
-                       rdma_event_msg(event->event), event->event);
                break;
        }
-
        return 0;
 }
 
-static int rdma_cma_handler(struct rdma_cm_id *cma_id,
-                           struct rdma_cm_event *event)
+/**
+ * svc_rdma_cma_handler - Handle CM events on client connections
+ * @cma_id: the server's connection rdma_cm_id
+ * @event: details of the event
+ *
+ * Return values:
+ *     %0: Do not destroy @cma_id
+ *     %1: Destroy @cma_id (never returned here)
+ */
+static int svc_rdma_cma_handler(struct rdma_cm_id *cma_id,
+                               struct rdma_cm_event *event)
 {
        struct svcxprt_rdma *rdma = cma_id->context;
        struct svc_xprt *xprt = &rdma->sc_xprt;
 
        switch (event->event) {
        case RDMA_CM_EVENT_ESTABLISHED:
-               /* Accept complete */
-               svc_xprt_get(xprt);
-               dprintk("svcrdma: Connection completed on DTO xprt=%p, "
-                       "cm_id=%p\n", xprt, cma_id);
                clear_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags);
                svc_xprt_enqueue(xprt);
                break;
        case RDMA_CM_EVENT_DISCONNECTED:
-               dprintk("svcrdma: Disconnect on DTO xprt=%p, cm_id=%p\n",
-                       xprt, cma_id);
-               set_bit(XPT_CLOSE, &xprt->xpt_flags);
-               svc_xprt_enqueue(xprt);
-               svc_xprt_put(xprt);
-               break;
        case RDMA_CM_EVENT_DEVICE_REMOVAL:
-               dprintk("svcrdma: Device removal cma_id=%p, xprt = %p, "
-                       "event = %s (%d)\n", cma_id, xprt,
-                       rdma_event_msg(event->event), event->event);
                set_bit(XPT_CLOSE, &xprt->xpt_flags);
                svc_xprt_enqueue(xprt);
-               svc_xprt_put(xprt);
                break;
        default:
-               dprintk("svcrdma: Unexpected event on DTO endpoint %p, "
-                       "event = %s (%d)\n", cma_id,
-                       rdma_event_msg(event->event), event->event);
                break;
        }
        return 0;
@@ -322,7 +312,7 @@ static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
        set_bit(XPT_LISTENER, &cma_xprt->sc_xprt.xpt_flags);
        strcpy(cma_xprt->sc_xprt.xpt_remotebuf, "listener");
 
-       listen_id = rdma_create_id(net, rdma_listen_handler, cma_xprt,
+       listen_id = rdma_create_id(net, svc_rdma_listen_handler, cma_xprt,
                                   RDMA_PS_TCP, IB_QPT_RC);
        if (IS_ERR(listen_id)) {
                ret = PTR_ERR(listen_id);
@@ -486,7 +476,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
                goto errout;
 
        /* Swap out the handler */
-       newxprt->sc_cm_id->event_handler = rdma_cma_handler;
+       newxprt->sc_cm_id->event_handler = svc_rdma_cma_handler;
 
        /* Construct RDMA-CM private message */
        pmsg.cp_magic = rpcrdma_cmp_magic;
@@ -540,24 +530,11 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
        return NULL;
 }
 
-/*
- * When connected, an svc_xprt has at least two references:
- *
- * - A reference held by the cm_id between the ESTABLISHED and
- *   DISCONNECTED events. If the remote peer disconnected first, this
- *   reference could be gone.
- *
- * - A reference held by the svc_recv code that called this function
- *   as part of close processing.
- *
- * At a minimum one references should still be held.
- */
 static void svc_rdma_detach(struct svc_xprt *xprt)
 {
        struct svcxprt_rdma *rdma =
                container_of(xprt, struct svcxprt_rdma, sc_xprt);
 
-       /* Disconnect and flush posted WQE */
        rdma_disconnect(rdma->sc_cm_id);
 }
 
@@ -567,6 +544,7 @@ static void __svc_rdma_free(struct work_struct *work)
                container_of(work, struct svcxprt_rdma, sc_work);
        struct svc_xprt *xprt = &rdma->sc_xprt;
 
+       /* This blocks until the Completion Queues are empty */
        if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
                ib_drain_qp(rdma->sc_qp);
 
index 9dd7802..be1c400 100644 (file)
@@ -6,6 +6,7 @@
 menuconfig TIPC
        tristate "The TIPC Protocol"
        depends on INET
+       depends on IPV6 || IPV6=n
        help
          The Transparent Inter Process Communication (TIPC) protocol is
          specially designed for intra cluster communication. This protocol
index 2175163..90e3c70 100644 (file)
@@ -275,8 +275,9 @@ err_out:
 static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
                                 struct tipc_nl_compat_msg *msg)
 {
-       int err;
+       struct nlmsghdr *nlh;
        struct sk_buff *arg;
+       int err;
 
        if (msg->req_type && (!msg->req_size ||
                              !TLV_CHECK_TYPE(msg->req, msg->req_type)))
@@ -305,6 +306,15 @@ static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
                return -ENOMEM;
        }
 
+       nlh = nlmsg_put(arg, 0, 0, tipc_genl_family.id, 0, NLM_F_MULTI);
+       if (!nlh) {
+               kfree_skb(arg);
+               kfree_skb(msg->rep);
+               msg->rep = NULL;
+               return -EMSGSIZE;
+       }
+       nlmsg_end(arg, nlh);
+
        err = __tipc_nl_compat_dumpit(cmd, msg, arg);
        if (err) {
                kfree_skb(msg->rep);
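
The TIPC compat fix above puts a valid netlink message header into the temporary request skb before the dump runs, so handlers that parse that buffer no longer see uninitialized skb data, and the skb is freed on the new failure path. The reserve-then-finalize pattern it relies on looks like this in isolation (a generic sketch; example_prepare_req() and its parameters are not TIPC code):

#include <net/netlink.h>

/* Generic sketch: allocate an skb, reserve a netlink header in it,
 * and finalize the header so nlmsg_len is correct before anything
 * parses the buffer.
 */
static struct sk_buff *example_prepare_req(int family_id)
{
	struct sk_buff *skb;
	struct nlmsghdr *nlh;

	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return NULL;

	nlh = nlmsg_put(skb, 0, 0, family_id, 0, NLM_F_MULTI);
	if (!nlh) {
		kfree_skb(skb);
		return NULL;
	}
	nlmsg_end(skb, nlh);
	return skb;
}
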
index 18fa606..b74e274 100644 (file)
@@ -561,7 +561,7 @@ int tls_device_sendpage(struct sock *sk, struct page *page,
 {
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct iov_iter msg_iter;
-       char *kaddr = kmap(page);
+       char *kaddr;
        struct kvec iov;
        int rc;
 
@@ -576,6 +576,7 @@ int tls_device_sendpage(struct sock *sk, struct page *page,
                goto out;
        }
 
+       kaddr = kmap(page);
        iov.iov_base = kaddr + offset;
        iov.iov_len = size;
        iov_iter_kvec(&msg_iter, WRITE, &iov, 1, size);
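
The tls_device_sendpage() reorder above closes a mapping leak: kmap(page) used to run at declaration time, before the early return for unsupported flags, and that return path never called kunmap(). Taking the mapping only after the last bail-out keeps kmap()/kunmap() strictly paired. A placeholder sketch of the rule (example_copy_from_page() is not a kernel function):

#include <linux/highmem.h>
#include <linux/socket.h>
#include <linux/string.h>
#include <linux/errno.h>

/* Placeholder example: perform every early-return check before
 * mapping the page, so each kmap() has exactly one kunmap().
 */
static int example_copy_from_page(struct page *page, size_t off,
				  size_t len, int flags, char *dst)
{
	char *kaddr;

	if (flags & MSG_OOB)		/* reject before mapping anything */
		return -EOPNOTSUPP;

	kaddr = kmap(page);
	memcpy(dst, kaddr + off, len);
	kunmap(page);
	return 0;
}
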
index 710bd44..9a3d9fe 100644 (file)
@@ -935,7 +935,8 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
        int ret = 0;
        int pending;
 
-       if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
+       if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
+                              MSG_CMSG_COMPAT))
                return -EOPNOTSUPP;
 
        mutex_lock(&tls_ctx->tx_lock);
index 27bbcfa..9e93bc2 100644 (file)
@@ -1032,7 +1032,7 @@ static __poll_t vsock_poll(struct file *file, struct socket *sock,
                }
 
                /* Connected sockets that can produce data can be written. */
-               if (sk->sk_state == TCP_ESTABLISHED) {
+               if (transport && sk->sk_state == TCP_ESTABLISHED) {
                        if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
                                bool space_avail_now = false;
                                int ret = transport->notify_poll_out(
index 042ea9b..d5280fd 100644 (file)
@@ -122,7 +122,7 @@ struct xfrm_pol_inexact_bin {
        /* list containing '*:*' policies */
        struct hlist_head hhead;
 
-       seqcount_t count;
+       seqcount_spinlock_t count;
        /* tree sorted by daddr/prefix */
        struct rb_root root_d;
 
@@ -155,7 +155,7 @@ static struct xfrm_policy_afinfo const __rcu *xfrm_policy_afinfo[AF_INET6 + 1]
                                                __read_mostly;
 
 static struct kmem_cache *xfrm_dst_cache __ro_after_init;
-static __read_mostly seqcount_t xfrm_policy_hash_generation;
+static __read_mostly seqcount_mutex_t xfrm_policy_hash_generation;
 
 static struct rhashtable xfrm_policy_inexact_table;
 static const struct rhashtable_params xfrm_pol_inexact_params;
@@ -719,7 +719,7 @@ xfrm_policy_inexact_alloc_bin(const struct xfrm_policy *pol, u8 dir)
        INIT_HLIST_HEAD(&bin->hhead);
        bin->root_d = RB_ROOT;
        bin->root_s = RB_ROOT;
-       seqcount_init(&bin->count);
+       seqcount_spinlock_init(&bin->count, &net->xfrm.xfrm_policy_lock);
 
        prev = rhashtable_lookup_get_insert_key(&xfrm_policy_inexact_table,
                                                &bin->k, &bin->head,
@@ -1899,7 +1899,7 @@ static int xfrm_policy_match(const struct xfrm_policy *pol,
 
 static struct xfrm_pol_inexact_node *
 xfrm_policy_lookup_inexact_addr(const struct rb_root *r,
-                               seqcount_t *count,
+                               seqcount_spinlock_t *count,
                                const xfrm_address_t *addr, u16 family)
 {
        const struct rb_node *parent;
@@ -4157,7 +4157,7 @@ void __init xfrm_init(void)
 {
        register_pernet_subsys(&xfrm_net_ops);
        xfrm_dev_init();
-       seqcount_init(&xfrm_policy_hash_generation);
+       seqcount_mutex_init(&xfrm_policy_hash_generation, &hash_resize_mutex);
        xfrm_input_init();
 
 #ifdef CONFIG_XFRM_ESPINTCP
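
The xfrm hunks above are part of this merge's seqlock rework: a bare seqcount_t becomes seqcount_spinlock_t or seqcount_mutex_t, recording the lock that serializes writers so lockdep can check it and the read side stays safe on preemptible kernels. The pattern in isolation, with illustrative struct and function names:

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/seqlock.h>

/* Illustrative example of a seqcount with an associated spinlock. */
struct example_stats {
	spinlock_t		lock;	/* serializes writers */
	seqcount_spinlock_t	seq;	/* readers retry against this */
	u64			a, b;
};

static void example_init(struct example_stats *s)
{
	spin_lock_init(&s->lock);
	seqcount_spinlock_init(&s->seq, &s->lock);
}

static void example_update(struct example_stats *s, u64 a, u64 b)
{
	spin_lock(&s->lock);		/* lockdep verifies this is held */
	write_seqcount_begin(&s->seq);
	s->a = a;
	s->b = b;
	write_seqcount_end(&s->seq);
	spin_unlock(&s->lock);
}

static u64 example_read(struct example_stats *s)
{
	unsigned int start;
	u64 sum;

	do {
		start = read_seqcount_begin(&s->seq);
		sum = s->a + s->b;
	} while (read_seqcount_retry(&s->seq, start));
	return sum;
}
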
index dbdf939..19d5568 100644 (file)
@@ -1,3 +1,2 @@
 # SPDX-License-Identifier: GPL-2.0
-userprogs := cfag12864b-example
-always-y := $(userprogs)
+userprogs-always-y += cfag12864b-example
index 989e4ba..629e43b 100644 (file)
@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0-only
-userprogs := binderfs_example
-always-y := $(userprogs)
+userprogs-always-y += binderfs_example
 
 userccflags += -I usr/include
index 50cb40e..d98a9e0 100644 (file)
@@ -1,7 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 obj-$(CONFIG_SAMPLE_CONNECTOR) += cn_test.o
 
-userprogs := ucon
-always-$(CONFIG_CC_CAN_LINK) := $(userprogs)
+userprogs-always-$(CONFIG_CC_CAN_LINK) += ucon
 
 userccflags += -I usr/include
index d2c77ed..594d989 100644 (file)
@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0
-userprogs := hid-example
-always-y := $(userprogs)
+userprogs-always-y += hid-example
 
 userccflags += -I usr/include
index 329411f..c54b8a0 100644 (file)
@@ -1,7 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0
 # Copyright (c) 2012-2019, Intel Corporation. All rights reserved.
-
-userprogs := mei-amt-version
-always-y := $(userprogs)
+userprogs-always-y += mei-amt-version
 
 userccflags += -I usr/include
index 6e5b67e..9754e2d 100644 (file)
@@ -1,6 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0
-
-usertprogs := pidfd-metadata
-always-y := $(userprogs)
+userprogs-always-y += pidfd-metadata
 
 userccflags += -I usr/include
index 75916c2..c85ae0e 100644 (file)
@@ -1,8 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
-userprogs := bpf-fancy dropper bpf-direct user-trap
+userprogs-always-y += bpf-fancy dropper bpf-direct user-trap
 
 bpf-fancy-objs := bpf-fancy.o bpf-helper.o
 
 userccflags += -I usr/include
-
-always-y := $(userprogs)
index 15c7ddb..e6836cd 100644 (file)
@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0
-userprogs := hpet_example
-always-y := $(userprogs)
+userprogs-always-y += hpet_example
 
 userccflags += -I usr/include
index 9e652fc..0aa424e 100644 (file)
@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0-only
-userprogs := uhid-example
-always-y := $(userprogs)
+userprogs-always-y += uhid-example
 
 userccflags += -I usr/include
index 00b6824..6377a67 100644 (file)
@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0-only
-userprogs := test-fsmount test-statx
-always-y := $(userprogs)
+userprogs-always-y += test-fsmount test-statx
 
 userccflags += -I usr/include
index 792b22f..c0db3a6 100644 (file)
@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0-only
-userprogs := watch_test
-always-y := $(userprogs)
+userprogs-always-y += watch_test
 
 userccflags += -I usr/include
index 17384cf..ab39d23 100644 (file)
@@ -1,3 +1,2 @@
 # SPDX-License-Identifier: GPL-2.0
-userprogs := watchdog-simple
-always-y := $(userprogs)
+userprogs-always-y += watchdog-simple
index 9a15fbf..83a1637 100644 (file)
@@ -119,25 +119,21 @@ as-instr = $(call try-run,\
 __cc-option = $(call try-run,\
        $(1) -Werror $(2) $(3) -c -x c /dev/null -o "$$TMP",$(3),$(4))
 
-# Do not attempt to build with gcc plugins during cc-option tests.
-# (And this uses delayed resolution so the flags will be up to date.)
-CC_OPTION_CFLAGS = $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
-
 # cc-option
 # Usage: cflags-y += $(call cc-option,-march=winchip-c6,-march=i586)
 
 cc-option = $(call __cc-option, $(CC),\
-       $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS),$(1),$(2))
+       $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS),$(1),$(2))
 
 # cc-option-yn
 # Usage: flag := $(call cc-option-yn,-march=winchip-c6)
 cc-option-yn = $(call try-run,\
-       $(CC) -Werror $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) $(1) -c -x c /dev/null -o "$$TMP",y,n)
+       $(CC) -Werror $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(1) -c -x c /dev/null -o "$$TMP",y,n)
 
 # cc-disable-warning
 # Usage: cflags-y += $(call cc-disable-warning,unused-but-set-variable)
 cc-disable-warning = $(call try-run,\
-       $(CC) -Werror $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) -W$(strip $(1)) -c -x c /dev/null -o "$$TMP",-Wno-$(strip $(1)))
+       $(CC) -Werror $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) -W$(strip $(1)) -c -x c /dev/null -o "$$TMP",-Wno-$(strip $(1)))
 
 # cc-ifversion
 # Usage:  EXTRA_CFLAGS += $(call cc-ifversion, -lt, 0402, -O1)
index 95ecf97..bc018e4 100644 (file)
@@ -3,14 +3,14 @@
 # scripts contains sources for various helper programs used throughout
 # the kernel for the build process.
 
-always-$(CONFIG_BUILD_BIN2C)                   += bin2c
-always-$(CONFIG_KALLSYMS)                      += kallsyms
-always-$(BUILD_C_RECORDMCOUNT)                 += recordmcount
-always-$(CONFIG_BUILDTIME_TABLE_SORT)          += sorttable
-always-$(CONFIG_ASN1)                          += asn1_compiler
-always-$(CONFIG_MODULE_SIG_FORMAT)             += sign-file
-always-$(CONFIG_SYSTEM_TRUSTED_KEYRING)                += extract-cert
-always-$(CONFIG_SYSTEM_EXTRA_CERTIFICATE)      += insert-sys-cert
+hostprogs-always-$(CONFIG_BUILD_BIN2C)                 += bin2c
+hostprogs-always-$(CONFIG_KALLSYMS)                    += kallsyms
+hostprogs-always-$(BUILD_C_RECORDMCOUNT)               += recordmcount
+hostprogs-always-$(CONFIG_BUILDTIME_TABLE_SORT)                += sorttable
+hostprogs-always-$(CONFIG_ASN1)                                += asn1_compiler
+hostprogs-always-$(CONFIG_MODULE_SIG_FORMAT)           += sign-file
+hostprogs-always-$(CONFIG_SYSTEM_TRUSTED_KEYRING)      += extract-cert
+hostprogs-always-$(CONFIG_SYSTEM_EXTRA_CERTIFICATE)    += insert-sys-cert
 
 HOSTCFLAGS_sorttable.o = -I$(srctree)/tools/include
 HOSTCFLAGS_asn1_compiler.o = -I$(srctree)/include
@@ -26,8 +26,6 @@ HOSTCFLAGS_sorttable.o += -DUNWINDER_ORC_ENABLED
 HOSTLDLIBS_sorttable = -lpthread
 endif
 
-hostprogs := $(always-y) $(always-m)
-
 # The following programs are only built on demand
 hostprogs += unifdef
 
index 2e8810b..a467b93 100644 (file)
@@ -45,12 +45,15 @@ include $(kbuild-file)
 
 include scripts/Makefile.lib
 
-# Do not include host rules unless needed
-ifneq ($(hostprogs)$(hostcxxlibs-y)$(hostcxxlibs-m),)
+# Do not include hostprogs rules unless needed.
+# $(sort ...) is used here to remove duplicated words and excessive spaces.
+hostprogs := $(sort $(hostprogs))
+ifneq ($(hostprogs),)
 include scripts/Makefile.host
 endif
 
 # Do not include userprogs rules unless needed.
+# $(sort ...) is used here to remove duplicated words and excessive spaces.
 userprogs := $(sort $(userprogs))
 ifneq ($(userprogs),)
 include scripts/Makefile.userprogs
@@ -252,9 +255,9 @@ cmd_gen_ksymdeps = \
 endif
 
 define rule_cc_o_c
-       $(call cmd,checksrc)
        $(call cmd_and_fixdep,cc_o_c)
        $(call cmd,gen_ksymdeps)
+       $(call cmd,checksrc)
        $(call cmd,checkdoc)
        $(call cmd,objtool)
        $(call cmd,modversions_c)
@@ -277,8 +280,8 @@ endif
 
 # Built-in and composite module parts
 $(obj)/%.o: $(src)/%.c $(recordmcount_source) $(objtool_dep) FORCE
-       $(call cmd,force_checksrc)
        $(call if_changed_rule,cc_o_c)
+       $(call cmd,force_checksrc)
 
 cmd_mod = { \
        echo $(if $($*-objs)$($*-y)$($*-m), $(addprefix $(obj)/, $($*-objs) $($*-y) $($*-m)), $(@:.mod=.o)); \
@@ -515,15 +518,13 @@ existing-targets := $(wildcard $(sort $(targets)))
 
 -include $(foreach f,$(existing-targets),$(dir $(f)).$(notdir $(f)).cmd)
 
-ifdef building_out_of_srctree
 # Create directories for object files if they do not exist
-obj-dirs := $(sort $(obj) $(patsubst %/,%, $(dir $(targets))))
+obj-dirs := $(sort $(patsubst %/,%, $(dir $(targets))))
 # If targets exist, their directories apparently exist. Skip mkdir.
 existing-dirs := $(sort $(patsubst %/,%, $(dir $(existing-targets))))
 obj-dirs := $(strip $(filter-out $(existing-dirs), $(obj-dirs)))
 ifneq ($(obj-dirs),)
 $(shell mkdir -p $(obj-dirs))
 endif
-endif
 
 .PHONY: $(PHONY)
index e2c7612..d9e0cea 100644 (file)
@@ -27,10 +27,15 @@ subdir-ymn  := $(addprefix $(obj)/,$(subdir-ymn))
 # build a list of files to remove, usually relative to the current
 # directory
 
-__clean-files  := $(extra-y) $(extra-m) $(extra-)       \
-                  $(always) $(always-y) $(always-m) $(always-) $(targets) $(clean-files)   \
-                  $(hostprogs) $(hostprogs-y) $(hostprogs-m) $(hostprogs-) $(userprogs) \
-                  $(hostcxxlibs-y) $(hostcxxlibs-m)
+__clean-files  := \
+       $(clean-files) $(targets) $(hostprogs) $(userprogs) \
+       $(extra-y) $(extra-m) $(extra-) \
+       $(always-y) $(always-m) $(always-) \
+       $(hostprogs-always-y) $(hostprogs-always-m) $(hostprogs-always-) \
+       $(userprogs-always-y) $(userprogs-always-m) $(userprogs-always-)
+
+# deprecated
+__clean-files  += $(always) $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
 
 __clean-files   := $(filter-out $(no-clean-files), $(__clean-files))
 
index c8a4a03..278b4d6 100644 (file)
@@ -38,39 +38,31 @@ $(obj)/%.tab.c $(obj)/%.tab.h: $(src)/%.y FORCE
 # Will compile qconf as a C++ program, and menu as a C program.
 # They are linked as C++ code to the executable qconf
 
-__hostprogs := $(sort $(hostprogs))
-host-cxxshlib := $(sort $(hostcxxlibs-y) $(hostcxxlibs-m))
-
 # C code
 # Executables compiled from a single .c file
-host-csingle   := $(foreach m,$(__hostprogs), \
+host-csingle   := $(foreach m,$(hostprogs), \
                        $(if $($(m)-objs)$($(m)-cxxobjs),,$(m)))
 
 # C executables linked based on several .o files
-host-cmulti    := $(foreach m,$(__hostprogs),\
+host-cmulti    := $(foreach m,$(hostprogs),\
                   $(if $($(m)-cxxobjs),,$(if $($(m)-objs),$(m))))
 
 # Object (.o) files compiled from .c files
-host-cobjs     := $(sort $(foreach m,$(__hostprogs),$($(m)-objs)))
+host-cobjs     := $(sort $(foreach m,$(hostprogs),$($(m)-objs)))
 
 # C++ code
 # C++ executables compiled from at least one .cc file
 # and zero or more .c files
-host-cxxmulti  := $(foreach m,$(__hostprogs),$(if $($(m)-cxxobjs),$(m)))
+host-cxxmulti  := $(foreach m,$(hostprogs),$(if $($(m)-cxxobjs),$(m)))
 
 # C++ Object (.o) files compiled from .cc files
 host-cxxobjs   := $(sort $(foreach m,$(host-cxxmulti),$($(m)-cxxobjs)))
 
-# Object (.o) files used by the shared libaries
-host-cxxshobjs := $(sort $(foreach m,$(host-cxxshlib),$($(m:.so=-objs))))
-
 host-csingle   := $(addprefix $(obj)/,$(host-csingle))
 host-cmulti    := $(addprefix $(obj)/,$(host-cmulti))
 host-cobjs     := $(addprefix $(obj)/,$(host-cobjs))
 host-cxxmulti  := $(addprefix $(obj)/,$(host-cxxmulti))
 host-cxxobjs   := $(addprefix $(obj)/,$(host-cxxobjs))
-host-cxxshlib  := $(addprefix $(obj)/,$(host-cxxshlib))
-host-cxxshobjs := $(addprefix $(obj)/,$(host-cxxshobjs))
 
 #####
 # Handle options to gcc. Support building with separate output directory
@@ -136,25 +128,5 @@ quiet_cmd_host-cxxobjs     = HOSTCXX $@
 $(host-cxxobjs): $(obj)/%.o: $(src)/%.cc FORCE
        $(call if_changed_dep,host-cxxobjs)
 
-# Compile .c file, create position independent .o file
-# Note that plugin capable gcc versions can be either C or C++ based
-# therefore plugin source files have to be compilable in both C and C++ mode.
-# This is why a C++ compiler is invoked on a .c file.
-# host-cxxshobjs -> .o
-quiet_cmd_host-cxxshobjs       = HOSTCXX -fPIC $@
-      cmd_host-cxxshobjs       = $(HOSTCXX) $(hostcxx_flags) -fPIC -c -o $@ $<
-$(host-cxxshobjs): $(obj)/%.o: $(src)/%.c FORCE
-       $(call if_changed_dep,host-cxxshobjs)
-
-# Link a shared library, based on position independent .o files
-# *.o -> .so shared library (host-cxxshlib)
-quiet_cmd_host-cxxshlib        = HOSTLLD -shared $@
-      cmd_host-cxxshlib        = $(HOSTCXX) $(KBUILD_HOSTLDFLAGS) -shared -o $@ \
-                         $(addprefix $(obj)/, $($(target-stem)-objs)) \
-                         $(KBUILD_HOSTLDLIBS) $(HOSTLDLIBS_$(target-stem).so)
-$(host-cxxshlib): FORCE
-       $(call if_changed,host-cxxshlib)
-$(call multi_depend, $(host-cxxshlib), .so, -objs)
-
-targets += $(host-csingle)  $(host-cmulti) $(host-cobjs)\
-          $(host-cxxmulti) $(host-cxxobjs) $(host-cxxshlib) $(host-cxxshobjs)
+targets += $(host-csingle) $(host-cmulti) $(host-cobjs) \
+          $(host-cxxmulti) $(host-cxxobjs)
index 52b1133..67e8cfe 100644 (file)
@@ -1,10 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0-only
-ifdef CONFIG_KCOV
-
 kcov-flags-$(CONFIG_CC_HAS_SANCOV_TRACE_PC)    += -fsanitize-coverage=trace-pc
 kcov-flags-$(CONFIG_KCOV_ENABLE_COMPARISONS)   += -fsanitize-coverage=trace-cmp
 kcov-flags-$(CONFIG_GCC_PLUGIN_SANCOV)         += -fplugin=$(objtree)/scripts/gcc-plugins/sancov_plugin.so
 
 export CFLAGS_KCOV := $(kcov-flags-y)
-
-endif
index dd66206..c50f27b 100644 (file)
@@ -1,6 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0
-ifdef CONFIG_KCSAN
-
 # GCC and Clang accept backend options differently. Do not wrap in cc-option,
 # because Clang accepts "--param" even if it is unused.
 ifdef CONFIG_CC_IS_CLANG
@@ -15,5 +13,3 @@ CFLAGS_KCSAN := -fsanitize=thread \
        $(call cc-option,$(call cc-param,tsan-instrument-func-entry-exit=0) -fno-optimize-sibling-calls) \
        $(call cc-option,$(call cc-param,tsan-instrument-read-before-write=1)) \
        $(call cc-param,tsan-distinguish-volatile=1)
-
-endif # CONFIG_KCSAN
index 54f7b7e..3d59971 100644 (file)
@@ -68,6 +68,17 @@ real-obj-m := $(foreach m, $(obj-m), $(if $(strip $($(m:.o=-objs)) $($(m:.o=-y))
 
 always-y += $(always-m)
 
+# hostprogs-always-y += foo
+# ... is a shorthand for
+# hostprogs += foo
+# always-y  += foo
+hostprogs += $(hostprogs-always-y) $(hostprogs-always-m)
+always-y += $(hostprogs-always-y) $(hostprogs-always-m)
+
+# userprogs-always-y is likewise.
+userprogs += $(userprogs-always-y) $(userprogs-always-m)
+always-y += $(userprogs-always-y) $(userprogs-always-m)
+
 # DTB
 # If CONFIG_OF_ALL_DTBS is enabled, all DT blobs are built
 extra-y                                += $(dtb-y)
@@ -111,12 +122,14 @@ basename_flags = -DKBUILD_BASENAME=$(call name-fix,$(basetarget))
 modname_flags  = -DKBUILD_MODNAME=$(call name-fix,$(modname))
 modfile_flags  = -DKBUILD_MODFILE=$(call stringify,$(modfile))
 
-orig_c_flags   = $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) \
-                 $(ccflags-y) $(CFLAGS_$(target-stem).o)
-_c_flags       = $(filter-out $(CFLAGS_REMOVE_$(target-stem).o), $(orig_c_flags))
-orig_a_flags   = $(KBUILD_CPPFLAGS) $(KBUILD_AFLAGS) \
-                 $(asflags-y) $(AFLAGS_$(target-stem).o)
-_a_flags       = $(filter-out $(AFLAGS_REMOVE_$(target-stem).o), $(orig_a_flags))
+_c_flags       = $(filter-out $(CFLAGS_REMOVE_$(target-stem).o), \
+                     $(filter-out $(ccflags-remove-y), \
+                         $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(ccflags-y)) \
+                     $(CFLAGS_$(target-stem).o))
+_a_flags       = $(filter-out $(AFLAGS_REMOVE_$(target-stem).o), \
+                     $(filter-out $(asflags-remove-y), \
+                         $(KBUILD_CPPFLAGS) $(KBUILD_AFLAGS) $(asflags-y)) \
+                     $(AFLAGS_$(target-stem).o))
 _cpp_flags     = $(KBUILD_CPPFLAGS) $(cppflags-y) $(CPPFLAGS_$(target-stem).lds)
 
 #
@@ -303,8 +316,7 @@ $(obj)/%.dtb.S: $(obj)/%.dtb FORCE
        $(call if_changed,dt_S_dtb)
 
 quiet_cmd_dtc = DTC     $@
-cmd_dtc = mkdir -p $(dir ${dtc-tmp}) ; \
-       $(HOSTCC) -E $(dtc_cpp_flags) -x assembler-with-cpp -o $(dtc-tmp) $< ; \
+cmd_dtc = $(HOSTCC) -E $(dtc_cpp_flags) -x assembler-with-cpp -o $(dtc-tmp) $< ; \
        $(DTC) -O $(patsubst .%,%,$(suffix $@)) -o $@ -b 0 \
                $(addprefix -i,$(dir $<) $(DTC_INCLUDE)) $(DTC_FLAGS) \
                -d $(depfile).dtc.tmp $(dtc-tmp) ; \
index 5b15bc4..2734802 100644 (file)
@@ -1,6 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0
-ifdef CONFIG_UBSAN
-
 ifdef CONFIG_UBSAN_ALIGNMENT
       CFLAGS_UBSAN += $(call cc-option, -fsanitize=alignment)
 endif
@@ -26,4 +24,3 @@ endif
       # -fsanitize=* options make GCC less smart than usual and
       # increase the number of 'maybe-uninitialized' false positives
       CFLAGS_UBSAN += $(call cc-option, -Wno-maybe-uninitialized)
-endif
index 290dd27..eeb6a38 100644 (file)
@@ -2,5 +2,4 @@
 #
 # fixdep: used to generate dependency information during build process
 
-hostprogs      := fixdep
-always-y       := $(hostprogs)
+hostprogs-always-y     += fixdep
index 00a10a2..1548f9c 100755 (executable)
@@ -34,7 +34,7 @@ REGEX_SOURCE_SYMBOL = re.compile(SOURCE_SYMBOL)
 REGEX_KCONFIG_DEF = re.compile(DEF)
 REGEX_KCONFIG_EXPR = re.compile(EXPR)
 REGEX_KCONFIG_STMT = re.compile(STMT)
-REGEX_KCONFIG_HELP = re.compile(r"^\s+(help|---help---)\s*$")
+REGEX_KCONFIG_HELP = re.compile(r"^\s+help\s*$")
 REGEX_FILTER_SYMBOLS = re.compile(r"[A-Za-z0-9]$")
 REGEX_NUMERIC = re.compile(r"0[xX][0-9a-fA-F]+|[0-9]+")
 REGEX_QUOTES = re.compile("(\"(.*?)\")")
index 599b8c4..60d4a79 100755 (executable)
@@ -59,7 +59,7 @@ my $spelling_file = "$D/spelling.txt";
 my $codespell = 0;
 my $codespellfile = "/usr/share/codespell/dictionary.txt";
 my $conststructsfile = "$D/const_structs.checkpatch";
-my $typedefsfile = "";
+my $typedefsfile;
 my $color = "auto";
 my $allow_c99_comments = 1; # Can be overridden by --ignore C99_COMMENT_TOLERANCE
 # git output parsing needs US English output, so first set backtick child process LANGUAGE
@@ -588,6 +588,8 @@ our @mode_permission_funcs = (
        ["__ATTR", 2],
 );
 
+my $word_pattern = '\b[A-Z]?[a-z]{2,}\b';
+
 #Create a search pattern for all these functions to speed up a loop below
 our $mode_perms_search = "";
 foreach my $entry (@mode_permission_funcs) {
@@ -756,7 +758,7 @@ sub read_words {
                                next;
                        }
 
-                       $$wordsRef .= '|' if ($$wordsRef ne "");
+                       $$wordsRef .= '|' if (defined $$wordsRef);
                        $$wordsRef .= $line;
                }
                close($file);
@@ -766,16 +768,18 @@ sub read_words {
        return 0;
 }
 
-my $const_structs = "";
-read_words(\$const_structs, $conststructsfile)
-    or warn "No structs that should be const will be found - file '$conststructsfile': $!\n";
+my $const_structs;
+if (show_type("CONST_STRUCT")) {
+       read_words(\$const_structs, $conststructsfile)
+           or warn "No structs that should be const will be found - file '$conststructsfile': $!\n";
+}
 
-my $typeOtherTypedefs = "";
-if (length($typedefsfile)) {
+if (defined($typedefsfile)) {
+       my $typeOtherTypedefs;
        read_words(\$typeOtherTypedefs, $typedefsfile)
            or warn "No additional types will be considered - file '$typedefsfile': $!\n";
+       $typeTypedefs .= '|' . $typeOtherTypedefs if (defined $typeOtherTypedefs);
 }
-$typeTypedefs .= '|' . $typeOtherTypedefs if ($typeOtherTypedefs ne "");
 
 sub build_types {
        my $mods = "(?x:  \n" . join("|\n  ", (@modifierList, @modifierListFile)) . "\n)";
@@ -3041,11 +3045,7 @@ sub process {
 
                                if ($lines[$ln - 1] =~ /^\+\s*(?:bool|tristate|prompt)\s*["']/) {
                                        $is_start = 1;
-                               } elsif ($lines[$ln - 1] =~ /^\+\s*(?:help|---help---)\s*$/) {
-                                       if ($lines[$ln - 1] =~ "---help---") {
-                                               WARN("CONFIG_DESCRIPTION",
-                                                    "prefer 'help' over '---help---' for new help texts\n" . $herecurr);
-                                       }
+                               } elsif ($lines[$ln - 1] =~ /^\+\s*(?:---)?help(?:---)?$/) {
                                        $length = -1;
                                }
 
@@ -3310,6 +3310,42 @@ sub process {
                        }
                }
 
+# check for repeated words separated by a single space
+               if ($rawline =~ /^\+/) {
+                       while ($rawline =~ /\b($word_pattern) (?=($word_pattern))/g) {
+
+                               my $first = $1;
+                               my $second = $2;
+
+                               if ($first =~ /(?:struct|union|enum)/) {
+                                       pos($rawline) += length($first) + length($second) + 1;
+                                       next;
+                               }
+
+                               next if ($first ne $second);
+                               next if ($first eq 'long');
+
+                               if (WARN("REPEATED_WORD",
+                                        "Possible repeated word: '$first'\n" . $herecurr) &&
+                                   $fix) {
+                                       $fixed[$fixlinenr] =~ s/\b$first $second\b/$first/;
+                               }
+                       }
+
+                       # if it's a repeated word on consecutive lines in a comment block
+                       if ($prevline =~ /$;+\s*$/ &&
+                           $prevrawline =~ /($word_pattern)\s*$/) {
+                               my $last_word = $1;
+                               if ($rawline =~ /^\+\s*\*\s*$last_word /) {
+                                       if (WARN("REPEATED_WORD",
+                                                "Possible repeated word: '$last_word'\n" . $hereprev) &&
+                                           $fix) {
+                                               $fixed[$fixlinenr] =~ s/(\+\s*\*\s*)$last_word /$1/;
+                                       }
+                               }
+                       }
+               }
+
 # check for space before tabs.
                if ($rawline =~ /^\+/ && $rawline =~ / \t/) {
                        my $herevet = "$here\n" . cat_vet($rawline) . "\n";
@@ -5020,8 +5056,30 @@ sub process {
                        my ($s, $c) = ($stat, $cond);
 
                        if ($c =~ /\bif\s*\(.*[^<>!=]=[^=].*/s) {
-                               ERROR("ASSIGN_IN_IF",
-                                     "do not use assignment in if condition\n" . $herecurr);
+                               if (ERROR("ASSIGN_IN_IF",
+                                         "do not use assignment in if condition\n" . $herecurr) &&
+                                   $fix && $perl_version_ok) {
+                                       if ($rawline =~ /^\+(\s+)if\s*\(\s*(\!)?\s*\(\s*(($Lval)\s*=\s*$LvalOrFunc)\s*\)\s*(?:($Compare)\s*($FuncArg))?\s*\)\s*(\{)?\s*$/) {
+                                               my $space = $1;
+                                               my $not = $2;
+                                               my $statement = $3;
+                                               my $assigned = $4;
+                                               my $test = $8;
+                                               my $against = $9;
+                                               my $brace = $15;
+                                               fix_delete_line($fixlinenr, $rawline);
+                                               fix_insert_line($fixlinenr, "$space$statement;");
+                                               my $newline = "${space}if (";
+                                               $newline .= '!' if defined($not);
+                                               $newline .= '(' if (defined $not && defined($test) && defined($against));
+                                               $newline .= "$assigned";
+                                               $newline .= " $test $against" if (defined($test) && defined($against));
+                                               $newline .= ')' if (defined $not && defined($test) && defined($against));
+                                               $newline .= ')';
+                                               $newline .= " {" if (defined($brace));
+                                               fix_insert_line($fixlinenr + 1, $newline);
+                                       }
+                               }
                        }
 
                        # Find out what is on the end of the line after the
@@ -6465,6 +6523,12 @@ sub process {
                        }
                }
 
+# check for IS_ENABLED() without CONFIG_<FOO> ($rawline for comments too)
+               if ($rawline =~ /\bIS_ENABLED\s*\(\s*(\w+)\s*\)/ && $1 !~ /^CONFIG_/) {
+                       WARN("IS_ENABLED_CONFIG",
+                            "IS_ENABLED($1) is normally used as IS_ENABLED(CONFIG_$1)\n" . $herecurr);
+               }
+
 # check for #if defined CONFIG_<FOO> || defined CONFIG_<FOO>_MODULE
                if ($line =~ /^\+\s*#\s*if\s+defined(?:\s*\(?\s*|\s+)(CONFIG_[A-Z_]+)\s*\)?\s*\|\|\s*defined(?:\s*\(?\s*|\s+)\1_MODULE\s*\)?\s*$/) {
                        my $config = $1;
@@ -6475,31 +6539,6 @@ sub process {
                        }
                }
 
-# check for case / default statements not preceded by break/fallthrough/switch
-               if ($line =~ /^.\s*(?:case\s+(?:$Ident|$Constant)\s*|default):/) {
-                       my $has_break = 0;
-                       my $has_statement = 0;
-                       my $count = 0;
-                       my $prevline = $linenr;
-                       while ($prevline > 1 && ($file || $count < 3) && !$has_break) {
-                               $prevline--;
-                               my $rline = $rawlines[$prevline - 1];
-                               my $fline = $lines[$prevline - 1];
-                               last if ($fline =~ /^\@\@/);
-                               next if ($fline =~ /^\-/);
-                               next if ($fline =~ /^.(?:\s*(?:case\s+(?:$Ident|$Constant)[\s$;]*|default):[\s$;]*)*$/);
-                               $has_break = 1 if ($rline =~ /fall[\s_-]*(through|thru)/i);
-                               next if ($fline =~ /^.[\s$;]*$/);
-                               $has_statement = 1;
-                               $count++;
-                               $has_break = 1 if ($fline =~ /\bswitch\b|\b(?:break\s*;[\s$;]*$|exit\s*\(\b|return\b|goto\b|continue\b)/);
-                       }
-                       if (!$has_break && $has_statement) {
-                               WARN("MISSING_BREAK",
-                                    "Possible switch case/default not preceded by break or fallthrough comment\n" . $herecurr);
-                       }
-               }
-
 # check for /* fallthrough */ like comment, prefer fallthrough;
                my @fallthroughs = (
                        'fallthrough',
@@ -6615,7 +6654,8 @@ sub process {
 
 # check for various structs that are normally const (ops, kgdb, device_tree)
 # and avoid what seem like struct definitions 'struct foo {'
-               if ($line !~ /\bconst\b/ &&
+               if (defined($const_structs) &&
+                   $line !~ /\bconst\b/ &&
                    $line =~ /\bstruct\s+($const_structs)\b(?!\s*\{)/) {
                        WARN("CONST_STRUCT",
                             "struct $1 should normally be const\n" . $herecurr);
index 0b44917..a698ece 100644 (file)
@@ -1,9 +1,8 @@
 # SPDX-License-Identifier: GPL-2.0
 # scripts/dtc makefile
 
-hostprogs                      := dtc
-always-$(CONFIG_DTC)           += $(hostprogs)
-always-$(CHECK_DT_BINDING)     += $(hostprogs)
+hostprogs-always-$(CONFIG_DTC)         += dtc
+hostprogs-always-$(CHECK_DT_BINDING)   += dtc
 
 dtc-objs       := dtc.o flattree.o fstree.o data.o livetree.o treesource.o \
                   srcpos.o checks.o util.o
index 4014ba7..d66949b 100644 (file)
@@ -1,22 +1,61 @@
 # SPDX-License-Identifier: GPL-2.0
-GCC_PLUGINS_DIR := $(shell $(CC) -print-file-name=plugin)
 
-HOST_EXTRACXXFLAGS += -I$(GCC_PLUGINS_DIR)/include -I$(src) -std=gnu++98 -fno-rtti
-HOST_EXTRACXXFLAGS += -fno-exceptions -fasynchronous-unwind-tables -ggdb
-HOST_EXTRACXXFLAGS += -Wno-narrowing -Wno-unused-variable -Wno-c++11-compat
-HOST_EXTRACXXFLAGS += -Wno-format-diag
-
-$(obj)/randomize_layout_plugin.o: $(objtree)/$(obj)/randomize_layout_seed.h
+$(obj)/randomize_layout_plugin.so: $(objtree)/$(obj)/randomize_layout_seed.h
 quiet_cmd_create_randomize_layout_seed = GENSEED $@
 cmd_create_randomize_layout_seed = \
   $(CONFIG_SHELL) $(srctree)/$(src)/gen-random-seed.sh $@ $(objtree)/include/generated/randomize_layout_hash.h
 $(objtree)/$(obj)/randomize_layout_seed.h: FORCE
        $(call if_changed,create_randomize_layout_seed)
-targets = randomize_layout_seed.h randomize_layout_hash.h
+targets += randomize_layout_seed.h randomize_layout_hash.h
+
+# Build rules for plugins
+#
+# No extra code is needed for single-file plugins.
+# For multi-file plugins, use *-objs syntax to list the objects.
+#
+# If the plugin foo.so is compiled from foo.c and foo2.c, you can do:
+#
+# foo-objs := foo.o foo2.o
+
+always-y += $(GCC_PLUGIN)
 
-hostcxxlibs-y := $(GCC_PLUGIN)
-always-y := $(hostcxxlibs-y)
+GCC_PLUGINS_DIR = $(shell $(CC) -print-file-name=plugin)
 
-$(foreach p,$(hostcxxlibs-y:%.so=%),$(eval $(p)-objs := $(p).o))
+plugin_cxxflags        = -Wp,-MMD,$(depfile) $(KBUILD_HOSTCXXFLAGS) -fPIC \
+                  -I $(GCC_PLUGINS_DIR)/include -I $(obj) -std=gnu++98 \
+                  -fno-rtti -fno-exceptions -fasynchronous-unwind-tables \
+                  -ggdb -Wno-narrowing -Wno-unused-variable -Wno-c++11-compat \
+                  -Wno-format-diag
 
+plugin_ldflags = -shared
+
+plugin-single  := $(foreach m, $(GCC_PLUGIN), $(if $($(m:%.so=%-objs)),,$(m)))
+plugin-multi   := $(filter-out $(plugin-single), $(GCC_PLUGIN))
+plugin-objs    := $(sort $(foreach m, $(plugin-multi), $($(m:%.so=%-objs))))
+
+targets += $(plugin-single) $(plugin-multi) $(plugin-objs)
 clean-files += *.so
+
+plugin-single  := $(addprefix $(obj)/, $(plugin-single))
+plugin-multi   := $(addprefix $(obj)/, $(plugin-multi))
+plugin-objs    := $(addprefix $(obj)/, $(plugin-objs))
+
+quiet_cmd_plugin_cxx_so_c = HOSTCXX $@
+      cmd_plugin_cxx_so_c = $(HOSTCXX) $(plugin_cxxflags) $(plugin_ldflags) -o $@ $<
+
+$(plugin-single): $(obj)/%.so: $(src)/%.c FORCE
+       $(call if_changed_dep,plugin_cxx_so_c)
+
+quiet_cmd_plugin_ld_so_o = HOSTLD  $@
+      cmd_plugin_ld_so_o = $(HOSTCXX) $(plugin_ldflags) -o $@ \
+                          $(addprefix $(obj)/, $($(target-stem)-objs))
+
+$(plugin-multi): FORCE
+       $(call if_changed,plugin_ld_so_o)
+$(foreach m, $(notdir $(plugin-multi)), $(eval $(obj)/$m: $(addprefix $(obj)/, $($(m:%.so=%-objs)))))
+
+quiet_cmd_plugin_cxx_o_c = HOSTCXX $@
+      cmd_plugin_cxx_o_c = $(HOSTCXX) $(plugin_cxxflags) -c -o $@ $<
+
+$(plugin-objs): $(obj)/%.o: $(src)/%.c FORCE
+       $(call if_changed_dep,plugin_cxx_o_c)
index c4b9916..fe46285 100644 (file)
@@ -17,7 +17,7 @@ def rb_first(root):
         raise gdb.GdbError("Must be struct rb_root not {}".format(root.type))
 
     node = root['rb_node']
-    if node is 0:
+    if node == 0:
         return None
 
     while node['rb_left']:
@@ -33,7 +33,7 @@ def rb_last(root):
         raise gdb.GdbError("Must be struct rb_root not {}".format(root.type))
 
     node = root['rb_node']
-    if node is 0:
+    if node == 0:
         return None
 
     while node['rb_right']:
index d328de1..ce4f999 100644 (file)
@@ -1,7 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 
-hostprogs      := genksyms
-always-y       := $(hostprogs)
+hostprogs-always-y     += genksyms
 
 genksyms-objs  := genksyms.o parse.tab.o lex.lex.o
 
index b4fa0e4..2f9afff 100644 (file)
@@ -5,7 +5,7 @@
 
 #include "images.h"
 
-const char *xpm_load[] = {
+const char * const xpm_load[] = {
 "22 22 5 1",
 ". c None",
 "# c #000000",
@@ -35,7 +35,7 @@ const char *xpm_load[] = {
 "###############.......",
 "......................"};
 
-const char *xpm_save[] = {
+const char * const xpm_save[] = {
 "22 22 5 1",
 ". c None",
 "# c #000000",
@@ -65,7 +65,7 @@ const char *xpm_save[] = {
 "..##################..",
 "......................"};
 
-const char *xpm_back[] = {
+const char * const xpm_back[] = {
 "22 22 3 1",
 ". c None",
 "# c #000083",
@@ -93,7 +93,7 @@ const char *xpm_back[] = {
 "......................",
 "......................"};
 
-const char *xpm_tree_view[] = {
+const char * const xpm_tree_view[] = {
 "22 22 2 1",
 ". c None",
 "# c #000000",
@@ -120,7 +120,7 @@ const char *xpm_tree_view[] = {
 "......................",
 "......................"};
 
-const char *xpm_single_view[] = {
+const char * const xpm_single_view[] = {
 "22 22 2 1",
 ". c None",
 "# c #000000",
@@ -147,7 +147,7 @@ const char *xpm_single_view[] = {
 "......................",
 "......................"};
 
-const char *xpm_split_view[] = {
+const char * const xpm_split_view[] = {
 "22 22 2 1",
 ". c None",
 "# c #000000",
@@ -174,7 +174,7 @@ const char *xpm_split_view[] = {
 "......................",
 "......................"};
 
-const char *xpm_symbol_no[] = {
+const char * const xpm_symbol_no[] = {
 "12 12 2 1",
 "  c white",
 ". c black",
@@ -191,7 +191,7 @@ const char *xpm_symbol_no[] = {
 " .......... ",
 "            "};
 
-const char *xpm_symbol_mod[] = {
+const char * const xpm_symbol_mod[] = {
 "12 12 2 1",
 "  c white",
 ". c black",
@@ -208,7 +208,7 @@ const char *xpm_symbol_mod[] = {
 " .......... ",
 "            "};
 
-const char *xpm_symbol_yes[] = {
+const char * const xpm_symbol_yes[] = {
 "12 12 2 1",
 "  c white",
 ". c black",
@@ -225,7 +225,7 @@ const char *xpm_symbol_yes[] = {
 " .......... ",
 "            "};
 
-const char *xpm_choice_no[] = {
+const char * const xpm_choice_no[] = {
 "12 12 2 1",
 "  c white",
 ". c black",
@@ -242,7 +242,7 @@ const char *xpm_choice_no[] = {
 "    ....    ",
 "            "};
 
-const char *xpm_choice_yes[] = {
+const char * const xpm_choice_yes[] = {
 "12 12 2 1",
 "  c white",
 ". c black",
@@ -259,7 +259,7 @@ const char *xpm_choice_yes[] = {
 "    ....    ",
 "            "};
 
-const char *xpm_menu[] = {
+const char * const xpm_menu[] = {
 "12 12 2 1",
 "  c white",
 ". c black",
@@ -276,7 +276,7 @@ const char *xpm_menu[] = {
 " .......... ",
 "            "};
 
-const char *xpm_menu_inv[] = {
+const char * const xpm_menu_inv[] = {
 "12 12 2 1",
 "  c white",
 ". c black",
@@ -293,7 +293,7 @@ const char *xpm_menu_inv[] = {
 " .......... ",
 "            "};
 
-const char *xpm_menuback[] = {
+const char * const xpm_menuback[] = {
 "12 12 2 1",
 "  c white",
 ". c black",
@@ -310,7 +310,7 @@ const char *xpm_menuback[] = {
 " .......... ",
 "            "};
 
-const char *xpm_void[] = {
+const char * const xpm_void[] = {
 "12 12 2 1",
 "  c white",
 ". c black",
index d8ff614..7212dec 100644 (file)
 extern "C" {
 #endif
 
-extern const char *xpm_load[];
-extern const char *xpm_save[];
-extern const char *xpm_back[];
-extern const char *xpm_tree_view[];
-extern const char *xpm_single_view[];
-extern const char *xpm_split_view[];
-extern const char *xpm_symbol_no[];
-extern const char *xpm_symbol_mod[];
-extern const char *xpm_symbol_yes[];
-extern const char *xpm_choice_no[];
-extern const char *xpm_choice_yes[];
-extern const char *xpm_menu[];
-extern const char *xpm_menu_inv[];
-extern const char *xpm_menuback[];
-extern const char *xpm_void[];
+extern const char * const xpm_load[];
+extern const char * const xpm_save[];
+extern const char * const xpm_back[];
+extern const char * const xpm_tree_view[];
+extern const char * const xpm_single_view[];
+extern const char * const xpm_split_view[];
+extern const char * const xpm_symbol_no[];
+extern const char * const xpm_symbol_mod[];
+extern const char * const xpm_symbol_yes[];
+extern const char * const xpm_choice_no[];
+extern const char * const xpm_choice_yes[];
+extern const char * const xpm_menu[];
+extern const char * const xpm_menu_inv[];
+extern const char * const xpm_menuback[];
+extern const char * const xpm_void[];
 
 #ifdef __cplusplus
 }
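
The images.c and images.h hunks above tighten the XPM tables from const char *xpm_foo[] to const char * const xpm_foo[]. The first form only protects the characters; the pointer slots themselves can still be reassigned. The second makes the array of pointers read-only as well, so the whole table can be placed in .rodata. A small standalone illustration (generic array names, not the qconf symbols):

#include <stdio.h>

static const char *mutable_table[] = { "yes", "no" };
static const char * const fixed_table[] = { "yes", "no" };

int main(void)
{
	mutable_table[0] = "maybe";	/* allowed: only the chars are const */
	/* fixed_table[0] = "maybe"; */	/* compile error: pointers are const */
	printf("%s %s\n", mutable_table[0], fixed_table[0]);
	return 0;
}
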
index 6354c90..240109f 100644 (file)
@@ -36,7 +36,7 @@ struct buffer {
        YY_BUFFER_STATE state;
 };
 
-struct buffer *current_buf;
+static struct buffer *current_buf;
 
 static int last_ts, first_ts;
 
@@ -105,7 +105,7 @@ n   [A-Za-z0-9_-]
 "endchoice"            return T_ENDCHOICE;
 "endif"                        return T_ENDIF;
 "endmenu"              return T_ENDMENU;
-"help"|"---help---"    return T_HELP;
+"help"                 return T_HELP;
 "hex"                  return T_HEX;
 "if"                   return T_IF;
 "imply"                        return T_IMPLY;
index 23d1cb0..bc390df 100644 (file)
@@ -31,11 +31,6 @@ static ConfigSettings *configSettings;
 
 QAction *ConfigMainWindow::saveAction;
 
-static inline QString qgettext(const char* str)
-{
-       return QString::fromLocal8Bit(str);
-}
-
 ConfigSettings::ConfigSettings()
        : QSettings("kernel.org", "qconf")
 {
@@ -79,6 +74,13 @@ bool ConfigSettings::writeSizes(const QString& key, const QList<int>& value)
        return true;
 }
 
+QIcon ConfigItem::symbolYesIcon;
+QIcon ConfigItem::symbolModIcon;
+QIcon ConfigItem::symbolNoIcon;
+QIcon ConfigItem::choiceYesIcon;
+QIcon ConfigItem::choiceNoIcon;
+QIcon ConfigItem::menuIcon;
+QIcon ConfigItem::menubackIcon;
 
 /*
  * set the new data
@@ -102,14 +104,14 @@ void ConfigItem::updateMenu(void)
 
        list = listView();
        if (goParent) {
-               setPixmap(promptColIdx, list->menuBackPix);
+               setIcon(promptColIdx, menubackIcon);
                prompt = "..";
                goto set_prompt;
        }
 
        sym = menu->sym;
        prop = menu->prompt;
-       prompt = qgettext(menu_get_prompt(menu));
+       prompt = menu_get_prompt(menu);
 
        if (prop) switch (prop->type) {
        case P_MENU:
@@ -119,15 +121,15 @@ void ConfigItem::updateMenu(void)
                         */
                        if (sym && list->rootEntry == menu)
                                break;
-                       setPixmap(promptColIdx, list->menuPix);
+                       setIcon(promptColIdx, menuIcon);
                } else {
                        if (sym)
                                break;
-                       setPixmap(promptColIdx, QIcon());
+                       setIcon(promptColIdx, QIcon());
                }
                goto set_prompt;
        case P_COMMENT:
-               setPixmap(promptColIdx, QIcon());
+               setIcon(promptColIdx, QIcon());
                goto set_prompt;
        default:
                ;
@@ -135,7 +137,7 @@ void ConfigItem::updateMenu(void)
        if (!sym)
                goto set_prompt;
 
-       setText(nameColIdx, QString::fromLocal8Bit(sym->name));
+       setText(nameColIdx, sym->name);
 
        type = sym_get_type(sym);
        switch (type) {
@@ -144,7 +146,7 @@ void ConfigItem::updateMenu(void)
                char ch;
 
                if (!sym_is_changeable(sym) && list->optMode == normalOpt) {
-                       setPixmap(promptColIdx, QIcon());
+                       setIcon(promptColIdx, QIcon());
                        setText(noColIdx, QString());
                        setText(modColIdx, QString());
                        setText(yesColIdx, QString());
@@ -154,22 +156,22 @@ void ConfigItem::updateMenu(void)
                switch (expr) {
                case yes:
                        if (sym_is_choice_value(sym) && type == S_BOOLEAN)
-                               setPixmap(promptColIdx, list->choiceYesPix);
+                               setIcon(promptColIdx, choiceYesIcon);
                        else
-                               setPixmap(promptColIdx, list->symbolYesPix);
+                               setIcon(promptColIdx, symbolYesIcon);
                        setText(yesColIdx, "Y");
                        ch = 'Y';
                        break;
                case mod:
-                       setPixmap(promptColIdx, list->symbolModPix);
+                       setIcon(promptColIdx, symbolModIcon);
                        setText(modColIdx, "M");
                        ch = 'M';
                        break;
                default:
                        if (sym_is_choice_value(sym) && type == S_BOOLEAN)
-                               setPixmap(promptColIdx, list->choiceNoPix);
+                               setIcon(promptColIdx, choiceNoIcon);
                        else
-                               setPixmap(promptColIdx, list->symbolNoPix);
+                               setIcon(promptColIdx, symbolNoIcon);
                        setText(noColIdx, "N");
                        ch = 'N';
                        break;
@@ -265,7 +267,7 @@ void ConfigLineEdit::show(ConfigItem* i)
 {
        item = i;
        if (sym_get_string_value(item->menu->sym))
-               setText(QString::fromLocal8Bit(sym_get_string_value(item->menu->sym)));
+               setText(sym_get_string_value(item->menu->sym));
        else
                setText(QString());
        Parent::show();
@@ -280,7 +282,7 @@ void ConfigLineEdit::keyPressEvent(QKeyEvent* e)
        case Qt::Key_Return:
        case Qt::Key_Enter:
                sym_set_string_value(item->menu->sym, text().toLatin1());
-               parent()->updateList(item);
+               parent()->updateList();
                break;
        default:
                Parent::keyPressEvent(e);
@@ -294,9 +296,6 @@ void ConfigLineEdit::keyPressEvent(QKeyEvent* e)
 ConfigList::ConfigList(ConfigView* p, const char *name)
        : Parent(p),
          updateAll(false),
-         symbolYesPix(xpm_symbol_yes), symbolModPix(xpm_symbol_mod), symbolNoPix(xpm_symbol_no),
-         choiceYesPix(xpm_choice_yes), choiceNoPix(xpm_choice_no),
-         menuPix(xpm_menu), menuInvPix(xpm_menu_inv), menuBackPix(xpm_menuback), voidPix(xpm_void),
          showName(false), showRange(false), showData(false), mode(singleMode), optMode(normalOpt),
          rootEntry(0), headerPopup(0)
 {
@@ -322,7 +321,7 @@ ConfigList::ConfigList(ConfigView* p, const char *name)
                connect(configApp, SIGNAL(aboutToQuit()), SLOT(saveSettings()));
        }
 
-       addColumn(promptColIdx);
+       showColumn(promptColIdx);
 
        reinit();
 }
@@ -340,21 +339,33 @@ bool ConfigList::menuSkip(struct menu *menu)
 
 void ConfigList::reinit(void)
 {
-       removeColumn(dataColIdx);
-       removeColumn(yesColIdx);
-       removeColumn(modColIdx);
-       removeColumn(noColIdx);
-       removeColumn(nameColIdx);
+       hideColumn(dataColIdx);
+       hideColumn(yesColIdx);
+       hideColumn(modColIdx);
+       hideColumn(noColIdx);
+       hideColumn(nameColIdx);
 
        if (showName)
-               addColumn(nameColIdx);
+               showColumn(nameColIdx);
        if (showRange) {
-               addColumn(noColIdx);
-               addColumn(modColIdx);
-               addColumn(yesColIdx);
+               showColumn(noColIdx);
+               showColumn(modColIdx);
+               showColumn(yesColIdx);
        }
        if (showData)
-               addColumn(dataColIdx);
+               showColumn(dataColIdx);
+
+       updateListAll();
+}
+
+void ConfigList::setOptionMode(QAction *action)
+{
+       if (action == showNormalAction)
+               optMode = normalOpt;
+       else if (action == showAllAction)
+               optMode = allOpt;
+       else
+               optMode = promptOpt;
 
        updateListAll();
 }
@@ -404,15 +415,15 @@ void ConfigList::updateSelection(void)
                emit menuSelected(menu);
 }
 
-void ConfigList::updateList(ConfigItem* item)
+void ConfigList::updateList()
 {
        ConfigItem* last = 0;
+       ConfigItem *item;
 
        if (!rootEntry) {
                if (mode != listMode)
                        goto update;
                QTreeWidgetItemIterator it(this);
-               ConfigItem* item;
 
                while (*it) {
                        item = (ConfigItem*)(*it);
@@ -446,7 +457,7 @@ void ConfigList::updateList(ConfigItem* item)
                return;
        }
 update:
-       updateMenuList(this, rootEntry);
+       updateMenuList(rootEntry);
        update();
        resizeColumnToContents(0);
 }
@@ -471,7 +482,7 @@ void ConfigList::setValue(ConfigItem* item, tristate val)
                        return;
                if (oldval == no && item->menu->list)
                        item->setExpanded(true);
-               parent()->updateList(item);
+               parent()->updateList();
                break;
        }
 }
@@ -505,7 +516,7 @@ void ConfigList::changeValue(ConfigItem* item)
                                item->setExpanded(true);
                }
                if (oldexpr != newexpr)
-                       parent()->updateList(item);
+                       parent()->updateList();
                break;
        case S_INT:
        case S_HEX:
@@ -524,7 +535,7 @@ void ConfigList::setRootMenu(struct menu *menu)
        type = menu && menu->prompt ? menu->prompt->type : P_UNKNOWN;
        if (type != P_MENU)
                return;
-       updateMenuList(this, 0);
+       updateMenuList(0);
        rootEntry = menu;
        updateListAll();
        if (currentItem()) {
@@ -628,7 +639,7 @@ hide:
        }
 }
 
-void ConfigList::updateMenuList(ConfigList *parent, struct menu* menu)
+void ConfigList::updateMenuList(struct menu *menu)
 {
        struct menu* child;
        ConfigItem* item;
@@ -637,19 +648,19 @@ void ConfigList::updateMenuList(ConfigList *parent, struct menu* menu)
        enum prop_type type;
 
        if (!menu) {
-               while (parent->topLevelItemCount() > 0)
+               while (topLevelItemCount() > 0)
                {
-                       delete parent->takeTopLevelItem(0);
+                       delete takeTopLevelItem(0);
                }
 
                return;
        }
 
-       last = (ConfigItem*)parent->topLevelItem(0);
+       last = (ConfigItem *)topLevelItem(0);
        if (last && !last->goParent)
                last = 0;
        for (child = menu->list; child; child = child->next) {
-               item = last ? last->nextSibling() : (ConfigItem*)parent->topLevelItem(0);
+               item = last ? last->nextSibling() : (ConfigItem *)topLevelItem(0);
                type = child->prompt ? child->prompt->type : P_UNKNOWN;
 
                switch (mode) {
@@ -670,7 +681,7 @@ void ConfigList::updateMenuList(ConfigList *parent, struct menu* menu)
                        if (!child->sym && !child->list && !child->prompt)
                                continue;
                        if (!item || item->menu != child)
-                               item = new ConfigItem(parent, last, child, visible);
+                               item = new ConfigItem(this, last, child, visible);
                        else
                                item->testUpdateMenu(visible);
 
@@ -683,7 +694,7 @@ void ConfigList::updateMenuList(ConfigList *parent, struct menu* menu)
                }
 hide:
                if (item && item->menu == child) {
-                       last = (ConfigItem*)parent->topLevelItem(0);
+                       last = (ConfigItem *)topLevelItem(0);
                        if (last == item)
                                last = 0;
                        else while (last->nextSibling() != item)
@@ -774,7 +785,7 @@ void ConfigList::mouseReleaseEvent(QMouseEvent* e)
        idx = header()->logicalIndexAt(x);
        switch (idx) {
        case promptColIdx:
-               icon = item->pixmap(promptColIdx);
+               icon = item->icon(promptColIdx);
                if (!icon.isNull()) {
                        int off = header()->sectionPosition(0) + visualRect(indexAt(p)).x() + 4; // 4 is Hardcoded image offset. There might be a way to do it properly.
                        if (x >= off && x < off + icon.availableSizes().first().width()) {
@@ -785,7 +796,8 @@ void ConfigList::mouseReleaseEvent(QMouseEvent* e)
                                        break;
                                ptype = menu->prompt ? menu->prompt->type : P_UNKNOWN;
                                if (ptype == P_MENU && rootEntry != menu &&
-                                   mode != fullMode && mode != menuMode)
+                                   mode != fullMode && mode != menuMode &&
+                                    mode != listMode)
                                        emit menuSelected(menu);
                                else
                                        changeValue(item);
@@ -835,7 +847,7 @@ void ConfigList::mouseDoubleClickEvent(QMouseEvent* e)
        if (!menu)
                goto skip;
        ptype = menu->prompt ? menu->prompt->type : P_UNKNOWN;
-       if (ptype == P_MENU) {
+       if (ptype == P_MENU && mode != listMode) {
                if (mode == singleMode)
                        emit itemSelected(menu);
                else if (mode == symbolMode)
@@ -864,46 +876,46 @@ void ConfigList::focusInEvent(QFocusEvent *e)
 
 void ConfigList::contextMenuEvent(QContextMenuEvent *e)
 {
-       if (e->y() <= header()->geometry().bottom()) {
-               if (!headerPopup) {
-                       QAction *action;
-
-                       headerPopup = new QMenu(this);
-                       action = new QAction("Show Name", this);
-                         action->setCheckable(true);
-                         connect(action, SIGNAL(toggled(bool)),
-                                 parent(), SLOT(setShowName(bool)));
-                         connect(parent(), SIGNAL(showNameChanged(bool)),
-                                 action, SLOT(setOn(bool)));
-                         action->setChecked(showName);
-                         headerPopup->addAction(action);
-                       action = new QAction("Show Range", this);
-                         action->setCheckable(true);
-                         connect(action, SIGNAL(toggled(bool)),
-                                 parent(), SLOT(setShowRange(bool)));
-                         connect(parent(), SIGNAL(showRangeChanged(bool)),
-                                 action, SLOT(setOn(bool)));
-                         action->setChecked(showRange);
-                         headerPopup->addAction(action);
-                       action = new QAction("Show Data", this);
-                         action->setCheckable(true);
-                         connect(action, SIGNAL(toggled(bool)),
-                                 parent(), SLOT(setShowData(bool)));
-                         connect(parent(), SIGNAL(showDataChanged(bool)),
-                                 action, SLOT(setOn(bool)));
-                         action->setChecked(showData);
-                         headerPopup->addAction(action);
-               }
-               headerPopup->exec(e->globalPos());
-               e->accept();
-       } else
-               e->ignore();
+       if (!headerPopup) {
+               QAction *action;
+
+               headerPopup = new QMenu(this);
+               action = new QAction("Show Name", this);
+               action->setCheckable(true);
+               connect(action, SIGNAL(toggled(bool)),
+                       parent(), SLOT(setShowName(bool)));
+               connect(parent(), SIGNAL(showNameChanged(bool)),
+                       action, SLOT(setOn(bool)));
+               action->setChecked(showName);
+               headerPopup->addAction(action);
+
+               action = new QAction("Show Range", this);
+               action->setCheckable(true);
+               connect(action, SIGNAL(toggled(bool)),
+                       parent(), SLOT(setShowRange(bool)));
+               connect(parent(), SIGNAL(showRangeChanged(bool)),
+                       action, SLOT(setOn(bool)));
+               action->setChecked(showRange);
+               headerPopup->addAction(action);
+
+               action = new QAction("Show Data", this);
+               action->setCheckable(true);
+               connect(action, SIGNAL(toggled(bool)),
+                       parent(), SLOT(setShowData(bool)));
+               connect(parent(), SIGNAL(showDataChanged(bool)),
+                       action, SLOT(setOn(bool)));
+               action->setChecked(showData);
+               headerPopup->addAction(action);
+       }
+
+       headerPopup->exec(e->globalPos());
+       e->accept();
 }
 
 ConfigView*ConfigView::viewList;
-QAction *ConfigView::showNormalAction;
-QAction *ConfigView::showAllAction;
-QAction *ConfigView::showPromptAction;
+QAction *ConfigList::showNormalAction;
+QAction *ConfigList::showAllAction;
+QAction *ConfigList::showPromptAction;
 
 ConfigView::ConfigView(QWidget* parent, const char *name)
        : Parent(parent)
@@ -934,18 +946,6 @@ ConfigView::~ConfigView(void)
        }
 }
 
-void ConfigView::setOptionMode(QAction *act)
-{
-       if (act == showNormalAction)
-               list->optMode = normalOpt;
-       else if (act == showAllAction)
-               list->optMode = allOpt;
-       else
-               list->optMode = promptOpt;
-
-       list->updateListAll();
-}
-
 void ConfigView::setShowName(bool b)
 {
        if (list->showName != b) {
@@ -984,12 +984,12 @@ void ConfigList::setAllOpen(bool open)
        }
 }
 
-void ConfigView::updateList(ConfigItem* item)
+void ConfigView::updateList()
 {
        ConfigView* v;
 
        for (v = viewList; v; v = v->nextView)
-               v->list->updateList(item);
+               v->list->updateList();
 }
 
 void ConfigView::updateListAll(void)
@@ -1287,16 +1287,17 @@ void ConfigInfoView::contextMenuEvent(QContextMenuEvent *e)
        Parent::contextMenuEvent(e);
 }
 
-ConfigSearchWindow::ConfigSearchWindow(ConfigMainWindow* parent, const char *name)
+ConfigSearchWindow::ConfigSearchWindow(ConfigMainWindow *parent)
        : Parent(parent), result(NULL)
 {
-       setObjectName(name);
+       setObjectName("search");
        setWindowTitle("Search Config");
 
        QVBoxLayout* layout1 = new QVBoxLayout(this);
        layout1->setContentsMargins(11, 11, 11, 11);
        layout1->setSpacing(6);
-       QHBoxLayout* layout2 = new QHBoxLayout(0);
+
+       QHBoxLayout* layout2 = new QHBoxLayout();
        layout2->setContentsMargins(0, 0, 0, 0);
        layout2->setSpacing(6);
        layout2->addWidget(new QLabel("Find:", this));
@@ -1311,9 +1312,9 @@ ConfigSearchWindow::ConfigSearchWindow(ConfigMainWindow* parent, const char *nam
 
        split = new QSplitter(this);
        split->setOrientation(Qt::Vertical);
-       list = new ConfigView(split, name);
+       list = new ConfigView(split, "search");
        list->list->mode = listMode;
-       info = new ConfigInfoView(split, name);
+       info = new ConfigInfoView(split, "search");
        connect(list->list, SIGNAL(menuChanged(struct menu *)),
                info, SLOT(setInfo(struct menu *)));
        connect(list->list, SIGNAL(menuChanged(struct menu *)),
@@ -1321,25 +1322,23 @@ ConfigSearchWindow::ConfigSearchWindow(ConfigMainWindow* parent, const char *nam
 
        layout1->addWidget(split);
 
-       if (name) {
-               QVariant x, y;
-               int width, height;
-               bool ok;
+       QVariant x, y;
+       int width, height;
+       bool ok;
 
-               configSettings->beginGroup(name);
-               width = configSettings->value("/window width", parent->width() / 2).toInt();
-               height = configSettings->value("/window height", parent->height() / 2).toInt();
-               resize(width, height);
-               x = configSettings->value("/window x");
-               y = configSettings->value("/window y");
-               if ((x.isValid())&&(y.isValid()))
-                       move(x.toInt(), y.toInt());
-               QList<int> sizes = configSettings->readSizes("/split", &ok);
-               if (ok)
-                       split->setSizes(sizes);
-               configSettings->endGroup();
-               connect(configApp, SIGNAL(aboutToQuit()), SLOT(saveSettings()));
-       }
+       configSettings->beginGroup("search");
+       width = configSettings->value("/window width", parent->width() / 2).toInt();
+       height = configSettings->value("/window height", parent->height() / 2).toInt();
+       resize(width, height);
+       x = configSettings->value("/window x");
+       y = configSettings->value("/window y");
+       if (x.isValid() && y.isValid())
+               move(x.toInt(), y.toInt());
+       QList<int> sizes = configSettings->readSizes("/split", &ok);
+       if (ok)
+               split->setSizes(sizes);
+       configSettings->endGroup();
+       connect(configApp, SIGNAL(aboutToQuit()), SLOT(saveSettings()));
 }
 
 void ConfigSearchWindow::saveSettings(void)
@@ -1381,7 +1380,6 @@ void ConfigSearchWindow::search(void)
 ConfigMainWindow::ConfigMainWindow(void)
        : searchWindow(0)
 {
-       QMenuBar* menu;
        bool ok = true;
        QVariant x, y;
        int width, height;
@@ -1402,6 +1400,15 @@ ConfigMainWindow::ConfigMainWindow(void)
        if ((x.isValid())&&(y.isValid()))
                move(x.toInt(), y.toInt());
 
+       // set up icons
+       ConfigItem::symbolYesIcon = QIcon(QPixmap(xpm_symbol_yes));
+       ConfigItem::symbolModIcon = QIcon(QPixmap(xpm_symbol_mod));
+       ConfigItem::symbolNoIcon = QIcon(QPixmap(xpm_symbol_no));
+       ConfigItem::choiceYesIcon = QIcon(QPixmap(xpm_choice_yes));
+       ConfigItem::choiceNoIcon = QIcon(QPixmap(xpm_choice_no));
+       ConfigItem::menuIcon = QIcon(QPixmap(xpm_menu));
+       ConfigItem::menubackIcon = QIcon(QPixmap(xpm_menuback));
+
        QWidget *widget = new QWidget(this);
        QVBoxLayout *layout = new QVBoxLayout(widget);
        setCentralWidget(widget);
@@ -1432,10 +1439,6 @@ ConfigMainWindow::ConfigMainWindow(void)
        setTabOrder(configList, helpText);
        configList->setFocus();
 
-       menu = menuBar();
-       toolBar = new QToolBar("Tools", this);
-       addToolBar(toolBar);
-
        backAction = new QAction(QPixmap(xpm_back), "Back", this);
        connect(backAction, SIGNAL(triggered(bool)), SLOT(goBack()));
 
@@ -1485,17 +1488,17 @@ ConfigMainWindow::ConfigMainWindow(void)
 
        QActionGroup *optGroup = new QActionGroup(this);
        optGroup->setExclusive(true);
-       connect(optGroup, SIGNAL(triggered(QAction*)), configView,
+       connect(optGroup, SIGNAL(triggered(QAction*)), configList,
                SLOT(setOptionMode(QAction *)));
-       connect(optGroup, SIGNAL(triggered(QAction *)), menuView,
+       connect(optGroup, SIGNAL(triggered(QAction *)), menuList,
                SLOT(setOptionMode(QAction *)));
 
-       configView->showNormalAction = new QAction("Show Normal Options", optGroup);
-       configView->showAllAction = new QAction("Show All Options", optGroup);
-       configView->showPromptAction = new QAction("Show Prompt Options", optGroup);
-       configView->showNormalAction->setCheckable(true);
-       configView->showAllAction->setCheckable(true);
-       configView->showPromptAction->setCheckable(true);
+       ConfigList::showNormalAction = new QAction("Show Normal Options", optGroup);
+       ConfigList::showNormalAction->setCheckable(true);
+       ConfigList::showAllAction = new QAction("Show All Options", optGroup);
+       ConfigList::showAllAction->setCheckable(true);
+       ConfigList::showPromptAction = new QAction("Show Prompt Options", optGroup);
+       ConfigList::showPromptAction->setCheckable(true);
 
        QAction *showDebugAction = new QAction("Show Debug Info", this);
          showDebugAction->setCheckable(true);
@@ -1508,6 +1511,7 @@ ConfigMainWindow::ConfigMainWindow(void)
          connect(showAboutAction, SIGNAL(triggered(bool)), SLOT(showAbout()));
 
        // init tool bar
+       QToolBar *toolBar = addToolBar("Tools");
        toolBar->addAction(backAction);
        toolBar->addSeparator();
        toolBar->addAction(loadAction);
@@ -1517,33 +1521,32 @@ ConfigMainWindow::ConfigMainWindow(void)
        toolBar->addAction(splitViewAction);
        toolBar->addAction(fullViewAction);
 
-       // create config menu
-       QMenu* config = menu->addMenu("&File");
-       config->addAction(loadAction);
-       config->addAction(saveAction);
-       config->addAction(saveAsAction);
-       config->addSeparator();
-       config->addAction(quitAction);
+       // create file menu
+       QMenu *menu = menuBar()->addMenu("&File");
+       menu->addAction(loadAction);
+       menu->addAction(saveAction);
+       menu->addAction(saveAsAction);
+       menu->addSeparator();
+       menu->addAction(quitAction);
 
        // create edit menu
-       QMenu* editMenu = menu->addMenu("&Edit");
-       editMenu->addAction(searchAction);
+       menu = menuBar()->addMenu("&Edit");
+       menu->addAction(searchAction);
 
        // create options menu
-       QMenu* optionMenu = menu->addMenu("&Option");
-       optionMenu->addAction(showNameAction);
-       optionMenu->addAction(showRangeAction);
-       optionMenu->addAction(showDataAction);
-       optionMenu->addSeparator();
-       optionMenu->addActions(optGroup->actions());
-       optionMenu->addSeparator();
-       optionMenu->addAction(showDebugAction);
+       menu = menuBar()->addMenu("&Option");
+       menu->addAction(showNameAction);
+       menu->addAction(showRangeAction);
+       menu->addAction(showDataAction);
+       menu->addSeparator();
+       menu->addActions(optGroup->actions());
+       menu->addSeparator();
+       menu->addAction(showDebugAction);
 
        // create help menu
-       menu->addSeparator();
-       QMenu* helpMenu = menu->addMenu("&Help");
-       helpMenu->addAction(showIntroAction);
-       helpMenu->addAction(showAboutAction);
+       menu = menuBar()->addMenu("&Help");
+       menu->addAction(showIntroAction);
+       menu->addAction(showAboutAction);
 
        connect (helpText, SIGNAL (anchorClicked (const QUrl &)),
                 helpText, SLOT (clicked (const QUrl &)) );
@@ -1646,7 +1649,7 @@ void ConfigMainWindow::saveConfigAs(void)
 void ConfigMainWindow::searchConfig(void)
 {
        if (!searchWindow)
-               searchWindow = new ConfigSearchWindow(this, "search");
+               searchWindow = new ConfigSearchWindow(this);
        searchWindow->show();
 }
 
index 5eeab4a..461df64 100644 (file)
@@ -69,11 +69,13 @@ protected:
 public slots:
        void setRootMenu(struct menu *menu);
 
-       void updateList(ConfigItem *item);
+       void updateList();
        void setValue(ConfigItem* item, tristate val);
        void changeValue(ConfigItem* item);
        void updateSelection(void);
        void saveSettings(void);
+       void setOptionMode(QAction *action);
+
 signals:
        void menuChanged(struct menu *menu);
        void menuSelected(struct menu *menu);
@@ -85,35 +87,19 @@ public:
        void updateListAll(void)
        {
                updateAll = true;
-               updateList(NULL);
+               updateList();
                updateAll = false;
        }
-       ConfigList* listView()
-       {
-               return this;
-       }
-       void addColumn(colIdx idx)
-       {
-               showColumn(idx);
-       }
-       void removeColumn(colIdx idx)
-       {
-               hideColumn(idx);
-       }
        void setAllOpen(bool open);
        void setParentMenu(void);
 
        bool menuSkip(struct menu *);
 
        void updateMenuList(ConfigItem *parent, struct menu*);
-       void updateMenuList(ConfigList *parent, struct menu*);
+       void updateMenuList(struct menu *menu);
 
        bool updateAll;
 
-       QPixmap symbolYesPix, symbolModPix, symbolNoPix;
-       QPixmap choiceYesPix, choiceNoPix;
-       QPixmap menuPix, menuInvPix, menuBackPix, voidPix;
-
        bool showName, showRange, showData;
        enum listMode mode;
        enum optionMode optMode;
@@ -121,6 +107,8 @@ public:
        QPalette disabledColorGroup;
        QPalette inactivedColorGroup;
        QMenu* headerPopup;
+
+       static QAction *showNormalAction, *showAllAction, *showPromptAction;
 };
 
 class ConfigItem : public QTreeWidgetItem {
@@ -168,28 +156,16 @@ public:
 
                return ret;
        }
-       void setText(colIdx idx, const QString& text)
-       {
-               Parent::setText(idx, text);
-       }
-       QString text(colIdx idx) const
-       {
-               return Parent::text(idx);
-       }
-       void setPixmap(colIdx idx, const QIcon &icon)
-       {
-               Parent::setIcon(idx, icon);
-       }
-       const QIcon pixmap(colIdx idx) const
-       {
-               return icon(idx);
-       }
        // TODO: Implement paintCell
 
        ConfigItem* nextItem;
        struct menu *menu;
        bool visible;
        bool goParent;
+
+       static QIcon symbolYesIcon, symbolModIcon, symbolNoIcon;
+       static QIcon choiceYesIcon, choiceNoIcon;
+       static QIcon menuIcon, menubackIcon;
 };
 
 class ConfigLineEdit : public QLineEdit {
@@ -214,7 +190,7 @@ class ConfigView : public QWidget {
 public:
        ConfigView(QWidget* parent, const char *name = 0);
        ~ConfigView(void);
-       static void updateList(ConfigItem* item);
+       static void updateList();
        static void updateListAll(void);
 
        bool showName(void) const { return list->showName; }
@@ -224,7 +200,6 @@ public slots:
        void setShowName(bool);
        void setShowRange(bool);
        void setShowData(bool);
-       void setOptionMode(QAction *);
 signals:
        void showNameChanged(bool);
        void showRangeChanged(bool);
@@ -235,10 +210,6 @@ public:
 
        static ConfigView* viewList;
        ConfigView* nextView;
-
-       static QAction *showNormalAction;
-       static QAction *showAllAction;
-       static QAction *showPromptAction;
 };
 
 class ConfigInfoView : public QTextBrowser {
@@ -276,7 +247,7 @@ class ConfigSearchWindow : public QDialog {
        Q_OBJECT
        typedef class QDialog Parent;
 public:
-       ConfigSearchWindow(ConfigMainWindow* parent, const char *name = 0);
+       ConfigSearchWindow(ConfigMainWindow *parent);
 
 public slots:
        void saveSettings(void);
@@ -326,7 +297,6 @@ protected:
        ConfigView *configView;
        ConfigList *configList;
        ConfigInfoView *helpText;
-       QToolBar *toolBar;
        QAction *backAction;
        QAction *singleViewAction;
        QAction *splitViewAction;
index 9363e37..ffa3ec6 100644 (file)
@@ -15,15 +15,21 @@ struct symbol symbol_yes = {
        .name = "y",
        .curr = { "y", yes },
        .flags = SYMBOL_CONST|SYMBOL_VALID,
-}, symbol_mod = {
+};
+
+struct symbol symbol_mod = {
        .name = "m",
        .curr = { "m", mod },
        .flags = SYMBOL_CONST|SYMBOL_VALID,
-}, symbol_no = {
+};
+
+struct symbol symbol_no = {
        .name = "n",
        .curr = { "n", no },
        .flags = SYMBOL_CONST|SYMBOL_VALID,
-}, symbol_empty = {
+};
+
+static struct symbol symbol_empty = {
        .name = "",
        .curr = { "", no },
        .flags = SYMBOL_VALID,
@@ -31,7 +37,7 @@ struct symbol symbol_yes = {
 
 struct symbol *sym_defconfig_list;
 struct symbol *modules_sym;
-tristate modules_val;
+static tristate modules_val;
 
 enum symbol_type sym_get_type(struct symbol *sym)
 {
index e26f02d..e6e2d9e 100755 (executable)
 # Error out on error
 set -e
 
+LD="$1"
+KBUILD_LDFLAGS="$2"
+LDFLAGS_vmlinux="$3"
+
 # Nice output in kbuild format
 # Will be suppressed by "make -s"
 info()
index 296b6a3..7807168 100644 (file)
@@ -1,8 +1,8 @@
 # SPDX-License-Identifier: GPL-2.0
 OBJECT_FILES_NON_STANDARD := y
 
-hostprogs      := modpost mk_elfconfig
-always-y       := $(hostprogs) empty.o
+hostprogs-always-y     += modpost mk_elfconfig
+always-y               += empty.o
 
 modpost-objs   := modpost.o file2alias.o sumversion.o
 
index fb1578e..936198a 100755 (executable)
@@ -53,6 +53,18 @@ rm -rf -- "${tmpdir}"
 mkdir -p -- "${tmpdir}/boot"
 dirs=boot
 
+
+#
+# Try to install dtbs
+#
+if grep -q '^CONFIG_OF_EARLY_FLATTREE=y' include/config/auto.conf; then
+       # Only some architectures with OF support have this target
+       if [ -d "${srctree}/arch/${SRCARCH}/boot/dts" ]; then
+               $MAKE ARCH="${ARCH}" -f ${srctree}/Makefile INSTALL_DTBS_PATH="${tmpdir}/boot/dtbs/${KERNELRELEASE}" dtbs_install
+       fi
+fi
+
+
 #
 # Try to install modules
 #
index df1adbf..48fbd3d 100755 (executable)
@@ -175,7 +175,7 @@ Section: kernel
 Priority: optional
 Maintainer: $maintainer
 Build-Depends: bc, rsync, kmod, cpio, bison, flex | flex:native $extra_build_depends
-Homepage: http://www.kernel.org/
+Homepage: https://www.kernel.org/
 
 Package: $packagename
 Architecture: $debarch
index 8640c27..7c477ca 100755 (executable)
@@ -46,7 +46,7 @@ sed -e '/^DEL/d' -e 's/^\t*//' <<EOF
        License: GPL
        Group: System Environment/Kernel
        Vendor: The Linux Community
-       URL: http://www.kernel.org
+       URL: https://www.kernel.org
 $S     Source: kernel-$__KERNELRELEASE.tar.gz
        Provides: $PROVIDES
        %define __spec_install_post /usr/lib/rpm/brp-compress || :
index e59022b..b9c2ee7 100644 (file)
@@ -42,6 +42,8 @@
 #define R_ARM_THM_CALL         10
 #define R_ARM_CALL             28
 
+#define R_AARCH64_CALL26       283
+
 static int fd_map;     /* File descriptor for file being modified. */
 static int mmap_failed; /* Boolean flag. */
 static char gpfx;      /* prefix for global symbol name (sometimes '_') */
index 70cf8d9..1faf7f0 100644 (file)
@@ -1,7 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0
-hostprogs      := genheaders
+hostprogs-always-y += genheaders
 HOST_EXTRACFLAGS += \
        -I$(srctree)/include/uapi -I$(srctree)/include \
        -I$(srctree)/security/selinux/include
-
-always-y       := $(hostprogs)
index 3026f3c..d61058d 100644 (file)
@@ -1,8 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
-hostprogs      := mdp
+hostprogs-always-y += mdp
 HOST_EXTRACFLAGS += \
        -I$(srctree)/include/uapi -I$(srctree)/include \
        -I$(srctree)/security/selinux/include -I$(objtree)/include
 
-always-y       := $(hostprogs)
 clean-files    := policy.* file_contexts
index cd3cc7d..7561f6f 100644 (file)
@@ -118,7 +118,7 @@ config INTEL_TXT
          it was configured with, especially since they may be responsible for
          providing such assurances to VMs and services running on it.
 
-         See <http://www.intel.com/technology/security/> for more information
+         See <https://www.intel.com/technology/security/> for more information
          about Intel(R) TXT.
          See <http://tboot.sourceforge.net> for more information about tboot.
          See Documentation/x86/intel_txt.rst for a description of how to enable
index 03fae1b..348ed6c 100644 (file)
@@ -77,7 +77,7 @@ config SECURITY_APPARMOR_KUNIT_TEST
          This builds the AppArmor KUnit tests.
 
          KUnit tests run during boot and output the results to the debug log
-         in TAP format (http://testanything.org/). Only useful for kernel devs
+         in TAP format (https://testanything.org/). Only useful for kernel devs
          running KUnit test harness and are not for inclusion into a
          production build.
 
index 080c535..12e9250 100644 (file)
@@ -26,7 +26,7 @@ config IMA
          an aggregate integrity value over this list inside the
          TPM hardware, so that the TPM can prove to a third party
          whether or not critical system files have been modified.
-         Read <http://www.usenix.org/events/sec04/tech/sailer.html>
+         Read <https://www.usenix.org/events/sec04/tech/sailer.html>
          to learn more about IMA.
          If unsure, say N.
 
index 5a2def4..1e89e2d 100644 (file)
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) 2013 Politecnico di Torino, Italy
- *                    TORSEC group -- http://security.polito.it
+ *                    TORSEC group -- https://security.polito.it
  *
  * Author: Roberto Sassu <roberto.sassu@polito.it>
  *
index 635c6ac..41a5f43 100644 (file)
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) 2013 Politecnico di Torino, Italy
- *                    TORSEC group -- http://security.polito.it
+ *                    TORSEC group -- https://security.polito.it
  *
  * Author: Roberto Sassu <roberto.sassu@polito.it>
  *
index 9a88c79..6b3b880 100644 (file)
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (C) 2013 Politecnico di Torino, Italy
- *                    TORSEC group -- http://security.polito.it
+ *                    TORSEC group -- https://security.polito.it
  *
  * Author: Roberto Sassu <roberto.sassu@polito.it>
  *
index a7339d4..8fdd761 100644 (file)
@@ -4,7 +4,7 @@
  *
  * Copyright (C) 2006 International Business Machines Corp.
  * Copyright (C) 2010 Politecnico di Torino, Italy
- *                    TORSEC group -- http://security.polito.it
+ *                    TORSEC group -- https://security.polito.it
  *
  * Authors:
  * Michael A. Halcrow <mahalcro@us.ibm.com>
index 939621d..ed84665 100644 (file)
@@ -4,7 +4,7 @@
  *
  * Copyright (C) 2006 International Business Machines Corp.
  * Copyright (C) 2010 Politecnico di Torino, Italy
- *                    TORSEC group -- http://security.polito.it
+ *                    TORSEC group -- https://security.polito.it
  *
  * Authors:
  * Michael A. Halcrow <mahalcro@us.ibm.com>
index deebbf1..192e531 100644 (file)
@@ -2,7 +2,7 @@
 /*
  * Copyright (C) 2010 IBM Corporation
  * Copyright (C) 2010 Politecnico di Torino, Italy
- *                    TORSEC group -- http://security.polito.it
+ *                    TORSEC group -- https://security.polito.it
  *
  * Authors:
  * Mimi Zohar <zohar@us.ibm.com>
index c68528a..e6d22ce 100644 (file)
@@ -2,7 +2,7 @@
 /*
  * Copyright (C) 2010 IBM Corporation
  * Copyright (C) 2010 Politecnico di Torino, Italy
- *                    TORSEC group -- http://security.polito.it
+ *                    TORSEC group -- https://security.polito.it
  *
  * Authors:
  * Mimi Zohar <zohar@us.ibm.com>
index 53b3e1f..dc4ecc0 100644 (file)
@@ -914,7 +914,7 @@ bool tomoyo_dump_page(struct linux_binprm *bprm, unsigned long pos,
         * (represented by bprm).  'current' is the process doing
         * the execve().
         */
-       if (get_user_pages_remote(current, bprm->mm, pos, 1,
+       if (get_user_pages_remote(bprm->mm, pos, 1,
                                FOLL_FORCE, &page, NULL, NULL) <= 0)
                return false;
 #else
index 09ddab5..9766f6a 100644 (file)
@@ -46,6 +46,18 @@ int snd_hdac_bus_init(struct hdac_bus *bus, struct device *dev,
        INIT_LIST_HEAD(&bus->hlink_list);
        init_waitqueue_head(&bus->rirb_wq);
        bus->irq = -1;
+
+       /*
+        * Default value of '8' is as per the HD audio specification (Rev 1.0a).
+        * Following relation is used to derive STRIPE control value.
+        *  For sample rate <= 48K:
+        *   { ((num_channels * bits_per_sample) / number of SDOs) >= 8 }
+        *  For sample rate > 48K:
+        *   { ((num_channels * bits_per_sample * rate/48000) /
+        *      number of SDOs) >= 8 }
+        */
+       bus->sdo_limit = 8;
+
        return 0;
 }
 EXPORT_SYMBOL_GPL(snd_hdac_bus_init);
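
As a quick sanity check of the STRIPE relation quoted in the comment above, here is a small standalone program (the stream parameters are illustrative only; this is not derived from any specific codec or from the driver itself):

// Worked example of the HD-audio STRIPE relation from the comment above:
//   rate <= 48 kHz:  (channels * bits_per_sample) / num_sdo                 >= sdo_limit
//   rate  > 48 kHz:  (channels * bits_per_sample * rate / 48000) / num_sdo  >= sdo_limit
#include <cstdio>

static bool stripe_ok(unsigned channels, unsigned bits, unsigned rate,
                      unsigned num_sdo, unsigned sdo_limit)
{
        unsigned long ratio = (unsigned long)channels * bits;

        if (rate > 48000)
                ratio = ratio * rate / 48000;

        return ratio / num_sdo >= sdo_limit;
}

int main(void)
{
        /* 2 ch x 16 bit at 48 kHz over one SDO: 2 * 16 = 32 >= 8, default limit met */
        printf("48 kHz stereo 16-bit, 1 SDO: %s\n",
               stripe_ok(2, 16, 48000, 1, 8) ? "meets limit" : "below limit");

        /* 2 ch x 24 bit at 96 kHz over four SDOs: 2 * 24 * 2 / 4 = 24 >= 8 */
        printf("96 kHz stereo 24-bit, 4 SDOs: %s\n",
               stripe_ok(2, 24, 96000, 4, 8) ? "meets limit" : "below limit");

        return 0;
}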
index 011b17c..b98449f 100644 (file)
@@ -529,17 +529,6 @@ bool snd_hdac_bus_init_chip(struct hdac_bus *bus, bool full_reset)
 
        bus->chip_init = true;
 
-       /*
-        * Default value of '8' is as per the HD audio specification (Rev 1.0a).
-        * Following relation is used to derive STRIPE control value.
-        *  For sample rate <= 48K:
-        *   { ((num_channels * bits_per_sample) / number of SDOs) >= 8 }
-        *  For sample rate > 48K:
-        *   { ((num_channels * bits_per_sample * rate/48000) /
-        *      number of SDOs) >= 8 }
-        */
-       bus->sdo_limit = 8;
-
        return true;
 }
 EXPORT_SYMBOL_GPL(snd_hdac_bus_init_chip);
index 5363d88..2e5a5c5 100644 (file)
@@ -308,7 +308,7 @@ static inline int verify_mpu401(const struct snd_mpu401 *mpu)
 }
 
 /*
- * This is apparently the standard way to initailise an MPU-401
+ * This is apparently the standard way to initialise an MPU-401
  */
 static inline void initialise_mpu401(const struct snd_mpu401 *mpu)
 {
@@ -339,7 +339,7 @@ static void soundscape_free(struct snd_card *c)
 }
 
 /*
- * Tell the SoundScape to begin a DMA tranfer using the given channel.
+ * Tell the SoundScape to begin a DMA transfer using the given channel.
  * All locking issues are left to the caller.
  */
 static void sscape_start_dma_unsafe(unsigned io_base, enum GA_REG reg)
@@ -803,7 +803,7 @@ static int mpu401_open(struct snd_mpu401 *mpu)
 }
 
 /*
- * Initialse an MPU-401 subdevice for MIDI support on the SoundScape.
+ * Initialise an MPU-401 subdevice for MIDI support on the SoundScape.
  */
 static int create_mpu401(struct snd_card *card, int devnum,
                         unsigned long port, int irq)
index 6aeb99a..a20b2bb 100644 (file)
@@ -1950,8 +1950,7 @@ static int snd_echo_create(struct snd_card *card,
                snd_echo_free(chip);
                return -EBUSY;
        }
-       chip->dsp_registers = (volatile u32 __iomem *)
-               ioremap(chip->dsp_registers_phys, sz);
+       chip->dsp_registers = ioremap(chip->dsp_registers_phys, sz);
        if (!chip->dsp_registers) {
                dev_err(chip->card->dev, "ioremap failed\n");
                snd_echo_free(chip);
@@ -2213,7 +2212,6 @@ static int snd_echo_resume(struct device *dev)
        if (err < 0) {
                kfree(commpage_bak);
                dev_err(dev, "resume init_hw err=%d\n", err);
-               snd_echo_free(chip);
                return err;
        }
 
@@ -2240,7 +2238,6 @@ static int snd_echo_resume(struct device *dev)
        if (request_irq(pci->irq, snd_echo_interrupt, IRQF_SHARED,
                        KBUILD_MODNAME, chip)) {
                dev_err(chip->card->dev, "cannot grab irq\n");
-               snd_echo_free(chip);
                return -EBUSY;
        }
        chip->irq = pci->irq;
index 30c6409..0afe13f 100644 (file)
@@ -419,7 +419,7 @@ struct echoaudio {
        short asic_code;                /* Current ASIC code */
        u32 comm_page_phys;                     /* Physical address of the
                                                 * memory seen by DSP */
-       volatile u32 __iomem *dsp_registers;    /* DSP's register base */
+       u32 __iomem *dsp_registers;             /* DSP's register base */
        u32 active_mask;                        /* Chs. active mask or
                                                 * punks out */
 #ifdef CONFIG_PM_SLEEP
index 4bbd12d..b8c8490 100644 (file)
@@ -1863,6 +1863,7 @@ static int hdmi_add_cvt(struct hda_codec *codec, hda_nid_t cvt_nid)
 }
 
 static const struct snd_pci_quirk force_connect_list[] = {
+       SND_PCI_QUIRK(0x103c, 0x870f, "HP", 1),
        SND_PCI_QUIRK(0x103c, 0x871a, "HP", 1),
        {}
 };
index 2477f3e..a1fa983 100644 (file)
@@ -4125,7 +4125,7 @@ static int micmute_led_set(struct led_classdev *led_cdev,
        struct alc_spec *spec = codec->spec;
 
        alc_update_gpio_led(codec, spec->gpio_mic_led_mask,
-                           spec->micmute_led_polarity, !!brightness);
+                           spec->micmute_led_polarity, !brightness);
        return 0;
 }
 
@@ -4160,10 +4160,6 @@ static void alc269_fixup_hp_gpio_led(struct hda_codec *codec,
 static void alc285_fixup_hp_gpio_led(struct hda_codec *codec,
                                const struct hda_fixup *fix, int action)
 {
-       struct alc_spec *spec = codec->spec;
-
-       spec->micmute_led_polarity = 1;
-
        alc_fixup_hp_gpio_led(codec, action, 0x04, 0x01);
 }
 
@@ -6159,6 +6155,7 @@ enum {
        ALC269_FIXUP_CZC_L101,
        ALC269_FIXUP_LEMOTE_A1802,
        ALC269_FIXUP_LEMOTE_A190X,
+       ALC256_FIXUP_INTEL_NUC8_RUGGED,
 };
 
 static const struct hda_fixup alc269_fixups[] = {
@@ -7480,6 +7477,15 @@ static const struct hda_fixup alc269_fixups[] = {
                },
                .chain_id = ALC269_FIXUP_DMIC,
        },
+       [ALC256_FIXUP_INTEL_NUC8_RUGGED] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x1b, 0x01a1913c }, /* use as headset mic, without its own jack detect */
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC269_FIXUP_HEADSET_MODE
+       },
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -7688,6 +7694,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC),
        SND_PCI_QUIRK(0x144d, 0xc169, "Samsung Notebook 9 Pen (NP930SBE-K01US)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
        SND_PCI_QUIRK(0x144d, 0xc176, "Samsung Notebook 9 Pro (NP930MBE-K04US)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
+       SND_PCI_QUIRK(0x144d, 0xc189, "Samsung Galaxy Flex Book (NT950QCG-X716)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
+       SND_PCI_QUIRK(0x144d, 0xc18a, "Samsung Galaxy Book Ion (NT950XCJ-X716A)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
        SND_PCI_QUIRK(0x144d, 0xc740, "Samsung Ativ book 8 (NP870Z5G)", ALC269_FIXUP_ATIV_BOOK_8),
        SND_PCI_QUIRK(0x144d, 0xc812, "Samsung Notebook Pen S (NT950SBE-X58)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
        SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC),
@@ -7777,6 +7785,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x10ec, 0x118c, "Medion EE4254 MD62100", ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE),
        SND_PCI_QUIRK(0x1c06, 0x2013, "Lemote A1802", ALC269_FIXUP_LEMOTE_A1802),
        SND_PCI_QUIRK(0x1c06, 0x2015, "Lemote A190X", ALC269_FIXUP_LEMOTE_A190X),
+       SND_PCI_QUIRK(0x8086, 0x2080, "Intel NUC 8 Rugged", ALC256_FIXUP_INTEL_NUC8_RUGGED),
 
 #if 0
        /* Below is a quirk table taken from the old code.
@@ -7948,6 +7957,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
        {.id = ALC299_FIXUP_PREDATOR_SPK, .name = "predator-spk"},
        {.id = ALC298_FIXUP_HUAWEI_MBX_STEREO, .name = "huawei-mbx-stereo"},
        {.id = ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE, .name = "alc256-medion-headset"},
+       {.id = ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET, .name = "alc298-samsung-headphone"},
        {}
 };
 #define ALC225_STANDARD_PINS \
index 55815fd..406526e 100644 (file)
@@ -138,7 +138,7 @@ static int acp3x_1015_hw_params(struct snd_pcm_substream *substream,
        srate = params_rate(params);
 
        for_each_rtd_codec_dais(rtd, i, codec_dai) {
-               if (strcmp(codec_dai->component->name, "rt1015-aif"))
+               if (strcmp(codec_dai->name, "rt1015-aif"))
                        continue;
                ret = snd_soc_dai_set_bclk_ratio(codec_dai, 64);
                if (ret < 0)
index 623dfd3..7b14d9a 100644 (file)
@@ -314,40 +314,30 @@ static int acp_pdm_dma_close(struct snd_soc_component *component,
        return 0;
 }
 
-static int acp_pdm_dai_hw_params(struct snd_pcm_substream *substream,
-                                struct snd_pcm_hw_params *params,
-                                struct snd_soc_dai *dai)
+static int acp_pdm_dai_trigger(struct snd_pcm_substream *substream,
+                              int cmd, struct snd_soc_dai *dai)
 {
        struct pdm_stream_instance *rtd;
+       int ret;
+       bool pdm_status;
        unsigned int ch_mask;
 
        rtd = substream->runtime->private_data;
-       switch (params_channels(params)) {
+       ret = 0;
+       switch (substream->runtime->channels) {
        case TWO_CH:
                ch_mask = 0x00;
                break;
        default:
                return -EINVAL;
        }
-       rn_writel(ch_mask, rtd->acp_base + ACP_WOV_PDM_NO_OF_CHANNELS);
-       rn_writel(PDM_DECIMATION_FACTOR, rtd->acp_base +
-                 ACP_WOV_PDM_DECIMATION_FACTOR);
-       return 0;
-}
-
-static int acp_pdm_dai_trigger(struct snd_pcm_substream *substream,
-                              int cmd, struct snd_soc_dai *dai)
-{
-       struct pdm_stream_instance *rtd;
-       int ret;
-       bool pdm_status;
-
-       rtd = substream->runtime->private_data;
-       ret = 0;
        switch (cmd) {
        case SNDRV_PCM_TRIGGER_START:
        case SNDRV_PCM_TRIGGER_RESUME:
        case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+               rn_writel(ch_mask, rtd->acp_base + ACP_WOV_PDM_NO_OF_CHANNELS);
+               rn_writel(PDM_DECIMATION_FACTOR, rtd->acp_base +
+                         ACP_WOV_PDM_DECIMATION_FACTOR);
                rtd->bytescount = acp_pdm_get_byte_count(rtd,
                                                         substream->stream);
                pdm_status = check_pdm_dma_status(rtd->acp_base);
@@ -369,7 +359,6 @@ static int acp_pdm_dai_trigger(struct snd_pcm_substream *substream,
 }
 
 static struct snd_soc_dai_ops acp_pdm_dai_ops = {
-       .hw_params = acp_pdm_dai_hw_params,
        .trigger   = acp_pdm_dai_trigger,
 };
 
index 4428c62..3ddd822 100644 (file)
@@ -19,8 +19,8 @@
 
 #define CDC_D_REVISION1                        (0xf000)
 #define CDC_D_PERPH_SUBTYPE            (0xf005)
-#define CDC_D_INT_EN_SET               (0x015)
-#define CDC_D_INT_EN_CLR               (0x016)
+#define CDC_D_INT_EN_SET               (0xf015)
+#define CDC_D_INT_EN_CLR               (0xf016)
 #define MBHC_SWITCH_INT                        BIT(7)
 #define MBHC_MIC_ELECTRICAL_INS_REM_DET        BIT(6)
 #define MBHC_BUTTON_PRESS_DET          BIT(5)
index 68a3b48..3bce9a1 100644 (file)
@@ -412,8 +412,12 @@ int wm8958_aif_ev(struct snd_soc_dapm_widget *w,
                  struct snd_kcontrol *kcontrol, int event)
 {
        struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
+       struct wm8994 *control = dev_get_drvdata(component->dev->parent);
        int i;
 
+       if (control->type != WM8958)
+               return 0;
+
        switch (event) {
        case SND_SOC_DAPM_POST_PMU:
        case SND_SOC_DAPM_PRE_PMU:
index 317916c..0623a22 100644 (file)
@@ -151,7 +151,6 @@ static const struct reg_default wm8962_reg[] = {
        { 40, 0x0000 },   /* R40    - SPKOUTL volume */
        { 41, 0x0000 },   /* R41    - SPKOUTR volume */
 
-       { 48, 0x0000 },   /* R48    - Additional control(4) */
        { 49, 0x0010 },   /* R49    - Class D Control 1 */
        { 51, 0x0003 },   /* R51    - Class D Control 2 */
 
@@ -842,6 +841,7 @@ static bool wm8962_readable_register(struct device *dev, unsigned int reg)
        case WM8962_SPKOUTL_VOLUME:
        case WM8962_SPKOUTR_VOLUME:
        case WM8962_THERMAL_SHUTDOWN_STATUS:
+       case WM8962_ADDITIONAL_CONTROL_4:
        case WM8962_CLASS_D_CONTROL_1:
        case WM8962_CLASS_D_CONTROL_2:
        case WM8962_CLOCKING_4:
index a84ae87..038be66 100644 (file)
 #define WM8994_NUM_DRC 3
 #define WM8994_NUM_EQ  3
 
-static struct {
+struct wm8994_reg_mask {
        unsigned int reg;
        unsigned int mask;
-} wm8994_vu_bits[] = {
+};
+
+static struct wm8994_reg_mask wm8994_vu_bits[] = {
        { WM8994_LEFT_LINE_INPUT_1_2_VOLUME, WM8994_IN1_VU },
        { WM8994_RIGHT_LINE_INPUT_1_2_VOLUME, WM8994_IN1_VU },
        { WM8994_LEFT_LINE_INPUT_3_4_VOLUME, WM8994_IN2_VU },
@@ -60,14 +62,10 @@ static struct {
 
        { WM8994_AIF1_DAC1_LEFT_VOLUME, WM8994_AIF1DAC1_VU },
        { WM8994_AIF1_DAC1_RIGHT_VOLUME, WM8994_AIF1DAC1_VU },
-       { WM8994_AIF1_DAC2_LEFT_VOLUME, WM8994_AIF1DAC2_VU },
-       { WM8994_AIF1_DAC2_RIGHT_VOLUME, WM8994_AIF1DAC2_VU },
        { WM8994_AIF2_DAC_LEFT_VOLUME, WM8994_AIF2DAC_VU },
        { WM8994_AIF2_DAC_RIGHT_VOLUME, WM8994_AIF2DAC_VU },
        { WM8994_AIF1_ADC1_LEFT_VOLUME, WM8994_AIF1ADC1_VU },
        { WM8994_AIF1_ADC1_RIGHT_VOLUME, WM8994_AIF1ADC1_VU },
-       { WM8994_AIF1_ADC2_LEFT_VOLUME, WM8994_AIF1ADC2_VU },
-       { WM8994_AIF1_ADC2_RIGHT_VOLUME, WM8994_AIF1ADC2_VU },
        { WM8994_AIF2_ADC_LEFT_VOLUME, WM8994_AIF2ADC_VU },
        { WM8994_AIF2_ADC_RIGHT_VOLUME, WM8994_AIF1ADC2_VU },
        { WM8994_DAC1_LEFT_VOLUME, WM8994_DAC1_VU },
@@ -76,6 +74,14 @@ static struct {
        { WM8994_DAC2_RIGHT_VOLUME, WM8994_DAC2_VU },
 };
 
+/* VU bitfields for ADC2, DAC2 not available on WM1811 */
+static struct wm8994_reg_mask wm8994_adc2_dac2_vu_bits[] = {
+       { WM8994_AIF1_DAC2_LEFT_VOLUME, WM8994_AIF1DAC2_VU },
+       { WM8994_AIF1_DAC2_RIGHT_VOLUME, WM8994_AIF1DAC2_VU },
+       { WM8994_AIF1_ADC2_LEFT_VOLUME, WM8994_AIF1ADC2_VU },
+       { WM8994_AIF1_ADC2_RIGHT_VOLUME, WM8994_AIF1ADC2_VU },
+};
+
 static int wm8994_drc_base[] = {
        WM8994_AIF1_DRC1_1,
        WM8994_AIF1_DRC2_1,
@@ -1030,6 +1036,26 @@ static bool wm8994_check_class_w_digital(struct snd_soc_component *component)
        return true;
 }
 
+static void wm8994_update_vu_bits(struct snd_soc_component *component)
+{
+       struct wm8994_priv *wm8994 = snd_soc_component_get_drvdata(component);
+       struct wm8994 *control = wm8994->wm8994;
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(wm8994_vu_bits); i++)
+               snd_soc_component_write(component, wm8994_vu_bits[i].reg,
+                                       snd_soc_component_read(component,
+                                                      wm8994_vu_bits[i].reg));
+       if (control->type == WM1811)
+               return;
+
+       for (i = 0; i < ARRAY_SIZE(wm8994_adc2_dac2_vu_bits); i++)
+               snd_soc_component_write(component,
+                               wm8994_adc2_dac2_vu_bits[i].reg,
+                               snd_soc_component_read(component,
+                                       wm8994_adc2_dac2_vu_bits[i].reg));
+}
+
 static int aif_mclk_set(struct snd_soc_component *component, int aif, bool enable)
 {
        struct wm8994_priv *wm8994 = snd_soc_component_get_drvdata(component);
@@ -1076,7 +1102,7 @@ static int aif1clk_ev(struct snd_soc_dapm_widget *w,
        struct wm8994_priv *wm8994 = snd_soc_component_get_drvdata(component);
        struct wm8994 *control = wm8994->wm8994;
        int mask = WM8994_AIF1DAC1L_ENA | WM8994_AIF1DAC1R_ENA;
-       int ret, i;
+       int ret;
        int dac;
        int adc;
        int val;
@@ -1144,10 +1170,7 @@ static int aif1clk_ev(struct snd_soc_dapm_widget *w,
                break;
 
        case SND_SOC_DAPM_POST_PMU:
-               for (i = 0; i < ARRAY_SIZE(wm8994_vu_bits); i++)
-                       snd_soc_component_write(component, wm8994_vu_bits[i].reg,
-                                     snd_soc_component_read(component,
-                                                  wm8994_vu_bits[i].reg));
+               wm8994_update_vu_bits(component);
                break;
 
        case SND_SOC_DAPM_PRE_PMD:
@@ -1181,7 +1204,7 @@ static int aif2clk_ev(struct snd_soc_dapm_widget *w,
                      struct snd_kcontrol *kcontrol, int event)
 {
        struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
-       int ret, i;
+       int ret;
        int dac;
        int adc;
        int val;
@@ -1237,10 +1260,7 @@ static int aif2clk_ev(struct snd_soc_dapm_widget *w,
                break;
 
        case SND_SOC_DAPM_POST_PMU:
-               for (i = 0; i < ARRAY_SIZE(wm8994_vu_bits); i++)
-                       snd_soc_component_write(component, wm8994_vu_bits[i].reg,
-                                     snd_soc_component_read(component,
-                                                  wm8994_vu_bits[i].reg));
+               wm8994_update_vu_bits(component);
                break;
 
        case SND_SOC_DAPM_PRE_PMD:
@@ -4346,6 +4366,14 @@ static int wm8994_component_probe(struct snd_soc_component *component)
                                    wm8994_vu_bits[i].mask,
                                    wm8994_vu_bits[i].mask);
 
+       if (control->type != WM1811) {
+               for (i = 0; i < ARRAY_SIZE(wm8994_adc2_dac2_vu_bits); i++)
+                       snd_soc_component_update_bits(component,
+                                       wm8994_adc2_dac2_vu_bits[i].reg,
+                                       wm8994_adc2_dac2_vu_bits[i].mask,
+                                       wm8994_adc2_dac2_vu_bits[i].mask);
+       }
+
        /* Set the low bit of the 3D stereo depth so TLV matches */
        snd_soc_component_update_bits(component, WM8994_AIF1_DAC1_FILTERS_2,
                            1 << WM8994_AIF1DAC1_3D_GAIN_SHIFT,
index de136c0..52adedc 100644 (file)
@@ -73,6 +73,7 @@ struct cpu_priv {
  * @codec_priv: CODEC private data
  * @cpu_priv: CPU private data
  * @card: ASoC card structure
+ * @streams: Mask of currently active streams
  * @sample_rate: Current sample rate
  * @sample_format: Current sample format
  * @asrc_rate: ASRC sample rate used by Back-Ends
@@ -89,6 +90,7 @@ struct fsl_asoc_card_priv {
        struct codec_priv codec_priv;
        struct cpu_priv cpu_priv;
        struct snd_soc_card card;
+       u8 streams;
        u32 sample_rate;
        snd_pcm_format_t sample_format;
        u32 asrc_rate;
@@ -151,21 +153,17 @@ static int fsl_asoc_card_hw_params(struct snd_pcm_substream *substream,
        struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
        struct fsl_asoc_card_priv *priv = snd_soc_card_get_drvdata(rtd->card);
        bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
+       struct codec_priv *codec_priv = &priv->codec_priv;
        struct cpu_priv *cpu_priv = &priv->cpu_priv;
        struct device *dev = rtd->card->dev;
+       unsigned int pll_out;
        int ret;
 
        priv->sample_rate = params_rate(params);
        priv->sample_format = params_format(params);
+       priv->streams |= BIT(substream->stream);
 
-       /*
-        * If codec-dai is DAI Master and all configurations are already in the
-        * set_bias_level(), bypass the remaining settings in hw_params().
-        * Note: (dai_fmt & CBM_CFM) includes CBM_CFM and CBM_CFS.
-        */
-       if ((priv->card.set_bias_level &&
-            priv->dai_fmt & SND_SOC_DAIFMT_CBM_CFM) ||
-           fsl_asoc_card_is_ac97(priv))
+       if (fsl_asoc_card_is_ac97(priv))
                return 0;
 
        /* Specific configurations of DAIs starts from here */
@@ -174,7 +172,7 @@ static int fsl_asoc_card_hw_params(struct snd_pcm_substream *substream,
                                     cpu_priv->sysclk_dir[tx]);
        if (ret && ret != -ENOTSUPP) {
                dev_err(dev, "failed to set sysclk for cpu dai\n");
-               return ret;
+               goto fail;
        }
 
        if (cpu_priv->slot_width) {
@@ -182,6 +180,68 @@ static int fsl_asoc_card_hw_params(struct snd_pcm_substream *substream,
                                               cpu_priv->slot_width);
                if (ret && ret != -ENOTSUPP) {
                        dev_err(dev, "failed to set TDM slot for cpu dai\n");
+                       goto fail;
+               }
+       }
+
+       /* Specific configuration for PLL */
+       if (codec_priv->pll_id && codec_priv->fll_id) {
+               if (priv->sample_format == SNDRV_PCM_FORMAT_S24_LE)
+                       pll_out = priv->sample_rate * 384;
+               else
+                       pll_out = priv->sample_rate * 256;
+
+               ret = snd_soc_dai_set_pll(asoc_rtd_to_codec(rtd, 0),
+                                         codec_priv->pll_id,
+                                         codec_priv->mclk_id,
+                                         codec_priv->mclk_freq, pll_out);
+               if (ret) {
+                       dev_err(dev, "failed to start FLL: %d\n", ret);
+                       goto fail;
+               }
+
+               ret = snd_soc_dai_set_sysclk(asoc_rtd_to_codec(rtd, 0),
+                                            codec_priv->fll_id,
+                                            pll_out, SND_SOC_CLOCK_IN);
+
+               if (ret && ret != -ENOTSUPP) {
+                       dev_err(dev, "failed to set SYSCLK: %d\n", ret);
+                       goto fail;
+               }
+       }
+
+       return 0;
+
+fail:
+       priv->streams &= ~BIT(substream->stream);
+       return ret;
+}
+
+static int fsl_asoc_card_hw_free(struct snd_pcm_substream *substream)
+{
+       struct snd_soc_pcm_runtime *rtd = substream->private_data;
+       struct fsl_asoc_card_priv *priv = snd_soc_card_get_drvdata(rtd->card);
+       struct codec_priv *codec_priv = &priv->codec_priv;
+       struct device *dev = rtd->card->dev;
+       int ret;
+
+       priv->streams &= ~BIT(substream->stream);
+
+       if (!priv->streams && codec_priv->pll_id && codec_priv->fll_id) {
+               /* Force freq to be 0 to avoid error message in codec */
+               ret = snd_soc_dai_set_sysclk(asoc_rtd_to_codec(rtd, 0),
+                                            codec_priv->mclk_id,
+                                            0,
+                                            SND_SOC_CLOCK_IN);
+               if (ret) {
+                       dev_err(dev, "failed to switch away from FLL: %d\n", ret);
+                       return ret;
+               }
+
+               ret = snd_soc_dai_set_pll(asoc_rtd_to_codec(rtd, 0),
+                                         codec_priv->pll_id, 0, 0, 0);
+               if (ret && ret != -ENOTSUPP) {
+                       dev_err(dev, "failed to stop FLL: %d\n", ret);
                        return ret;
                }
        }
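
To make the pll_out arithmetic in the new fsl_asoc_card_hw_params() path above concrete, here is a short standalone sketch of the 256x / 384x multiplier selection (the format enum is a stand-in for the SNDRV_PCM_FORMAT_* constants, and the rates are illustrative):

// Worked example of the pll_out selection used above:
// 384 * rate for S24_LE streams, 256 * rate otherwise.
#include <cstdio>

enum sample_format { FMT_S16_LE, FMT_S24_LE };  /* stand-ins for SNDRV_PCM_FORMAT_* */

static unsigned long pll_out_for(unsigned int rate, enum sample_format fmt)
{
        return (unsigned long)rate * (fmt == FMT_S24_LE ? 384 : 256);
}

int main(void)
{
        /* 48 kHz, 16-bit:   48000 * 256 = 12,288,000 Hz (12.288 MHz) */
        printf("48k/S16:   %lu Hz\n", pll_out_for(48000, FMT_S16_LE));

        /* 44.1 kHz, 24-bit: 44100 * 384 = 16,934,400 Hz (16.9344 MHz) */
        printf("44.1k/S24: %lu Hz\n", pll_out_for(44100, FMT_S24_LE));

        return 0;
}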
@@ -191,6 +251,7 @@ static int fsl_asoc_card_hw_params(struct snd_pcm_substream *substream,
 
 static const struct snd_soc_ops fsl_asoc_card_ops = {
        .hw_params = fsl_asoc_card_hw_params,
+       .hw_free = fsl_asoc_card_hw_free,
 };
 
 static int be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
@@ -254,75 +315,6 @@ static struct snd_soc_dai_link fsl_asoc_card_dai[] = {
        },
 };
 
-static int fsl_asoc_card_set_bias_level(struct snd_soc_card *card,
-                                       struct snd_soc_dapm_context *dapm,
-                                       enum snd_soc_bias_level level)
-{
-       struct fsl_asoc_card_priv *priv = snd_soc_card_get_drvdata(card);
-       struct snd_soc_pcm_runtime *rtd;
-       struct snd_soc_dai *codec_dai;
-       struct codec_priv *codec_priv = &priv->codec_priv;
-       struct device *dev = card->dev;
-       unsigned int pll_out;
-       int ret;
-
-       rtd = snd_soc_get_pcm_runtime(card, &card->dai_link[0]);
-       codec_dai = asoc_rtd_to_codec(rtd, 0);
-       if (dapm->dev != codec_dai->dev)
-               return 0;
-
-       switch (level) {
-       case SND_SOC_BIAS_PREPARE:
-               if (dapm->bias_level != SND_SOC_BIAS_STANDBY)
-                       break;
-
-               if (priv->sample_format == SNDRV_PCM_FORMAT_S24_LE)
-                       pll_out = priv->sample_rate * 384;
-               else
-                       pll_out = priv->sample_rate * 256;
-
-               ret = snd_soc_dai_set_pll(codec_dai, codec_priv->pll_id,
-                                         codec_priv->mclk_id,
-                                         codec_priv->mclk_freq, pll_out);
-               if (ret) {
-                       dev_err(dev, "failed to start FLL: %d\n", ret);
-                       return ret;
-               }
-
-               ret = snd_soc_dai_set_sysclk(codec_dai, codec_priv->fll_id,
-                                            pll_out, SND_SOC_CLOCK_IN);
-               if (ret && ret != -ENOTSUPP) {
-                       dev_err(dev, "failed to set SYSCLK: %d\n", ret);
-                       return ret;
-               }
-               break;
-
-       case SND_SOC_BIAS_STANDBY:
-               if (dapm->bias_level != SND_SOC_BIAS_PREPARE)
-                       break;
-
-               ret = snd_soc_dai_set_sysclk(codec_dai, codec_priv->mclk_id,
-                                            codec_priv->mclk_freq,
-                                            SND_SOC_CLOCK_IN);
-               if (ret && ret != -ENOTSUPP) {
-                       dev_err(dev, "failed to switch away from FLL: %d\n", ret);
-                       return ret;
-               }
-
-               ret = snd_soc_dai_set_pll(codec_dai, codec_priv->pll_id, 0, 0, 0);
-               if (ret) {
-                       dev_err(dev, "failed to stop FLL: %d\n", ret);
-                       return ret;
-               }
-               break;
-
-       default:
-               break;
-       }
-
-       return 0;
-}
-
 static int fsl_asoc_card_audmux_init(struct device_node *np,
                                     struct fsl_asoc_card_priv *priv)
 {
@@ -611,7 +603,6 @@ static int fsl_asoc_card_probe(struct platform_device *pdev)
        /* Diversify the card configurations */
        if (of_device_is_compatible(np, "fsl,imx-audio-cs42888")) {
                codec_dai_name = "cs42888";
-               priv->card.set_bias_level = NULL;
                priv->cpu_priv.sysclk_freq[TX] = priv->codec_priv.mclk_freq;
                priv->cpu_priv.sysclk_freq[RX] = priv->codec_priv.mclk_freq;
                priv->cpu_priv.sysclk_dir[TX] = SND_SOC_CLOCK_OUT;
@@ -628,26 +619,22 @@ static int fsl_asoc_card_probe(struct platform_device *pdev)
                priv->dai_fmt |= SND_SOC_DAIFMT_CBM_CFM;
        } else if (of_device_is_compatible(np, "fsl,imx-audio-wm8962")) {
                codec_dai_name = "wm8962";
-               priv->card.set_bias_level = fsl_asoc_card_set_bias_level;
                priv->codec_priv.mclk_id = WM8962_SYSCLK_MCLK;
                priv->codec_priv.fll_id = WM8962_SYSCLK_FLL;
                priv->codec_priv.pll_id = WM8962_FLL;
                priv->dai_fmt |= SND_SOC_DAIFMT_CBM_CFM;
        } else if (of_device_is_compatible(np, "fsl,imx-audio-wm8960")) {
                codec_dai_name = "wm8960-hifi";
-               priv->card.set_bias_level = fsl_asoc_card_set_bias_level;
                priv->codec_priv.fll_id = WM8960_SYSCLK_AUTO;
                priv->codec_priv.pll_id = WM8960_SYSCLK_AUTO;
                priv->dai_fmt |= SND_SOC_DAIFMT_CBM_CFM;
        } else if (of_device_is_compatible(np, "fsl,imx-audio-ac97")) {
                codec_dai_name = "ac97-hifi";
-               priv->card.set_bias_level = NULL;
                priv->dai_fmt = SND_SOC_DAIFMT_AC97;
                priv->card.dapm_routes = audio_map_ac97;
                priv->card.num_dapm_routes = ARRAY_SIZE(audio_map_ac97);
        } else if (of_device_is_compatible(np, "fsl,imx-audio-mqs")) {
                codec_dai_name = "fsl-mqs-dai";
-               priv->card.set_bias_level = NULL;
                priv->dai_fmt = SND_SOC_DAIFMT_LEFT_J |
                                SND_SOC_DAIFMT_CBS_CFS |
                                SND_SOC_DAIFMT_NB_NF;
@@ -657,7 +644,6 @@ static int fsl_asoc_card_probe(struct platform_device *pdev)
                priv->card.num_dapm_routes = ARRAY_SIZE(audio_map_tx);
        } else if (of_device_is_compatible(np, "fsl,imx-audio-wm8524")) {
                codec_dai_name = "wm8524-hifi";
-               priv->card.set_bias_level = NULL;
                priv->dai_fmt |= SND_SOC_DAIFMT_CBS_CFS;
                priv->dai_link[1].dpcm_capture = 0;
                priv->dai_link[2].dpcm_capture = 0;
index 9e4f66b..2319848 100644 (file)
@@ -339,7 +339,6 @@ static int psc_dma_new(struct snd_soc_component *component,
 static void psc_dma_free(struct snd_soc_component *component,
                         struct snd_pcm *pcm)
 {
-       struct snd_soc_pcm_runtime *rtd = pcm->private_data;
        struct snd_pcm_substream *substream;
        int stream;
 
index 49b9f18..b1cac7a 100644 (file)
@@ -331,7 +331,7 @@ static int sst_media_open(struct snd_pcm_substream *substream,
 
        ret_val = power_up_sst(stream);
        if (ret_val < 0)
-               return ret_val;
+               goto out_power_up;
 
        /* Make sure, that the period size is always even */
        snd_pcm_hw_constraint_step(substream->runtime, 0,
@@ -340,8 +340,9 @@ static int sst_media_open(struct snd_pcm_substream *substream,
        return snd_pcm_hw_constraint_integer(runtime,
                         SNDRV_PCM_HW_PARAM_PERIODS);
 out_ops:
-       kfree(stream);
        mutex_unlock(&sst_lock);
+out_power_up:
+       kfree(stream);
        return ret_val;
 }
 
index 2a5302f..0168af8 100644 (file)
@@ -1150,206 +1150,206 @@ static int q6afe_of_xlate_dai_name(struct snd_soc_component *component,
 }
 
 static const struct snd_soc_dapm_widget q6afe_dai_widgets[] = {
-       SND_SOC_DAPM_AIF_IN("HDMI_RX", NULL, 0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_IN("SLIMBUS_0_RX", NULL, 0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_IN("SLIMBUS_1_RX", NULL, 0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_IN("SLIMBUS_2_RX", NULL, 0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_IN("SLIMBUS_3_RX", NULL, 0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_IN("SLIMBUS_4_RX", NULL, 0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_IN("SLIMBUS_5_RX", NULL, 0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_IN("SLIMBUS_6_RX", NULL, 0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_OUT("SLIMBUS_0_TX", NULL, 0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_OUT("SLIMBUS_1_TX", NULL, 0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_OUT("SLIMBUS_2_TX", NULL, 0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_OUT("SLIMBUS_3_TX", NULL, 0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_OUT("SLIMBUS_4_TX", NULL, 0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_OUT("SLIMBUS_5_TX", NULL, 0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_OUT("SLIMBUS_6_TX", NULL, 0, 0, 0, 0),
+       SND_SOC_DAPM_AIF_IN("HDMI_RX", NULL, 0, SND_SOC_NOPM, 0, 0),
+       SND_SOC_DAPM_AIF_IN("SLIMBUS_0_RX", NULL, 0, SND_SOC_NOPM, 0, 0),
+       SND_SOC_DAPM_AIF_IN("SLIMBUS_1_RX", NULL, 0, SND_SOC_NOPM, 0, 0),
+       SND_SOC_DAPM_AIF_IN("SLIMBUS_2_RX", NULL, 0, SND_SOC_NOPM, 0, 0),
+       SND_SOC_DAPM_AIF_IN("SLIMBUS_3_RX", NULL, 0, SND_SOC_NOPM, 0, 0),
+       SND_SOC_DAPM_AIF_IN("SLIMBUS_4_RX", NULL, 0, SND_SOC_NOPM, 0, 0),
+       SND_SOC_DAPM_AIF_IN("SLIMBUS_5_RX", NULL, 0, SND_SOC_NOPM, 0, 0),
+       SND_SOC_DAPM_AIF_IN("SLIMBUS_6_RX", NULL, 0, SND_SOC_NOPM, 0, 0),
+       SND_SOC_DAPM_AIF_OUT("SLIMBUS_0_TX", NULL, 0, SND_SOC_NOPM, 0, 0),
+       SND_SOC_DAPM_AIF_OUT("SLIMBUS_1_TX", NULL, 0, SND_SOC_NOPM, 0, 0),
+       SND_SOC_DAPM_AIF_OUT("SLIMBUS_2_TX", NULL, 0, SND_SOC_NOPM, 0, 0),
+       SND_SOC_DAPM_AIF_OUT("SLIMBUS_3_TX", NULL, 0, SND_SOC_NOPM, 0, 0),
+       SND_SOC_DAPM_AIF_OUT("SLIMBUS_4_TX", NULL, 0, SND_SOC_NOPM, 0, 0),
+       SND_SOC_DAPM_AIF_OUT("SLIMBUS_5_TX", NULL, 0, SND_SOC_NOPM, 0, 0),
+       SND_SOC_DAPM_AIF_OUT("SLIMBUS_6_TX", NULL, 0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_IN("QUAT_MI2S_RX", NULL,
-                                               0, 0, 0, 0),
+                                               0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_OUT("QUAT_MI2S_TX", NULL,
-                                               0, 0, 0, 0),
+                                               0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_IN("TERT_MI2S_RX", NULL,
-                                               0, 0, 0, 0),
+                                               0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_OUT("TERT_MI2S_TX", NULL,
-                                               0, 0, 0, 0),
+                                               0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_IN("SEC_MI2S_RX", NULL,
-                            0, 0, 0, 0),
+                            0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_OUT("SEC_MI2S_TX", NULL,
-                                               0, 0, 0, 0),
+                                               0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_IN("SEC_MI2S_RX_SD1",
                        "Secondary MI2S Playback SD1",
-                       0, 0, 0, 0),
+                       0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_IN("PRI_MI2S_RX", NULL,
-                            0, 0, 0, 0),
+                            0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_OUT("PRI_MI2S_TX", NULL,
-                                               0, 0, 0, 0),
+                                               0, SND_SOC_NOPM, 0, 0),
 
        SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_0", NULL,
-                            0, 0, 0, 0),
+                            0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_1", NULL,
-                            0, 0, 0, 0),
+                            0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_2", NULL,
-                            0, 0, 0, 0),
+                            0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_3", NULL,
-                            0, 0, 0, 0),
+                            0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_4", NULL,
-                            0, 0, 0, 0),
+                            0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_5", NULL,
-                            0, 0, 0, 0),
+                            0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_6", NULL,
-                            0, 0, 0, 0),
+                            0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_7", NULL,
-                            0, 0, 0, 0),
+                            0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_0", NULL,
-                                               0, 0, 0, 0),
+                                               0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_1", NULL,
-                                               0, 0, 0, 0),
+                                               0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_2", NULL,
-                                               0, 0, 0, 0),
+                                               0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_3", NULL,
-                                               0, 0, 0, 0),
+                                               0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_4", NULL,
-                                               0, 0, 0, 0),
+                                               0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_5", NULL,
-                                               0, 0, 0, 0),
+                                               0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_6", NULL,
-                                               0, 0, 0, 0),
+                                               0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_7", NULL,
-                                               0, 0, 0, 0),
+                                               0, SND_SOC_NOPM, 0, 0),
 
        SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_0", NULL,
-                            0, 0, 0, 0),
+                            0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_1", NULL,
-                            0, 0, 0, 0),
+                            0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_2", NULL,
-                            0, 0, 0, 0),
+                            0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_3", NULL,
-                            0, 0, 0, 0),
+                            0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_4", NULL,
-                            0, 0, 0, 0),
+                            0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_5", NULL,
-                            0, 0, 0, 0),
+                            0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_6", NULL,
-                            0, 0, 0, 0),
+                            0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_7", NULL,
-                            0, 0, 0, 0),
+                            0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_0", NULL,
-                                               0, 0, 0, 0),
+                                               0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_1", NULL,
-                                               0, 0, 0, 0),
+                                               0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_2", NULL,
-                                               0, 0, 0, 0),
+                                               0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_3", NULL,
-                                               0, 0, 0, 0),
+                                               0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_4", NULL,
-                                               0, 0, 0, 0),
+                                               0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_5", NULL,
-                                               0, 0, 0, 0),
+                                               0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_6", NULL,
-                                               0, 0, 0, 0),
+                                               0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_7", NULL,
-                                               0, 0, 0, 0),
+                                               0, SND_SOC_NOPM, 0, 0),
 
        SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_0", NULL,
-                            0, 0, 0, 0),
+                            0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_1", NULL,
-                            0, 0, 0, 0),
+                            0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_2", NULL,
-                            0, 0, 0, 0),
+                            0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_3", NULL,
-                            0, 0, 0, 0),
+                            0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_4", NULL,
-                            0, 0, 0, 0),
+                            0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_5", NULL,
-                            0, 0, 0, 0),
+                            0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_6", NULL,
-                            0, 0, 0, 0),
+                            0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_7", NULL,
-                            0, 0, 0, 0),
+                            0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_0", NULL,
-                                               0, 0, 0, 0),
+                                               0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_1", NULL,
-                                               0, 0, 0, 0),
+                                               0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_2", NULL,
-                                               0, 0, 0, 0),
+                                               0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_3", NULL,
-                                               0, 0, 0, 0),
+                                               0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_4", NULL,
-                                               0, 0, 0, 0),
+                                               0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_5", NULL,
-                                               0, 0, 0, 0),
+                                               0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_6", NULL,
-                                               0, 0, 0, 0),
+                                               0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_7", NULL,
-                                               0, 0, 0, 0),
+                                               0, SND_SOC_NOPM, 0, 0),
 
        SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_0", NULL,
-                            0, 0, 0, 0),
+                            0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_1", NULL,
-                            0, 0, 0, 0),
+                            0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_2", NULL,
-                            0, 0, 0, 0),
+                            0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_3", NULL,
-                            0, 0, 0, 0),
+                            0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_4", NULL,
-                            0, 0, 0, 0),
+                            0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_5", NULL,
-                            0, 0, 0, 0),
+                            0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_6", NULL,
-                            0, 0, 0, 0),
+                            0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_7", NULL,
-                            0, 0, 0, 0),
+                            0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_0", NULL,
-                                               0, 0, 0, 0),
+                                               0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_1", NULL,
-                                               0, 0, 0, 0),
+                                               0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_2", NULL,
-                                               0, 0, 0, 0),
+                                               0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_3", NULL,
-                                               0, 0, 0, 0),
+                                               0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_4", NULL,
-                                               0, 0, 0, 0),
+                                               0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_5", NULL,
-                                               0, 0, 0, 0),
+                                               0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_6", NULL,
-                                               0, 0, 0, 0),
+                                               0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_7", NULL,
-                                               0, 0, 0, 0),
+                                               0, SND_SOC_NOPM, 0, 0),
 
        SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_0", NULL,
-                            0, 0, 0, 0),
+                            0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_1", NULL,
-                            0, 0, 0, 0),
+                            0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_2", NULL,
-                            0, 0, 0, 0),
+                            0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_3", NULL,
-                            0, 0, 0, 0),
+                            0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_4", NULL,
-                            0, 0, 0, 0),
+                            0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_5", NULL,
-                            0, 0, 0, 0),
+                            0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_6", NULL,
-                            0, 0, 0, 0),
+                            0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_7", NULL,
-                            0, 0, 0, 0),
+                            0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_0", NULL,
-                                               0, 0, 0, 0),
+                                               0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_1", NULL,
-                                               0, 0, 0, 0),
+                                               0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_2", NULL,
-                                               0, 0, 0, 0),
+                                               0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_3", NULL,
-                                               0, 0, 0, 0),
+                                               0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_4", NULL,
-                                               0, 0, 0, 0),
+                                               0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_5", NULL,
-                                               0, 0, 0, 0),
+                                               0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_6", NULL,
-                                               0, 0, 0, 0),
+                                               0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_7", NULL,
-                                               0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_OUT("DISPLAY_PORT_RX", "NULL", 0, 0, 0, 0),
+                                               0, SND_SOC_NOPM, 0, 0),
+       SND_SOC_DAPM_AIF_OUT("DISPLAY_PORT_RX", "NULL", 0, SND_SOC_NOPM, 0, 0),
 };
 
 static const struct snd_soc_component_driver q6afe_dai_component = {
index eaa95b5..25d23e0 100644 (file)
@@ -973,6 +973,20 @@ static int msm_routing_probe(struct snd_soc_component *c)
        return 0;
 }
 
+static unsigned int q6routing_reg_read(struct snd_soc_component *component,
+                                      unsigned int reg)
+{
+       /* No backing hardware register; report a fixed default value. */
+       return 0;
+}
+
+static int q6routing_reg_write(struct snd_soc_component *component,
+                              unsigned int reg, unsigned int val)
+{
+       /* No backing hardware register; accept and discard the write. */
+       return 0;
+}
+
 static const struct snd_soc_component_driver msm_soc_routing_component = {
        .probe = msm_routing_probe,
        .name = DRV_NAME,
@@ -981,6 +995,8 @@ static const struct snd_soc_component_driver msm_soc_routing_component = {
        .num_dapm_widgets = ARRAY_SIZE(msm_qdsp6_widgets),
        .dapm_routes = intercon,
        .num_dapm_routes = ARRAY_SIZE(intercon),
+       .read = q6routing_reg_read,
+       .write = q6routing_reg_write,
 };
 
 static int q6pcm_routing_probe(struct platform_device *pdev)
index f0b4f4b..5504b92 100644 (file)
@@ -406,7 +406,7 @@ static unsigned int soc_component_read_no_lock(
                ret = -EIO;
 
        if (ret < 0)
-               soc_component_ret(component, ret);
+               return soc_component_ret(component, ret);
 
        return val;
 }
index fe71171..0cbe31e 100644 (file)
@@ -71,7 +71,7 @@ static int tegra186_dspk_put_control(struct snd_kcontrol *kcontrol,
        return 0;
 }
 
-static int tegra186_dspk_runtime_suspend(struct device *dev)
+static int __maybe_unused tegra186_dspk_runtime_suspend(struct device *dev)
 {
        struct tegra186_dspk *dspk = dev_get_drvdata(dev);
 
@@ -83,7 +83,7 @@ static int tegra186_dspk_runtime_suspend(struct device *dev)
        return 0;
 }
 
-static int tegra186_dspk_runtime_resume(struct device *dev)
+static int __maybe_unused tegra186_dspk_runtime_resume(struct device *dev)
 {
        struct tegra186_dspk *dspk = dev_get_drvdata(dev);
        int err;
index 4894e8e..1268046 100644 (file)
@@ -219,7 +219,7 @@ static const struct regmap_config tegra186_admaif_regmap_config = {
        .cache_type             = REGCACHE_FLAT,
 };
 
-static int tegra_admaif_runtime_suspend(struct device *dev)
+static int __maybe_unused tegra_admaif_runtime_suspend(struct device *dev)
 {
        struct tegra_admaif *admaif = dev_get_drvdata(dev);
 
@@ -229,7 +229,7 @@ static int tegra_admaif_runtime_suspend(struct device *dev)
        return 0;
 }
 
-static int tegra_admaif_runtime_resume(struct device *dev)
+static int __maybe_unused tegra_admaif_runtime_resume(struct device *dev)
 {
        struct tegra_admaif *admaif = dev_get_drvdata(dev);
 
index 5123a96..66287a7 100644 (file)
@@ -564,7 +564,7 @@ static const struct of_device_id tegra_ahub_of_match[] = {
 };
 MODULE_DEVICE_TABLE(of, tegra_ahub_of_match);
 
-static int tegra_ahub_runtime_suspend(struct device *dev)
+static int __maybe_unused tegra_ahub_runtime_suspend(struct device *dev)
 {
        struct tegra_ahub *ahub = dev_get_drvdata(dev);
 
@@ -576,7 +576,7 @@ static int tegra_ahub_runtime_suspend(struct device *dev)
        return 0;
 }
 
-static int tegra_ahub_runtime_resume(struct device *dev)
+static int __maybe_unused tegra_ahub_runtime_resume(struct device *dev)
 {
        struct tegra_ahub *ahub = dev_get_drvdata(dev);
        int err;
index d682414..a661f40 100644 (file)
@@ -40,7 +40,7 @@ static const struct reg_default tegra210_dmic_reg_defaults[] = {
        { TEGRA210_DMIC_LP_BIQUAD_1_COEF_4, 0x0 },
 };
 
-static int tegra210_dmic_runtime_suspend(struct device *dev)
+static int __maybe_unused tegra210_dmic_runtime_suspend(struct device *dev)
 {
        struct tegra210_dmic *dmic = dev_get_drvdata(dev);
 
@@ -52,7 +52,7 @@ static int tegra210_dmic_runtime_suspend(struct device *dev)
        return 0;
 }
 
-static int tegra210_dmic_runtime_resume(struct device *dev)
+static int __maybe_unused tegra210_dmic_runtime_resume(struct device *dev)
 {
        struct tegra210_dmic *dmic = dev_get_drvdata(dev);
        int err;
index 7220921..a383bd5 100644 (file)
@@ -164,7 +164,7 @@ static int tegra210_i2s_init(struct snd_soc_dapm_widget *w,
        return tegra210_i2s_sw_reset(compnt, is_playback);
 }
 
-static int tegra210_i2s_runtime_suspend(struct device *dev)
+static int __maybe_unused tegra210_i2s_runtime_suspend(struct device *dev)
 {
        struct tegra210_i2s *i2s = dev_get_drvdata(dev);
 
@@ -176,7 +176,7 @@ static int tegra210_i2s_runtime_suspend(struct device *dev)
        return 0;
 }
 
-static int tegra210_i2s_runtime_resume(struct device *dev)
+static int __maybe_unused tegra210_i2s_runtime_resume(struct device *dev)
 {
        struct tegra210_i2s *i2s = dev_get_drvdata(dev);
        int err;
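
The __maybe_unused annotations in these Tegra hunks matter because the callbacks are normally referenced only through SET_RUNTIME_PM_OPS(), which compiles away when CONFIG_PM is disabled; a minimal sketch of that wiring follows (the ops struct name is illustrative, not the driver's):

	#include <linux/pm.h>
	#include <linux/pm_runtime.h>

	/* With CONFIG_PM=n the two callbacks would otherwise be unreferenced
	 * and trigger -Wunused-function; __maybe_unused keeps the build clean. */
	static const struct dev_pm_ops example_i2s_pm_ops = {
		SET_RUNTIME_PM_OPS(tegra210_i2s_runtime_suspend,
				   tegra210_i2s_runtime_resume, NULL)
	};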
index de43267..5351d71 100644 (file)
@@ -137,6 +137,7 @@ struct snd_usb_substream {
        unsigned int tx_length_quirk:1; /* add length specifier to transfers */
        unsigned int fmt_type;          /* USB audio format type (1-3) */
        unsigned int pkt_offset_adj;    /* Bytes to drop from beginning of packets (for non-compliant devices) */
+       unsigned int stream_offset_adj; /* Bytes to drop from beginning of stream (for non-compliant devices) */
 
        unsigned int running: 1;        /* running status */
 
index 6b0f3a8..81e987e 100644 (file)
@@ -2371,7 +2371,7 @@ static int build_audio_procunit(struct mixer_build *state, int unitid,
        int num_ins;
        struct usb_mixer_elem_info *cval;
        struct snd_kcontrol *kctl;
-       int i, err, nameid, type, len;
+       int i, err, nameid, type, len, val;
        const struct procunit_info *info;
        const struct procunit_value_info *valinfo;
        const struct usbmix_name_map *map;
@@ -2474,6 +2474,12 @@ static int build_audio_procunit(struct mixer_build *state, int unitid,
                        break;
                }
 
+               err = get_cur_ctl_value(cval, cval->control << 8, &val);
+               if (err < 0) {
+                       usb_mixer_elem_info_free(cval);
+                       return -EINVAL;
+               }
+
                kctl = snd_ctl_new1(&mixer_procunit_ctl, cval);
                if (!kctl) {
                        usb_mixer_elem_info_free(cval);
index c369c81..5b43e9e 100644 (file)
@@ -371,6 +371,7 @@ static const struct usbmix_name_map asus_rog_map[] = {
 };
 
 static const struct usbmix_name_map lenovo_p620_rear_map[] = {
+       { 19, NULL, 2 }, /* FU, Volume */
        { 19, NULL, 12 }, /* FU, Input Gain Pad */
        {}
 };
index cec1cfd..199cdbf 100644 (file)
@@ -185,6 +185,7 @@ static const struct rc_config {
        { USB_ID(0x041e, 0x3042), 0, 1, 1, 1,  1,  0x000d }, /* Usb X-Fi S51 */
        { USB_ID(0x041e, 0x30df), 0, 1, 1, 1,  1,  0x000d }, /* Usb X-Fi S51 Pro */
        { USB_ID(0x041e, 0x3237), 0, 1, 1, 1,  1,  0x000d }, /* Usb X-Fi S51 Pro */
+       { USB_ID(0x041e, 0x3263), 0, 1, 1, 1,  1,  0x000d }, /* Usb X-Fi S51 Pro */
        { USB_ID(0x041e, 0x3048), 2, 2, 6, 6,  2,  0x6e91 }, /* Toshiba SB0500 */
 };
 
index 986145f..a4d4d71 100644 (file)
@@ -329,7 +329,7 @@ static int snd_us16x08_bus_put(struct snd_kcontrol *kcontrol,
                elem->cached |= 1;
                elem->cache_val[0] = val;
        } else {
-               usb_audio_dbg(chip, "Failed to set buss param, err:%d\n", err);
+               usb_audio_dbg(chip, "Failed to set bus parameter, err:%d\n", err);
        }
 
        return err > 0 ? 1 : 0;
index 415bfec..5600751 100644 (file)
@@ -1420,6 +1420,12 @@ static void retire_capture_urb(struct snd_usb_substream *subs,
                        // continue;
                }
                bytes = urb->iso_frame_desc[i].actual_length;
+               if (subs->stream_offset_adj > 0) {
+                       unsigned int adj = min(subs->stream_offset_adj, bytes);
+                       cp += adj;
+                       bytes -= adj;
+                       subs->stream_offset_adj -= adj;
+               }
                frames = bytes / stride;
                if (!subs->txfr_quirk)
                        bytes = frames * stride;
index adb3b62..f4fb002 100644 (file)
@@ -2680,6 +2680,10 @@ YAMAHA_DEVICE(0x7010, "UB99"),
                .data = (const struct snd_usb_audio_quirk[]) {
                        {
                                .ifnum = 0,
+                               .type = QUIRK_AUDIO_STANDARD_MIXER,
+                       },
+                       {
+                               .ifnum = 0,
                                .type = QUIRK_AUDIO_FIXED_ENDPOINT,
                                .data = &(const struct audioformat) {
                                        .formats = SNDRV_PCM_FMTBIT_S24_3LE,
@@ -2690,6 +2694,32 @@ YAMAHA_DEVICE(0x7010, "UB99"),
                                        .attributes = UAC_EP_CS_ATTR_SAMPLE_RATE,
                                        .endpoint = 0x01,
                                        .ep_attr = USB_ENDPOINT_XFER_ISOC,
+                                       .datainterval = 1,
+                                       .maxpacksize = 0x024c,
+                                       .rates = SNDRV_PCM_RATE_44100 |
+                                                SNDRV_PCM_RATE_48000,
+                                       .rate_min = 44100,
+                                       .rate_max = 48000,
+                                       .nr_rates = 2,
+                                       .rate_table = (unsigned int[]) {
+                                               44100, 48000
+                                       }
+                               }
+                       },
+                       {
+                               .ifnum = 0,
+                               .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+                               .data = &(const struct audioformat) {
+                                       .formats = SNDRV_PCM_FMTBIT_S24_3LE,
+                                       .channels = 2,
+                                       .iface = 0,
+                                       .altsetting = 1,
+                                       .altset_idx = 1,
+                                       .attributes = 0,
+                                       .endpoint = 0x82,
+                                       .ep_attr = USB_ENDPOINT_XFER_ISOC,
+                                       .datainterval = 1,
+                                       .maxpacksize = 0x0126,
                                        .rates = SNDRV_PCM_RATE_44100 |
                                                 SNDRV_PCM_RATE_48000,
                                        .rate_min = 44100,
@@ -3558,6 +3588,62 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
                }
        }
 },
+{
+       /*
+        * PIONEER DJ DDJ-RB
+        * PCM is 4 channels out and 2 dummy channels in, at a fixed 44.1 kHz rate.
+        * The feedback for the output is the dummy input.
+        */
+       USB_DEVICE_VENDOR_SPEC(0x2b73, 0x000e),
+       .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+               .ifnum = QUIRK_ANY_INTERFACE,
+               .type = QUIRK_COMPOSITE,
+               .data = (const struct snd_usb_audio_quirk[]) {
+                       {
+                               .ifnum = 0,
+                               .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+                               .data = &(const struct audioformat) {
+                                       .formats = SNDRV_PCM_FMTBIT_S24_3LE,
+                                       .channels = 4,
+                                       .iface = 0,
+                                       .altsetting = 1,
+                                       .altset_idx = 1,
+                                       .endpoint = 0x01,
+                                       .ep_attr = USB_ENDPOINT_XFER_ISOC|
+                                                  USB_ENDPOINT_SYNC_ASYNC,
+                                       .rates = SNDRV_PCM_RATE_44100,
+                                       .rate_min = 44100,
+                                       .rate_max = 44100,
+                                       .nr_rates = 1,
+                                       .rate_table = (unsigned int[]) { 44100 }
+                               }
+                       },
+                       {
+                               .ifnum = 0,
+                               .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+                               .data = &(const struct audioformat) {
+                                       .formats = SNDRV_PCM_FMTBIT_S24_3LE,
+                                       .channels = 2,
+                                       .iface = 0,
+                                       .altsetting = 1,
+                                       .altset_idx = 1,
+                                       .endpoint = 0x82,
+                                       .ep_attr = USB_ENDPOINT_XFER_ISOC|
+                                                USB_ENDPOINT_SYNC_ASYNC|
+                                                USB_ENDPOINT_USAGE_IMPLICIT_FB,
+                                       .rates = SNDRV_PCM_RATE_44100,
+                                       .rate_min = 44100,
+                                       .rate_max = 44100,
+                                       .nr_rates = 1,
+                                       .rate_table = (unsigned int[]) { 44100 }
+                               }
+                       },
+                       {
+                               .ifnum = -1
+                       }
+               }
+       }
+},
 
 #define ALC1220_VB_DESKTOP(vend, prod) { \
        USB_DEVICE(vend, prod), \
@@ -3658,11 +3744,17 @@ ALC1220_VB_DESKTOP(0x26ce, 0x0a01), /* Asrock TRX40 Creator */
  * they pretend to be 96kHz mono as a workaround for stereo being broken
  * by that...
  *
- * They also have swapped L-R channels, but that's for userspace to deal
- * with.
+ * They also have an issue with initial stream alignment that causes the
+ * channels to be swapped and out of phase, which is dealt with in quirks.c.
  */
 {
-       USB_DEVICE(0x534d, 0x2109),
+       .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
+                      USB_DEVICE_ID_MATCH_INT_CLASS |
+                      USB_DEVICE_ID_MATCH_INT_SUBCLASS,
+       .idVendor = 0x534d,
+       .idProduct = 0x2109,
+       .bInterfaceClass = USB_CLASS_AUDIO,
+       .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
        .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
                .vendor_name = "MacroSilicon",
                .product_name = "MS2109",
index c551141..abf99b8 100644 (file)
@@ -1495,6 +1495,9 @@ void snd_usb_set_format_quirk(struct snd_usb_substream *subs,
        case USB_ID(0x2b73, 0x000a): /* Pioneer DJ DJM-900NXS2 */
                pioneer_djm_set_format_quirk(subs);
                break;
+       case USB_ID(0x534d, 0x2109): /* MacroSilicon MS2109 */
+               subs->stream_offset_adj = 2;
+               break;
        }
 }
 
index 4d1e657..ca76ba5 100644 (file)
@@ -94,6 +94,7 @@ static void snd_usb_init_substream(struct snd_usb_stream *as,
        subs->tx_length_quirk = as->chip->tx_length_quirk;
        subs->speed = snd_usb_get_speed(subs->dev);
        subs->pkt_offset_adj = 0;
+       subs->stream_offset_adj = 0;
 
        snd_usb_set_pcm_ops(as->pcm, stream);
 
index f599064..bdf5f10 100644 (file)
@@ -48,6 +48,24 @@ enum perf_event_powerpc_regs {
        PERF_REG_POWERPC_DSISR,
        PERF_REG_POWERPC_SIER,
        PERF_REG_POWERPC_MMCRA,
-       PERF_REG_POWERPC_MAX,
+       /* Extended registers */
+       PERF_REG_POWERPC_MMCR0,
+       PERF_REG_POWERPC_MMCR1,
+       PERF_REG_POWERPC_MMCR2,
+       PERF_REG_POWERPC_MMCR3,
+       PERF_REG_POWERPC_SIER2,
+       PERF_REG_POWERPC_SIER3,
+       /* Max regs without the extended regs */
+       PERF_REG_POWERPC_MAX = PERF_REG_POWERPC_MMCRA + 1,
 };
+
+#define PERF_REG_PMU_MASK      ((1ULL << PERF_REG_POWERPC_MAX) - 1)
+
+/* PERF_REG_EXTENDED_MASK value for CPU_FTR_ARCH_300 */
+#define PERF_REG_PMU_MASK_300   (((1ULL << (PERF_REG_POWERPC_MMCR2 + 1)) - 1) - PERF_REG_PMU_MASK)
+/* PERF_REG_EXTENDED_MASK value for CPU_FTR_ARCH_31 */
+#define PERF_REG_PMU_MASK_31   (((1ULL << (PERF_REG_POWERPC_SIER3 + 1)) - 1) - PERF_REG_PMU_MASK)
+
+#define PERF_REG_MAX_ISA_300   (PERF_REG_POWERPC_MMCR2 + 1)
+#define PERF_REG_MAX_ISA_31    (PERF_REG_POWERPC_SIER3 + 1)
 #endif /* _UAPI_ASM_POWERPC_PERF_REGS_H */
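
As a quick sanity check on the bit layout defined above, a tiny user-space program can print the three masks; this is a sketch that assumes a powerpc toolchain whose installed uapi headers already carry this change:

	#include <stdio.h>
	#include <asm/perf_regs.h>

	int main(void)
	{
		/* Bits below PERF_REG_POWERPC_MAX: the standard register set. */
		printf("standard mask : %#llx\n",
		       (unsigned long long)PERF_REG_PMU_MASK);
		/* Extended bits for ISA 3.0: MMCR0-MMCR2. */
		printf("ISA 3.0 extra : %#llx\n",
		       (unsigned long long)PERF_REG_PMU_MASK_300);
		/* Extended bits for ISA 3.1: MMCR0-MMCR3 plus SIER2/SIER3. */
		printf("ISA 3.1 extra : %#llx\n",
		       (unsigned long long)PERF_REG_PMU_MASK_31);
		return 0;
	}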
index 436ec76..7a6b148 100644 (file)
@@ -231,11 +231,13 @@ struct kvm_guest_debug_arch {
 #define KVM_SYNC_GSCB   (1UL << 9)
 #define KVM_SYNC_BPBC   (1UL << 10)
 #define KVM_SYNC_ETOKEN (1UL << 11)
+#define KVM_SYNC_DIAG318 (1UL << 12)
 
 #define KVM_SYNC_S390_VALID_FIELDS \
        (KVM_SYNC_PREFIX | KVM_SYNC_GPRS | KVM_SYNC_ACRS | KVM_SYNC_CRS | \
         KVM_SYNC_ARCH0 | KVM_SYNC_PFAULT | KVM_SYNC_VRS | KVM_SYNC_RICCB | \
-        KVM_SYNC_FPRS | KVM_SYNC_GSCB | KVM_SYNC_BPBC | KVM_SYNC_ETOKEN)
+        KVM_SYNC_FPRS | KVM_SYNC_GSCB | KVM_SYNC_BPBC | KVM_SYNC_ETOKEN | \
+        KVM_SYNC_DIAG318)
 
 /* length and alignment of the sdnx as a power of two */
 #define SDNXC 8
@@ -264,7 +266,8 @@ struct kvm_sync_regs {
        __u8 reserved2 : 7;
        __u8 padding1[51];      /* riccb needs to be 64byte aligned */
        __u8 riccb[64];         /* runtime instrumentation controls block */
-       __u8 padding2[192];     /* sdnx needs to be 256byte aligned */
+       __u64 diag318;          /* diagnose 0x318 info */
+       __u8 padding2[184];     /* sdnx needs to be 256byte aligned */
        union {
                __u8 sdnx[SDNXL];  /* state description annex */
                struct {
index 02dabc9..2901d5d 100644 (file)
@@ -96,6 +96,7 @@
 #define X86_FEATURE_SYSCALL32          ( 3*32+14) /* "" syscall in IA32 userspace */
 #define X86_FEATURE_SYSENTER32         ( 3*32+15) /* "" sysenter in IA32 userspace */
 #define X86_FEATURE_REP_GOOD           ( 3*32+16) /* REP microcode works well */
+/* free                                        ( 3*32+17) */
 #define X86_FEATURE_LFENCE_RDTSC       ( 3*32+18) /* "" LFENCE synchronizes RDTSC */
 #define X86_FEATURE_ACC_POWER          ( 3*32+19) /* AMD Accumulated Power Mechanism */
 #define X86_FEATURE_NOPL               ( 3*32+20) /* The NOPL (0F 1F) instructions */
 #define X86_FEATURE_EXTD_APICID                ( 3*32+26) /* Extended APICID (8 bits) */
 #define X86_FEATURE_AMD_DCM            ( 3*32+27) /* AMD multi-node processor */
 #define X86_FEATURE_APERFMPERF         ( 3*32+28) /* P-State hardware coordination feedback capability (APERF/MPERF MSRs) */
+/* free                                        ( 3*32+29) */
 #define X86_FEATURE_NONSTOP_TSC_S3     ( 3*32+30) /* TSC doesn't stop in S3 state */
 #define X86_FEATURE_TSC_KNOWN_FREQ     ( 3*32+31) /* TSC has known frequency */
 
 #define X86_FEATURE_SRBDS_CTRL         (18*32+ 9) /* "" SRBDS mitigation MSR available */
 #define X86_FEATURE_MD_CLEAR           (18*32+10) /* VERW clears CPU buffers */
 #define X86_FEATURE_TSX_FORCE_ABORT    (18*32+13) /* "" TSX_FORCE_ABORT */
+#define X86_FEATURE_SERIALIZE          (18*32+14) /* SERIALIZE instruction */
 #define X86_FEATURE_PCONFIG            (18*32+18) /* Intel PCONFIG */
+#define X86_FEATURE_ARCH_LBR           (18*32+19) /* Intel ARCH LBR */
 #define X86_FEATURE_SPEC_CTRL          (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
 #define X86_FEATURE_INTEL_STIBP                (18*32+27) /* "" Single Thread Indirect Branch Predictors */
 #define X86_FEATURE_FLUSH_L1D          (18*32+28) /* Flush L1D cache */
index e8370e6..2859ee4 100644 (file)
 
 #define MSR_LBR_SELECT                 0x000001c8
 #define MSR_LBR_TOS                    0x000001c9
+
+#define MSR_IA32_POWER_CTL             0x000001fc
+#define MSR_IA32_POWER_CTL_BIT_EE      19
+
 #define MSR_LBR_NHM_FROM               0x00000680
 #define MSR_LBR_NHM_TO                 0x000006c0
 #define MSR_LBR_CORE_FROM              0x00000040
 #define LBR_INFO_MISPRED               BIT_ULL(63)
 #define LBR_INFO_IN_TX                 BIT_ULL(62)
 #define LBR_INFO_ABORT                 BIT_ULL(61)
+#define LBR_INFO_CYC_CNT_VALID         BIT_ULL(60)
 #define LBR_INFO_CYCLES                        0xffff
+#define LBR_INFO_BR_TYPE_OFFSET                56
+#define LBR_INFO_BR_TYPE               (0xfull << LBR_INFO_BR_TYPE_OFFSET)
+
+#define MSR_ARCH_LBR_CTL               0x000014ce
+#define ARCH_LBR_CTL_LBREN             BIT(0)
+#define ARCH_LBR_CTL_CPL_OFFSET                1
+#define ARCH_LBR_CTL_CPL               (0x3ull << ARCH_LBR_CTL_CPL_OFFSET)
+#define ARCH_LBR_CTL_STACK_OFFSET      3
+#define ARCH_LBR_CTL_STACK             (0x1ull << ARCH_LBR_CTL_STACK_OFFSET)
+#define ARCH_LBR_CTL_FILTER_OFFSET     16
+#define ARCH_LBR_CTL_FILTER            (0x7full << ARCH_LBR_CTL_FILTER_OFFSET)
+#define MSR_ARCH_LBR_DEPTH             0x000014cf
+#define MSR_ARCH_LBR_FROM_0            0x00001500
+#define MSR_ARCH_LBR_TO_0              0x00001600
+#define MSR_ARCH_LBR_INFO_0            0x00001200
 
 #define MSR_IA32_PEBS_ENABLE           0x000003f1
 #define MSR_PEBS_DATA_CFG              0x000003f2
 
 #define MSR_PEBS_FRONTEND              0x000003f7
 
-#define MSR_IA32_POWER_CTL             0x000001fc
-
 #define MSR_IA32_MC0_CTL               0x00000400
 #define MSR_IA32_MC0_STATUS            0x00000401
 #define MSR_IA32_MC0_ADDR              0x00000402
 #define MSR_AMD64_PATCH_LEVEL          0x0000008b
 #define MSR_AMD64_TSC_RATIO            0xc0000104
 #define MSR_AMD64_NB_CFG               0xc001001f
-#define MSR_AMD64_CPUID_FN_1           0xc0011004
 #define MSR_AMD64_PATCH_LOADER         0xc0010020
 #define MSR_AMD_PERF_CTL               0xc0010062
 #define MSR_AMD_PERF_STATUS            0xc0010063
 #define MSR_AMD64_OSVW_STATUS          0xc0010141
 #define MSR_AMD_PPIN_CTL               0xc00102f0
 #define MSR_AMD_PPIN                   0xc00102f1
+#define MSR_AMD64_CPUID_FN_1           0xc0011004
 #define MSR_AMD64_LS_CFG               0xc0011020
 #define MSR_AMD64_DC_CFG               0xc0011022
 #define MSR_AMD64_BU_CFG2              0xc001102a
 #define MSR_F16H_DR0_ADDR_MASK         0xc0011027
 
 /* Fam 15h MSRs */
+#define MSR_F15H_CU_PWR_ACCUMULATOR     0xc001007a
+#define MSR_F15H_CU_MAX_PWR_ACCUMULATOR 0xc001007b
 #define MSR_F15H_PERF_CTL              0xc0010200
 #define MSR_F15H_PERF_CTL0             MSR_F15H_PERF_CTL
 #define MSR_F15H_PERF_CTL1             (MSR_F15H_PERF_CTL + 2)
index ede162f..0e93107 100644 (file)
@@ -67,7 +67,7 @@ static int dump_prog_id_as_func_ptr(const struct btf_dumper *d,
        if (!info->btf_id || !info->nr_func_info ||
            btf__get_from_id(info->btf_id, &prog_btf))
                goto print;
-       finfo = (struct bpf_func_info *)info->func_info;
+       finfo = u64_to_ptr(info->func_info);
        func_type = btf__type_by_id(prog_btf, finfo->type_id);
        if (!func_type || !btf_is_func(func_type))
                goto print;
index 8a4c2b3..f611846 100644 (file)
@@ -143,6 +143,20 @@ static int codegen_datasec_def(struct bpf_object *obj,
                              var_name, align);
                        return -EINVAL;
                }
+               /* Assume 32-bit architectures when generating data section
+                * struct memory layout. Given bpftool can't know which target
+                * host architecture it's emitting skeleton for, we need to be
+                * conservative and assume 32-bit one to ensure enough padding
+                * bytes are generated for pointer and long types. This will
+                * still work correctly for 64-bit architectures, because in
+                * the worst case we'll generate unnecessary padding field,
+                * which on 64-bit architectures is not strictly necessary and
+                * would be handled by natural 8-byte alignment. But it still
+                * will be a correct memory layout, based on recorded offsets
+                * in BTF.
+                */
+               if (align > 4)
+                       align = 4;
 
                align_off = (off + align - 1) / align * align;
                if (align_off != need_off) {
@@ -397,7 +411,7 @@ static int do_skeleton(int argc, char **argv)
                {                                                           \n\
                        struct %1$s *obj;                                   \n\
                                                                            \n\
-                       obj = (typeof(obj))calloc(1, sizeof(*obj));         \n\
+                       obj = (struct %1$s *)calloc(1, sizeof(*obj));       \n\
                        if (!obj)                                           \n\
                                return NULL;                                \n\
                        if (%1$s__create_skeleton(obj))                     \n\
@@ -461,7 +475,7 @@ static int do_skeleton(int argc, char **argv)
                {                                                           \n\
                        struct bpf_object_skeleton *s;                      \n\
                                                                            \n\
-                       s = (typeof(s))calloc(1, sizeof(*s));               \n\
+                       s = (struct bpf_object_skeleton *)calloc(1, sizeof(*s));\n\
                        if (!s)                                             \n\
                                return -1;                                  \n\
                        obj->skeleton = s;                                  \n\
@@ -479,7 +493,7 @@ static int do_skeleton(int argc, char **argv)
                                /* maps */                                  \n\
                                s->map_cnt = %zu;                           \n\
                                s->map_skel_sz = sizeof(*s->maps);          \n\
-                               s->maps = (typeof(s->maps))calloc(s->map_cnt, s->map_skel_sz);\n\
+                               s->maps = (struct bpf_map_skeleton *)calloc(s->map_cnt, s->map_skel_sz);\n\
                                if (!s->maps)                               \n\
                                        goto err;                           \n\
                        ",
@@ -515,7 +529,7 @@ static int do_skeleton(int argc, char **argv)
                                /* programs */                              \n\
                                s->prog_cnt = %zu;                          \n\
                                s->prog_skel_sz = sizeof(*s->progs);        \n\
-                               s->progs = (typeof(s->progs))calloc(s->prog_cnt, s->prog_skel_sz);\n\
+                               s->progs = (struct bpf_prog_skeleton *)calloc(s->prog_cnt, s->prog_skel_sz);\n\
                                if (!s->progs)                              \n\
                                        goto err;                           \n\
                        ",
index c9dba75..3b1aad7 100644 (file)
@@ -11,6 +11,7 @@
 static int do_pin(int argc, char **argv)
 {
        DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, iter_opts);
+       union bpf_iter_link_info linfo;
        const char *objfile, *path;
        struct bpf_program *prog;
        struct bpf_object *obj;
@@ -36,6 +37,11 @@ static int do_pin(int argc, char **argv)
                        map_fd = map_parse_fd(&argc, &argv);
                        if (map_fd < 0)
                                return -1;
+
+                       memset(&linfo, 0, sizeof(linfo));
+                       linfo.map.map_fd = map_fd;
+                       iter_opts.link_info = &linfo;
+                       iter_opts.link_info_len = sizeof(linfo);
                }
        }
 
@@ -57,9 +63,6 @@ static int do_pin(int argc, char **argv)
                goto close_obj;
        }
 
-       if (map_fd >= 0)
-               iter_opts.map_fd = map_fd;
-
        link = bpf_program__attach_iter(prog, &iter_opts);
        if (IS_ERR(link)) {
                err = PTR_ERR(link);
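
The attach flow this hunk switches to can be condensed into a small helper; this is a minimal sketch assuming a libbpf build that already exposes the link_info/link_info_len opts fields used above:

	#include <string.h>
	#include <linux/bpf.h>
	#include <bpf/libbpf.h>

	/* Attach an iterator program to one specific map, mirroring the bpftool
	 * code above: the target map fd now travels through
	 * union bpf_iter_link_info instead of a dedicated map_fd opt. */
	static struct bpf_link *attach_map_iter(struct bpf_program *prog, int map_fd)
	{
		DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
		union bpf_iter_link_info linfo;

		memset(&linfo, 0, sizeof(linfo));
		linfo.map.map_fd = map_fd;

		opts.link_info = &linfo;
		opts.link_info_len = sizeof(linfo);

		return bpf_program__attach_iter(prog, &opts);
	}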
index 1b79375..a89f09e 100644 (file)
@@ -106,7 +106,7 @@ static int show_link_close_json(int fd, struct bpf_link_info *info)
        switch (info->type) {
        case BPF_LINK_TYPE_RAW_TRACEPOINT:
                jsonw_string_field(json_wtr, "tp_name",
-                                  (const char *)info->raw_tracepoint.tp_name);
+                                  u64_to_ptr(info->raw_tracepoint.tp_name));
                break;
        case BPF_LINK_TYPE_TRACING:
                err = get_prog_info(info->prog_id, &prog_info);
@@ -185,7 +185,7 @@ static int show_link_close_plain(int fd, struct bpf_link_info *info)
        switch (info->type) {
        case BPF_LINK_TYPE_RAW_TRACEPOINT:
                printf("\n\ttp '%s'  ",
-                      (const char *)info->raw_tracepoint.tp_name);
+                      (const char *)u64_to_ptr(info->raw_tracepoint.tp_name));
                break;
        case BPF_LINK_TYPE_TRACING:
                err = get_prog_info(info->prog_id, &prog_info);
index e3a79b5..c46e521 100644 (file)
 /* Make sure we do not use kernel-only integer typedefs */
 #pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
 
-#define ptr_to_u64(ptr)        ((__u64)(unsigned long)(ptr))
+static inline __u64 ptr_to_u64(const void *ptr)
+{
+       return (__u64)(unsigned long)ptr;
+}
+
+static inline void *u64_to_ptr(__u64 ptr)
+{
+       return (void *)(unsigned long)ptr;
+}
 
 #define NEXT_ARG()     ({ argc--; argv++; if (argc < 0) usage(); })
 #define NEXT_ARGP()    ({ (*argc)--; (*argv)++; if (*argc < 0) usage(); })
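
A standalone illustration of the round trip these helpers cover (the two inline functions are copied from the hunk above so the snippet builds outside bpftool): kernel-facing structs such as bpf_prog_info carry user pointers in __u64 fields, and the helpers replace the open-coded double casts used before:

	#include <linux/bpf.h>
	#include <linux/types.h>

	static inline __u64 ptr_to_u64(const void *ptr)
	{
		return (__u64)(unsigned long)ptr;
	}

	static inline void *u64_to_ptr(__u64 ptr)
	{
		return (void *)(unsigned long)ptr;
	}

	int main(void)
	{
		struct bpf_prog_info info = {};
		__u64 ksyms[4] = {};

		/* Hand the buffer to the kernel side of the ABI... */
		info.jited_ksyms = ptr_to_u64(ksyms);
		/* ...and get a usable pointer back without casting by hand. */
		__u64 *back = u64_to_ptr(info.jited_ksyms);

		return back == ksyms ? 0 : 1;
	}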
index 158995d..d393eb8 100644 (file)
@@ -428,14 +428,14 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
                        p_info("no instructions returned");
                        return -1;
                }
-               buf = (unsigned char *)(info->jited_prog_insns);
+               buf = u64_to_ptr(info->jited_prog_insns);
                member_len = info->jited_prog_len;
        } else {        /* DUMP_XLATED */
                if (info->xlated_prog_len == 0 || !info->xlated_prog_insns) {
                        p_err("error retrieving insn dump: kernel.kptr_restrict set?");
                        return -1;
                }
-               buf = (unsigned char *)info->xlated_prog_insns;
+               buf = u64_to_ptr(info->xlated_prog_insns);
                member_len = info->xlated_prog_len;
        }
 
@@ -444,7 +444,7 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
                return -1;
        }
 
-       func_info = (void *)info->func_info;
+       func_info = u64_to_ptr(info->func_info);
 
        if (info->nr_line_info) {
                prog_linfo = bpf_prog_linfo__new(info);
@@ -462,7 +462,7 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
 
                n = write(fd, buf, member_len);
                close(fd);
-               if (n != member_len) {
+               if (n != (ssize_t)member_len) {
                        p_err("error writing output file: %s",
                              n < 0 ? strerror(errno) : "short write");
                        return -1;
@@ -492,13 +492,13 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
                        __u32 i;
                        if (info->nr_jited_ksyms) {
                                kernel_syms_load(&dd);
-                               ksyms = (__u64 *) info->jited_ksyms;
+                               ksyms = u64_to_ptr(info->jited_ksyms);
                        }
 
                        if (json_output)
                                jsonw_start_array(json_wtr);
 
-                       lens = (__u32 *) info->jited_func_lens;
+                       lens = u64_to_ptr(info->jited_func_lens);
                        for (i = 0; i < info->nr_jited_func_lens; i++) {
                                if (ksyms) {
                                        sym = kernel_syms_search(&dd, ksyms[i]);
@@ -559,7 +559,7 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
        } else {
                kernel_syms_load(&dd);
                dd.nr_jited_ksyms = info->nr_jited_ksyms;
-               dd.jited_ksyms = (__u64 *) info->jited_ksyms;
+               dd.jited_ksyms = u64_to_ptr(info->jited_ksyms);
                dd.btf = btf;
                dd.func_info = func_info;
                dd.finfo_rec_size = info->func_info_rec_size;
@@ -1681,7 +1681,7 @@ static char *profile_target_name(int tgt_fd)
                goto out;
        }
 
-       func_info = (struct bpf_func_info *)(info_linear->info.func_info);
+       func_info = u64_to_ptr(info_linear->info.func_info);
        t = btf__type_by_id(btf, func_info[0].type_id);
        if (!t) {
                p_err("btf %d doesn't have type %d",
index 52d8833..4d9ecb9 100644 (file)
@@ -566,6 +566,7 @@ static int sets_patch(struct object *obj)
 
                next = rb_next(next);
        }
+       return 0;
 }
 
 static int symbols_patch(struct object *obj)
index cb15237..c1daf4d 100644 (file)
@@ -8,7 +8,7 @@ endif
 
 feature_check = $(eval $(feature_check_code))
 define feature_check_code
-  feature-$(1) := $(shell $(MAKE) OUTPUT=$(OUTPUT_FEATURES) CFLAGS="$(EXTRA_CFLAGS) $(FEATURE_CHECK_CFLAGS-$(1))" CXXFLAGS="$(EXTRA_CXXFLAGS) $(FEATURE_CHECK_CXXFLAGS-$(1))" LDFLAGS="$(LDFLAGS) $(FEATURE_CHECK_LDFLAGS-$(1))" -C $(feature_dir) $(OUTPUT_FEATURES)test-$1.bin >/dev/null 2>/dev/null && echo 1 || echo 0)
+  feature-$(1) := $(shell $(MAKE) OUTPUT=$(OUTPUT_FEATURES) CC="$(CC)" CXX="$(CXX)" CFLAGS="$(EXTRA_CFLAGS) $(FEATURE_CHECK_CFLAGS-$(1))" CXXFLAGS="$(EXTRA_CXXFLAGS) $(FEATURE_CHECK_CXXFLAGS-$(1))" LDFLAGS="$(LDFLAGS) $(FEATURE_CHECK_LDFLAGS-$(1))" -C $(feature_dir) $(OUTPUT_FEATURES)test-$1.bin >/dev/null 2>/dev/null && echo 1 || echo 0)
 endef
 
 feature_set = $(eval $(feature_set_code))
@@ -98,7 +98,8 @@ FEATURE_TESTS_EXTRA :=                  \
          llvm-version                   \
          clang                          \
          libbpf                         \
-         libpfm4
+         libpfm4                        \
+         libdebuginfod
 
 FEATURE_TESTS ?= $(FEATURE_TESTS_BASIC)
 
index 88371f7..d220fe9 100644 (file)
@@ -26,6 +26,7 @@ FILES=                                          \
          test-libelf-gelf_getnote.bin           \
          test-libelf-getshdrstrndx.bin          \
          test-libelf-mmap.bin                   \
+         test-libdebuginfod.bin                 \
          test-libnuma.bin                       \
          test-numa_num_possible_cpus.bin        \
          test-libperl.bin                       \
@@ -74,8 +75,6 @@ FILES=                                          \
 
 FILES := $(addprefix $(OUTPUT),$(FILES))
 
-CC ?= $(CROSS_COMPILE)gcc
-CXX ?= $(CROSS_COMPILE)g++
 PKG_CONFIG ?= $(CROSS_COMPILE)pkg-config
 LLVM_CONFIG ?= llvm-config
 CLANG ?= clang
@@ -159,6 +158,9 @@ $(OUTPUT)test-libelf-gelf_getnote.bin:
 $(OUTPUT)test-libelf-getshdrstrndx.bin:
        $(BUILD) -lelf
 
+$(OUTPUT)test-libdebuginfod.bin:
+       $(BUILD) -ldebuginfod
+
 $(OUTPUT)test-libnuma.bin:
        $(BUILD) -lnuma
 
diff --git a/tools/build/feature/test-libdebuginfod.c b/tools/build/feature/test-libdebuginfod.c
new file mode 100644 (file)
index 0000000..da22548
--- /dev/null
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <elfutils/debuginfod.h>
+
+int main(void)
+{
+       debuginfod_client* c = debuginfod_begin();
+       return (long)c;
+}
index c8c189a..995b36c 100644 (file)
@@ -850,6 +850,8 @@ __SYSCALL(__NR_pidfd_open, sys_pidfd_open)
 #define __NR_clone3 435
 __SYSCALL(__NR_clone3, sys_clone3)
 #endif
+#define __NR_close_range 436
+__SYSCALL(__NR_close_range, sys_close_range)
 
 #define __NR_openat2 437
 __SYSCALL(__NR_openat2, sys_openat2)
index 14b67cd..0054606 100644 (file)
@@ -55,7 +55,7 @@ extern "C" {
  *     cause the related events to not be seen.
  *
  * I915_RESET_UEVENT - Event is generated just before an attempt to reset the
- *     the GPU. The value supplied with the event is always 1. NOTE: Disable
+ *     GPU. The value supplied with the event is always 1. NOTE: Disable
  *     reset via module parameter will cause this event to not be seen.
  */
 #define I915_L3_PARITY_UEVENT          "L3_PARITY_ERROR"
@@ -1934,7 +1934,7 @@ enum drm_i915_perf_property_id {
 
        /**
         * The value specifies which set of OA unit metrics should be
-        * be configured, defining the contents of any OA unit reports.
+        * configured, defining the contents of any OA unit reports.
         *
         * This property is available in perf revision 1.
         */
index b134e67..0480f89 100644 (file)
@@ -81,6 +81,12 @@ struct bpf_cgroup_storage_key {
        __u32   attach_type;            /* program attach type */
 };
 
+union bpf_iter_link_info {
+       struct {
+               __u32   map_fd;
+       } map;
+};
+
 /* BPF syscall commands, see bpf(2) man-page for details. */
 enum bpf_cmd {
        BPF_MAP_CREATE,
@@ -249,13 +255,6 @@ enum bpf_link_type {
        MAX_BPF_LINK_TYPE,
 };
 
-enum bpf_iter_link_info {
-       BPF_ITER_LINK_UNSPEC = 0,
-       BPF_ITER_LINK_MAP_FD = 1,
-
-       MAX_BPF_ITER_LINK_INFO,
-};
-
 /* cgroup-bpf attach flags used in BPF_PROG_ATTACH command
  *
  * NONE(default): No further bpf programs allowed in the subtree.
@@ -623,6 +622,8 @@ union bpf_attr {
                };
                __u32           attach_type;    /* attach type */
                __u32           flags;          /* extra flags */
+               __aligned_u64   iter_info;      /* extra bpf_iter_link_info */
+               __u32           iter_info_len;  /* iter_info length */
        } link_create;
 
        struct { /* struct used by BPF_LINK_UPDATE command */
index 8533bf0..3d0d823 100644 (file)
@@ -123,6 +123,7 @@ struct in_addr {
 #define IP_CHECKSUM    23
 #define IP_BIND_ADDRESS_NO_PORT        24
 #define IP_RECVFRAGSIZE        25
+#define IP_RECVERR_RFC4884     26
 
 /* IP_MTU_DISCOVER values */
 #define IP_PMTUDISC_DONT               0       /* Never send DF frames */
index 4fdf303..f6d8603 100644 (file)
@@ -289,6 +289,7 @@ struct kvm_run {
                /* KVM_EXIT_FAIL_ENTRY */
                struct {
                        __u64 hardware_entry_failure_reason;
+                       __u32 cpu;
                } fail_entry;
                /* KVM_EXIT_EXCEPTION */
                struct {
@@ -1031,6 +1032,9 @@ struct kvm_ppc_resize_hpt {
 #define KVM_CAP_PPC_SECURE_GUEST 181
 #define KVM_CAP_HALT_POLL 182
 #define KVM_CAP_ASYNC_PF_INT 183
+#define KVM_CAP_LAST_CPU 184
+#define KVM_CAP_SMALLER_MAXPHYADDR 185
+#define KVM_CAP_S390_DIAG318 186
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
index 21a1edd..077e7ee 100644 (file)
@@ -383,7 +383,8 @@ struct perf_event_attr {
                                bpf_event      :  1, /* include bpf events */
                                aux_output     :  1, /* generate AUX records instead of events */
                                cgroup         :  1, /* include cgroup events */
-                               __reserved_1   : 31;
+                               text_poke      :  1, /* include text poke events */
+                               __reserved_1   : 30;
 
        union {
                __u32           wakeup_events;    /* wakeup every n events */
@@ -1041,12 +1042,35 @@ enum perf_event_type {
         */
        PERF_RECORD_CGROUP                      = 19,
 
+       /*
+        * Records changes to kernel text i.e. self-modified code. 'old_len' is
+        * the number of old bytes, 'new_len' is the number of new bytes. Either
+        * 'old_len' or 'new_len' may be zero to indicate, for example, the
+        * addition or removal of a trampoline. 'bytes' contains the old bytes
+        * followed immediately by the new bytes.
+        *
+        * struct {
+        *      struct perf_event_header        header;
+        *      u64                             addr;
+        *      u16                             old_len;
+        *      u16                             new_len;
+        *      u8                              bytes[];
+        *      struct sample_id                sample_id;
+        * };
+        */
+       PERF_RECORD_TEXT_POKE                   = 20,
+
        PERF_RECORD_MAX,                        /* non-ABI */
 };
 
 enum perf_record_ksymbol_type {
        PERF_RECORD_KSYMBOL_TYPE_UNKNOWN        = 0,
        PERF_RECORD_KSYMBOL_TYPE_BPF            = 1,
+       /*
+        * Out of line code such as kprobe-replaced instructions or optimized
+        * kprobes or ftrace trampolines.
+        */
+       PERF_RECORD_KSYMBOL_TYPE_OOL            = 2,
        PERF_RECORD_KSYMBOL_TYPE_MAX            /* non-ABI */
 };
 
index 0c23496..7523218 100644 (file)
@@ -91,6 +91,8 @@
 
 /* Use message type V2 */
 #define VHOST_BACKEND_F_IOTLB_MSG_V2 0x1
+/* IOTLB can accept batching hints */
+#define VHOST_BACKEND_F_IOTLB_BATCH  0x2
 
 #define VHOST_SET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x25, __u64)
 #define VHOST_GET_BACKEND_FEATURES _IOR(VHOST_VIRTIO, 0x26, __u64)
index 58d44d5..5e6cb9d 100644 (file)
@@ -8,6 +8,7 @@
 #include <poll.h>
 #include <stdlib.h>
 #include <unistd.h>
+#include <string.h>
 
 void fdarray__init(struct fdarray *fda, int nr_autogrow)
 {
@@ -19,7 +20,7 @@ void fdarray__init(struct fdarray *fda, int nr_autogrow)
 
 int fdarray__grow(struct fdarray *fda, int nr)
 {
-       void *priv;
+       struct priv *priv;
        int nr_alloc = fda->nr_alloc + nr;
        size_t psize = sizeof(fda->priv[0]) * nr_alloc;
        size_t size  = sizeof(struct pollfd) * nr_alloc;
@@ -34,6 +35,9 @@ int fdarray__grow(struct fdarray *fda, int nr)
                return -ENOMEM;
        }
 
+       memset(&entries[fda->nr_alloc], 0, sizeof(struct pollfd) * nr);
+       memset(&priv[fda->nr_alloc], 0, sizeof(fda->priv[0]) * nr);
+
        fda->nr_alloc = nr_alloc;
        fda->entries  = entries;
        fda->priv     = priv;
@@ -69,7 +73,7 @@ void fdarray__delete(struct fdarray *fda)
        free(fda);
 }
 
-int fdarray__add(struct fdarray *fda, int fd, short revents)
+int fdarray__add(struct fdarray *fda, int fd, short revents, enum fdarray_flags flags)
 {
        int pos = fda->nr;
 
@@ -79,6 +83,7 @@ int fdarray__add(struct fdarray *fda, int fd, short revents)
 
        fda->entries[fda->nr].fd     = fd;
        fda->entries[fda->nr].events = revents;
+       fda->priv[fda->nr].flags = flags;
        fda->nr++;
        return pos;
 }
@@ -93,22 +98,22 @@ int fdarray__filter(struct fdarray *fda, short revents,
                return 0;
 
        for (fd = 0; fd < fda->nr; ++fd) {
+               if (!fda->entries[fd].events)
+                       continue;
+
                if (fda->entries[fd].revents & revents) {
                        if (entry_destructor)
                                entry_destructor(fda, fd, arg);
 
+                       fda->entries[fd].revents = fda->entries[fd].events = 0;
                        continue;
                }
 
-               if (fd != nr) {
-                       fda->entries[nr] = fda->entries[fd];
-                       fda->priv[nr]    = fda->priv[fd];
-               }
-
-               ++nr;
+               if (!(fda->priv[fd].flags & fdarray_flag__nonfilterable))
+                       ++nr;
        }
 
-       return fda->nr = nr;
+       return nr;
 }
 
 int fdarray__poll(struct fdarray *fda, int timeout)
index b39557d..7fcf21a 100644 (file)
@@ -21,19 +21,27 @@ struct fdarray {
        int            nr_alloc;
        int            nr_autogrow;
        struct pollfd *entries;
-       union {
-               int    idx;
-               void   *ptr;
+       struct priv {
+               union {
+                       int    idx;
+                       void   *ptr;
+               };
+               unsigned int flags;
        } *priv;
 };
 
+enum fdarray_flags {
+       fdarray_flag__default       = 0x00000000,
+       fdarray_flag__nonfilterable = 0x00000001
+};
+
 void fdarray__init(struct fdarray *fda, int nr_autogrow);
 void fdarray__exit(struct fdarray *fda);
 
 struct fdarray *fdarray__new(int nr_alloc, int nr_autogrow);
 void fdarray__delete(struct fdarray *fda);
 
-int fdarray__add(struct fdarray *fda, int fd, short revents);
+int fdarray__add(struct fdarray *fda, int fd, short revents, enum fdarray_flags flags);
 int fdarray__poll(struct fdarray *fda, int timeout);
 int fdarray__filter(struct fdarray *fda, short revents,
                    void (*entry_destructor)(struct fdarray *fda, int fd, void *arg),
index eab14c9..0750681 100644 (file)
@@ -599,6 +599,9 @@ int bpf_link_create(int prog_fd, int target_fd,
        attr.link_create.target_fd = target_fd;
        attr.link_create.attach_type = attach_type;
        attr.link_create.flags = OPTS_GET(opts, flags, 0);
+       attr.link_create.iter_info =
+               ptr_to_u64(OPTS_GET(opts, iter_info, (void *)0));
+       attr.link_create.iter_info_len = OPTS_GET(opts, iter_info_len, 0);
 
        return sys_bpf(BPF_LINK_CREATE, &attr, sizeof(attr));
 }
index 28855fd..015d13f 100644 (file)
@@ -168,11 +168,14 @@ LIBBPF_API int bpf_prog_detach(int attachable_fd, enum bpf_attach_type type);
 LIBBPF_API int bpf_prog_detach2(int prog_fd, int attachable_fd,
                                enum bpf_attach_type type);
 
+union bpf_iter_link_info; /* defined in up-to-date linux/bpf.h */
 struct bpf_link_create_opts {
        size_t sz; /* size of this struct for forward/backward compatibility */
        __u32 flags;
+       union bpf_iter_link_info *iter_info;
+       __u32 iter_info_len;
 };
-#define bpf_link_create_opts__last_field flags
+#define bpf_link_create_opts__last_field iter_info_len
 
 LIBBPF_API int bpf_link_create(int prog_fd, int target_fd,
                               enum bpf_attach_type attach_type,
index bc14db7..e9a4ecd 100644 (file)
@@ -40,7 +40,7 @@
  * Helper macro to manipulate data structures
  */
 #ifndef offsetof
-#define offsetof(TYPE, MEMBER)  __builtin_offsetof(TYPE, MEMBER)
+#define offsetof(TYPE, MEMBER) ((unsigned long)&((TYPE *)0)->MEMBER)
 #endif
 #ifndef container_of
 #define container_of(ptr, type, member)                                \
index 856b09a..7dfca70 100644 (file)
@@ -41,6 +41,7 @@ struct btf {
        __u32 types_size;
        __u32 data_size;
        int fd;
+       int ptr_sz;
 };
 
 static inline __u64 ptr_to_u64(const void *ptr)
@@ -221,6 +222,70 @@ const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 type_id)
        return btf->types[type_id];
 }
 
+static int determine_ptr_size(const struct btf *btf)
+{
+       const struct btf_type *t;
+       const char *name;
+       int i;
+
+       for (i = 1; i <= btf->nr_types; i++) {
+               t = btf__type_by_id(btf, i);
+               if (!btf_is_int(t))
+                       continue;
+
+               name = btf__name_by_offset(btf, t->name_off);
+               if (!name)
+                       continue;
+
+               if (strcmp(name, "long int") == 0 ||
+                   strcmp(name, "long unsigned int") == 0) {
+                       if (t->size != 4 && t->size != 8)
+                               continue;
+                       return t->size;
+               }
+       }
+
+       return -1;
+}
+
+static size_t btf_ptr_sz(const struct btf *btf)
+{
+       if (!btf->ptr_sz)
+               ((struct btf *)btf)->ptr_sz = determine_ptr_size(btf);
+       return btf->ptr_sz < 0 ? sizeof(void *) : btf->ptr_sz;
+}
+
+/* Return pointer size this BTF instance assumes. The size is heuristically
+ * determined by looking for 'long' or 'unsigned long' integer type and
+ * recording its size in bytes. If BTF type information doesn't have any such
+ * type, this function returns 0. In the latter case, native architecture's
+ * pointer size is assumed, so will be either 4 or 8, depending on
+ * architecture that libbpf was compiled for. It's possible to override
+ * guessed value by using btf__set_pointer_size() API.
+ */
+size_t btf__pointer_size(const struct btf *btf)
+{
+       if (!btf->ptr_sz)
+               ((struct btf *)btf)->ptr_sz = determine_ptr_size(btf);
+
+       if (btf->ptr_sz < 0)
+               /* not enough BTF type info to guess */
+               return 0;
+
+       return btf->ptr_sz;
+}
+
+/* Override or set pointer size in bytes. Only values of 4 and 8 are
+ * supported.
+ */
+int btf__set_pointer_size(struct btf *btf, size_t ptr_sz)
+{
+       if (ptr_sz != 4 && ptr_sz != 8)
+               return -EINVAL;
+       btf->ptr_sz = ptr_sz;
+       return 0;
+}
+
 static bool btf_type_is_void(const struct btf_type *t)
 {
        return t == &btf_void || btf_is_fwd(t);
@@ -253,7 +318,7 @@ __s64 btf__resolve_size(const struct btf *btf, __u32 type_id)
                        size = t->size;
                        goto done;
                case BTF_KIND_PTR:
-                       size = sizeof(void *);
+                       size = btf_ptr_sz(btf);
                        goto done;
                case BTF_KIND_TYPEDEF:
                case BTF_KIND_VOLATILE:
@@ -293,9 +358,9 @@ int btf__align_of(const struct btf *btf, __u32 id)
        switch (kind) {
        case BTF_KIND_INT:
        case BTF_KIND_ENUM:
-               return min(sizeof(void *), (size_t)t->size);
+               return min(btf_ptr_sz(btf), (size_t)t->size);
        case BTF_KIND_PTR:
-               return sizeof(void *);
+               return btf_ptr_sz(btf);
        case BTF_KIND_TYPEDEF:
        case BTF_KIND_VOLATILE:
        case BTF_KIND_CONST:
@@ -533,6 +598,18 @@ struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext)
        if (IS_ERR(btf))
                goto done;
 
+       switch (gelf_getclass(elf)) {
+       case ELFCLASS32:
+               btf__set_pointer_size(btf, 4);
+               break;
+       case ELFCLASS64:
+               btf__set_pointer_size(btf, 8);
+               break;
+       default:
+               pr_warn("failed to get ELF class (bitness) for %s\n", path);
+               break;
+       }
+
        if (btf_ext && btf_ext_data) {
                *btf_ext = btf_ext__new(btf_ext_data->d_buf,
                                        btf_ext_data->d_size);
@@ -564,8 +641,8 @@ done:
 
 struct btf *btf__parse_raw(const char *path)
 {
+       struct btf *btf = NULL;
        void *data = NULL;
-       struct btf *btf;
        FILE *f = NULL;
        __u16 magic;
        int err = 0;
index f4a1a1d..1ca1444 100644 (file)
@@ -76,6 +76,8 @@ LIBBPF_API __s32 btf__find_by_name_kind(const struct btf *btf,
 LIBBPF_API __u32 btf__get_nr_types(const struct btf *btf);
 LIBBPF_API const struct btf_type *btf__type_by_id(const struct btf *btf,
                                                  __u32 id);
+LIBBPF_API size_t btf__pointer_size(const struct btf *btf);
+LIBBPF_API int btf__set_pointer_size(struct btf *btf, size_t ptr_sz);
 LIBBPF_API __s64 btf__resolve_size(const struct btf *btf, __u32 type_id);
 LIBBPF_API int btf__resolve_type(const struct btf *btf, __u32 type_id);
 LIBBPF_API int btf__align_of(const struct btf *btf, __u32 id);
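A minimal sketch of how the new pointer-size accessors declared above might be used; the BTF file name is hypothetical and error handling is abbreviated:

	#include <bpf/btf.h>
	#include <bpf/libbpf.h>

	static struct btf *load_btf_32bit(void)
	{
		/* Hypothetical: raw BTF dumped from a 32-bit target system. */
		struct btf *btf = btf__parse_raw("target32.btf");

		if (libbpf_get_error(btf))
			return NULL;

		/* A raw blob carries no ELF class, and the 'long' heuristic may
		 * find nothing, in which case btf__pointer_size() returns 0. */
		if (btf__pointer_size(btf) == 0)
			btf__set_pointer_size(btf, 4);	/* only 4 or 8 are accepted */

		return btf;
	}
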
index cf71116..fe39bd7 100644 (file)
@@ -13,6 +13,7 @@
 #include <errno.h>
 #include <linux/err.h>
 #include <linux/btf.h>
+#include <linux/kernel.h>
 #include "btf.h"
 #include "hashmap.h"
 #include "libbpf.h"
@@ -60,6 +61,7 @@ struct btf_dump {
        const struct btf_ext *btf_ext;
        btf_dump_printf_fn_t printf_fn;
        struct btf_dump_opts opts;
+       int ptr_sz;
        bool strip_mods;
 
        /* per-type auxiliary state */
@@ -138,6 +140,7 @@ struct btf_dump *btf_dump__new(const struct btf *btf,
        d->btf_ext = btf_ext;
        d->printf_fn = printf_fn;
        d->opts.ctx = opts ? opts->ctx : NULL;
+       d->ptr_sz = btf__pointer_size(btf) ? : sizeof(void *);
 
        d->type_names = hashmap__new(str_hash_fn, str_equal_fn, NULL);
        if (IS_ERR(d->type_names)) {
@@ -549,6 +552,9 @@ static int btf_dump_order_type(struct btf_dump *d, __u32 id, bool through_ptr)
        }
 }
 
+static void btf_dump_emit_missing_aliases(struct btf_dump *d, __u32 id,
+                                         const struct btf_type *t);
+
 static void btf_dump_emit_struct_fwd(struct btf_dump *d, __u32 id,
                                     const struct btf_type *t);
 static void btf_dump_emit_struct_def(struct btf_dump *d, __u32 id,
@@ -671,6 +677,9 @@ static void btf_dump_emit_type(struct btf_dump *d, __u32 id, __u32 cont_id)
 
        switch (kind) {
        case BTF_KIND_INT:
+               /* Emit type alias definitions if necessary */
+               btf_dump_emit_missing_aliases(d, id, t);
+
                tstate->emit_state = EMITTED;
                break;
        case BTF_KIND_ENUM:
@@ -797,7 +806,7 @@ static void btf_dump_emit_bit_padding(const struct btf_dump *d,
                                      int align, int lvl)
 {
        int off_diff = m_off - cur_off;
-       int ptr_bits = sizeof(void *) * 8;
+       int ptr_bits = d->ptr_sz * 8;
 
        if (off_diff <= 0)
                /* no gap */
@@ -870,7 +879,7 @@ static void btf_dump_emit_struct_def(struct btf_dump *d,
                        btf_dump_printf(d, ": %d", m_sz);
                        off = m_off + m_sz;
                } else {
-                       m_sz = max(0, btf__resolve_size(d->btf, m->type));
+                       m_sz = max(0LL, btf__resolve_size(d->btf, m->type));
                        off = m_off + m_sz * 8;
                }
                btf_dump_printf(d, ";");
@@ -890,6 +899,32 @@ static void btf_dump_emit_struct_def(struct btf_dump *d,
                btf_dump_printf(d, " __attribute__((packed))");
 }
 
+static const char *missing_base_types[][2] = {
+       /*
+        * GCC emits typedefs to its internal __PolyX_t types when compiling Arm
+        * SIMD intrinsics. Alias them to standard base types.
+        */
+       { "__Poly8_t",          "unsigned char" },
+       { "__Poly16_t",         "unsigned short" },
+       { "__Poly64_t",         "unsigned long long" },
+       { "__Poly128_t",        "unsigned __int128" },
+};
+
+static void btf_dump_emit_missing_aliases(struct btf_dump *d, __u32 id,
+                                         const struct btf_type *t)
+{
+       const char *name = btf_dump_type_name(d, id);
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(missing_base_types); i++) {
+               if (strcmp(name, missing_base_types[i][0]) == 0) {
+                       btf_dump_printf(d, "typedef %s %s;\n\n",
+                                       missing_base_types[i][1], name);
+                       break;
+               }
+       }
+}
+
 static void btf_dump_emit_enum_fwd(struct btf_dump *d, __u32 id,
                                   const struct btf_type *t)
 {
index 7be04e4..5d20b2d 100644 (file)
@@ -2434,6 +2434,8 @@ static int bpf_object__init_btf(struct bpf_object *obj,
                                BTF_ELF_SEC, err);
                        goto out;
                }
+               /* enforce 8-byte pointers for BPF-targeted BTFs */
+               btf__set_pointer_size(obj->btf, 8);
                err = 0;
        }
        if (btf_ext_data) {
@@ -2542,6 +2544,8 @@ static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
                if (IS_ERR(kern_btf))
                        return PTR_ERR(kern_btf);
 
+               /* enforce 8-byte pointers for BPF-targeted BTFs */
+               btf__set_pointer_size(obj->btf, 8);
                bpf_object__sanitize_btf(obj, kern_btf);
        }
 
@@ -3478,10 +3482,11 @@ bpf_object__probe_global_data(struct bpf_object *obj)
 
        map = bpf_create_map_xattr(&map_attr);
        if (map < 0) {
-               cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
+               ret = -errno;
+               cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
                pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
-                       __func__, cp, errno);
-               return -errno;
+                       __func__, cp, -ret);
+               return ret;
        }
 
        insns[0].imm = map;
@@ -5194,7 +5199,8 @@ static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
 static int bpf_object__collect_map_relos(struct bpf_object *obj,
                                         GElf_Shdr *shdr, Elf_Data *data)
 {
-       int i, j, nrels, new_sz, ptr_sz = sizeof(void *);
+       const int bpf_ptr_sz = 8, host_ptr_sz = sizeof(void *);
+       int i, j, nrels, new_sz;
        const struct btf_var_secinfo *vi = NULL;
        const struct btf_type *sec, *var, *def;
        const struct btf_member *member;
@@ -5243,7 +5249,7 @@ static int bpf_object__collect_map_relos(struct bpf_object *obj,
 
                        vi = btf_var_secinfos(sec) + map->btf_var_idx;
                        if (vi->offset <= rel.r_offset &&
-                           rel.r_offset + sizeof(void *) <= vi->offset + vi->size)
+                           rel.r_offset + bpf_ptr_sz <= vi->offset + vi->size)
                                break;
                }
                if (j == obj->nr_maps) {
@@ -5279,17 +5285,20 @@ static int bpf_object__collect_map_relos(struct bpf_object *obj,
                        return -EINVAL;
 
                moff = rel.r_offset - vi->offset - moff;
-               if (moff % ptr_sz)
+               /* here we use BPF pointer size, which is always 64 bit, as we
+                * are parsing ELF that was built for BPF target
+                */
+               if (moff % bpf_ptr_sz)
                        return -EINVAL;
-               moff /= ptr_sz;
+               moff /= bpf_ptr_sz;
                if (moff >= map->init_slots_sz) {
                        new_sz = moff + 1;
-                       tmp = realloc(map->init_slots, new_sz * ptr_sz);
+                       tmp = realloc(map->init_slots, new_sz * host_ptr_sz);
                        if (!tmp)
                                return -ENOMEM;
                        map->init_slots = tmp;
                        memset(map->init_slots + map->init_slots_sz, 0,
-                              (new_sz - map->init_slots_sz) * ptr_sz);
+                              (new_sz - map->init_slots_sz) * host_ptr_sz);
                        map->init_slots_sz = new_sz;
                }
                map->init_slots[moff] = targ_map;
@@ -6012,9 +6021,10 @@ int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
        }
 
        if (bpf_obj_pin(prog->instances.fds[instance], path)) {
-               cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
+               err = -errno;
+               cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
                pr_warn("failed to pin program: %s\n", cp);
-               return -errno;
+               return err;
        }
        pr_debug("pinned program '%s'\n", path);
 
@@ -8306,10 +8316,8 @@ bpf_program__attach_iter(struct bpf_program *prog,
        if (!OPTS_VALID(opts, bpf_iter_attach_opts))
                return ERR_PTR(-EINVAL);
 
-       if (OPTS_HAS(opts, map_fd)) {
-               target_fd = opts->map_fd;
-               link_create_opts.flags = BPF_ITER_LINK_MAP_FD;
-       }
+       link_create_opts.iter_info = OPTS_GET(opts, link_info, (void *)0);
+       link_create_opts.iter_info_len = OPTS_GET(opts, link_info_len, 0);
 
        prog_fd = bpf_program__fd(prog);
        if (prog_fd < 0) {
index 3ed1399..5ecb406 100644 (file)
@@ -267,9 +267,10 @@ LIBBPF_API struct bpf_link *bpf_map__attach_struct_ops(struct bpf_map *map);
 
 struct bpf_iter_attach_opts {
        size_t sz; /* size of this struct for forward/backward compatibility */
-       __u32 map_fd;
+       union bpf_iter_link_info *link_info;
+       __u32 link_info_len;
 };
-#define bpf_iter_attach_opts__last_field map_fd
+#define bpf_iter_attach_opts__last_field link_info_len
 
 LIBBPF_API struct bpf_link *
 bpf_program__attach_iter(struct bpf_program *prog,
index 0c4722b..e35bd6c 100644 (file)
@@ -295,5 +295,7 @@ LIBBPF_0.1.0 {
                bpf_program__set_sk_lookup;
                btf__parse;
                btf__parse_raw;
+               btf__pointer_size;
                btf__set_fd;
+               btf__set_pointer_size;
 } LIBBPF_0.0.9;
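With the removed BPF_ITER_LINK_MAP_FD flag replaced by the iter_info/link_info plumbing above, attaching a map-element iterator could look roughly like this (prog and map_fd are assumed to exist; this is a sketch, not the selftest code):

	#include <string.h>
	#include <linux/bpf.h>
	#include <bpf/libbpf.h>

	static struct bpf_link *attach_map_iter(struct bpf_program *prog, int map_fd)
	{
		DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
		union bpf_iter_link_info linfo;

		memset(&linfo, 0, sizeof(linfo));
		linfo.map.map_fd = map_fd;

		opts.link_info = &linfo;
		opts.link_info_len = sizeof(linfo);

		return bpf_program__attach_iter(prog, &opts);	/* ERR_PTR on failure */
	}
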
index cae9757..8b75efc 100644 (file)
@@ -7,13 +7,13 @@ libperf-counting - counting interface
 
 DESCRIPTION
 -----------
-The counting interface provides API to meassure and get count for specific perf events.
+The counting interface provides an API to measure and get counts for specific perf events.
 
 The following test tries to explain count on `counting.c` example.
 
 It is by no means complete guide to counting, but shows libperf basic API for counting.
 
-The `counting.c` comes with libbperf package and can be compiled and run like:
+The `counting.c` comes with the libperf package and can be compiled and run like:
 
 [source,bash]
 --
@@ -26,7 +26,8 @@ count 176242, enabled 176242, run 176242
 It requires root access, because of the `PERF_COUNT_SW_CPU_CLOCK` event,
 which is available only for root.
 
-The `counting.c` example monitors two events on the current process and displays their count, in a nutshel it:
+The `counting.c` example monitors two events on the current process and displays
+their count; in a nutshell it:
 
 * creates events
 * adds them to the event list
@@ -152,7 +153,7 @@ Configure event list with the thread map and open events:
 --
 
 Both events are created as disabled (note the `disabled = 1` assignment above),
-so we need to enable the whole list explicitely (both events).
+so we need to enable the whole list explicitly (both events).
 
 From this moment events are counting and we can do our workload.
 
@@ -167,7 +168,8 @@ When we are done we disable the events list.
  79         perf_evlist__disable(evlist);
 --
 
-Now we need to get the counts from events, following code iterates throught the events list and read counts:
+Now we need to get the counts from the events; the following code iterates
+through the events list and reads the counts:
 
 [source,c]
 --
@@ -178,7 +180,7 @@ Now we need to get the counts from events, following code iterates throught the
  85         }
 --
 
-And finaly cleanup.
+And finally cleanup.
 
 We close the whole events list (both events) and remove it together with the threads map:
 
index d71a7b4..d6ca24f 100644 (file)
@@ -8,13 +8,13 @@ libperf-sampling - sampling interface
 
 DESCRIPTION
 -----------
-The sampling interface provides API to meassure and get count for specific perf events.
+The sampling interface provides an API to measure and get counts for specific perf events.
 
 The following test tries to explain count on `sampling.c` example.
 
 It is by no means complete guide to sampling, but shows libperf basic API for sampling.
 
-The `sampling.c` comes with libbperf package and can be compiled and run like:
+The `sampling.c` comes with the libperf package and can be compiled and run like:
 
 [source,bash]
 --
@@ -33,7 +33,8 @@ cpu   0, pid   4465, tid   4470, ip         7f84fe0ebebf, period             176
 
 It requires root access, because it uses hardware cycles event.
 
-The `sampling.c` example profiles/samples all CPUs with hardware cycles, in a nutshel it:
+The `sampling.c` example profiles/samples all CPUs with hardware cycles; in a
+nutshell it:
 
 - creates events
 - adds them to the event list
@@ -90,7 +91,7 @@ Once the setup is complete we start by defining cycles event using the `struct p
  36         };
 --
 
-Next step is to prepare cpus map.
+The next step is to prepare the CPUs map.
 
 In this case we will monitor all the available CPUs:
 
@@ -152,7 +153,7 @@ Once the events list is open, we can create memory maps AKA perf ring buffers:
 --
 
 The event is created as disabled (note the `disabled = 1` assignment above),
-so we need to enable the events list explicitely.
+so we need to enable the events list explicitly.
 
 From this moment the cycles event is sampling.
 
@@ -212,7 +213,7 @@ Each sample needs to get parsed:
 106                                 cpu, pid, tid, ip, period);
 --
 
-And finaly cleanup.
+And finally cleanup.
 
 We close the whole events list (both events) and remove it together with the threads map:
 
index 5a6bb51..0c74c30 100644 (file)
@@ -29,7 +29,7 @@ SYNOPSIS
   void libperf_init(libperf_print_fn_t fn);
 --
 
-*API to handle cpu maps:*
+*API to handle CPU maps:*
 
 [source,c]
 --
@@ -217,7 +217,7 @@ Following objects are key to the libperf interface:
 
 [horizontal]
 
-struct perf_cpu_map:: Provides a cpu list abstraction.
+struct perf_cpu_map:: Provides a CPU list abstraction.
 
 struct perf_thread_map:: Provides a thread list abstraction.
 
index 6a875a0..2208444 100644 (file)
@@ -305,9 +305,9 @@ int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
 }
 
 int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd,
-                           void *ptr, short revent)
+                           void *ptr, short revent, enum fdarray_flags flags)
 {
-       int pos = fdarray__add(&evlist->pollfd, fd, revent | POLLERR | POLLHUP);
+       int pos = fdarray__add(&evlist->pollfd, fd, revent | POLLERR | POLLHUP, flags);
 
        if (pos >= 0) {
                evlist->pollfd.priv[pos].ptr = ptr;
@@ -488,7 +488,7 @@ mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
                revent = !overwrite ? POLLIN : 0;
 
                if (!evsel->system_wide &&
-                   perf_evlist__add_pollfd(evlist, fd, map, revent) < 0) {
+                   perf_evlist__add_pollfd(evlist, fd, map, revent, fdarray_flag__default) < 0) {
                        perf_mmap__put(map);
                        return -1;
                }
index 74dc8c3..2d0fa02 100644 (file)
@@ -45,7 +45,7 @@ struct perf_evlist_mmap_ops {
 
 int perf_evlist__alloc_pollfd(struct perf_evlist *evlist);
 int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd,
-                           void *ptr, short revent);
+                           void *ptr, short revent, enum fdarray_flags flags);
 
 int perf_evlist__mmap_ops(struct perf_evlist *evlist,
                          struct perf_evlist_mmap_ops *ops,
index 69b44d2..8420288 100644 (file)
@@ -111,6 +111,14 @@ struct perf_record_cgroup {
        char                     path[PATH_MAX];
 };
 
+struct perf_record_text_poke_event {
+       struct perf_event_header header;
+       __u64                   addr;
+       __u16                   old_len;
+       __u16                   new_len;
+       __u8                    bytes[];
+};
+
 struct perf_record_sample {
        struct perf_event_header header;
        __u64                    array[];
@@ -367,6 +375,7 @@ union perf_event {
        struct perf_record_sample               sample;
        struct perf_record_bpf_event            bpf;
        struct perf_record_ksymbol              ksymbol;
+       struct perf_record_text_poke_event      text_poke;
        struct perf_record_header_attr          attr;
        struct perf_record_event_update         event_update;
        struct perf_record_header_event_type    event_type;
index 596032a..4d63943 100644 (file)
@@ -3,7 +3,7 @@ libtraceevent(3)
 
 NAME
 ----
-tep_load_plugins, tep_unload_plugins - Load / unload traceevent plugins.
+tep_load_plugins, tep_unload_plugins, tep_load_plugins_hook - Load / unload traceevent plugins.
 
 SYNOPSIS
 --------
@@ -13,6 +13,12 @@ SYNOPSIS
 
 struct tep_plugin_list pass:[*]*tep_load_plugins*(struct tep_handle pass:[*]_tep_);
 void *tep_unload_plugins*(struct tep_plugin_list pass:[*]_plugin_list_, struct tep_handle pass:[*]_tep_);
+void *tep_load_plugins_hook*(struct tep_handle pass:[*]_tep_, const char pass:[*]_suffix_,
+                          void (pass:[*]_load_plugin_)(struct tep_handle pass:[*]tep,
+                                              const char pass:[*]path,
+                                              const char pass:[*]name,
+                                              void pass:[*]data),
+                          void pass:[*]_data_);
 --
 
 DESCRIPTION
@@ -22,11 +28,13 @@ directories. The _tep_ argument is trace event parser context.
 The plugin directories are :
 [verse]
 --
+       - Directories, specified in _tep_->plugins_dir with priority TEP_PLUGIN_FIRST
        - System's plugin directory, defined at the library compile time. It
          depends on the library installation prefix and usually is
          _(install_preffix)/lib/traceevent/plugins_
        - Directory, defined by the environment variable _TRACEEVENT_PLUGIN_DIR_
        - User's plugin directory, located at _~/.local/lib/traceevent/plugins_
+       - Directories, specified in _tep_->plugins_dir with priority TEP_PLUGIN_LAST
 --
 Loading of plugins can be controlled by the _tep_flags_, using the
 _tep_set_flag()_ API:
@@ -44,6 +52,12 @@ _tep_load_plugins()_. The _tep_ argument is trace event parser context. The
 _plugin_list_ is the list of loaded plugins, returned by
 the _tep_load_plugins()_ function.
 
+The _tep_load_plugins_hook_ function walks through all plugin directories
+and calls the user-specified _load_plugin()_ hook for each plugin file. Only files
+with the given _suffix_ are considered to be plugins. The _data_ is a user-specified
+context, passed to _load_plugin()_. The directories and the walk order are the same
+as in the _tep_load_plugins()_ API.
+
 RETURN VALUE
 ------------
 The _tep_load_plugins()_ function returns a list of successfully loaded plugins,
@@ -63,6 +77,15 @@ if (plugins == NULL) {
 }
 ...
 tep_unload_plugins(plugins, tep);
+...
+void print_plugin(struct tep_handle *tep, const char *path,
+                 const char *name, void *data)
+{
+	printf("Found libtraceevent plugin %s/%s\n", path, name);
+}
+...
+tep_load_plugins_hook(tep, ".so", print_plugin, NULL);
+...
 --
 
 FILES
index cee4698..d805a92 100644 (file)
@@ -13,6 +13,7 @@ struct func_map;
 struct func_list;
 struct event_handler;
 struct func_resolver;
+struct tep_plugins_dir;
 
 struct tep_handle {
        int ref_count;
@@ -47,7 +48,6 @@ struct tep_handle {
        struct printk_list *printklist;
        unsigned int printk_count;
 
-
        struct tep_event **events;
        int nr_events;
        struct tep_event **sort_events;
@@ -81,10 +81,30 @@ struct tep_handle {
 
        /* cache */
        struct tep_event *last_event;
+
+       struct tep_plugins_dir *plugins_dir;
+};
+
+enum tep_print_parse_type {
+       PRINT_FMT_STRING,
+       PRINT_FMT_ARG_DIGIT,
+       PRINT_FMT_ARG_POINTER,
+       PRINT_FMT_ARG_STRING,
+};
+
+struct tep_print_parse {
+       struct tep_print_parse  *next;
+
+       char                            *format;
+       int                             ls;
+       enum tep_print_parse_type       type;
+       struct tep_print_arg            *arg;
+       struct tep_print_arg            *len_as_arg;
 };
 
 void tep_free_event(struct tep_event *event);
 void tep_free_format_field(struct tep_format_field *field);
+void tep_free_plugin_paths(struct tep_handle *tep);
 
 unsigned short tep_data2host2(struct tep_handle *tep, unsigned short data);
 unsigned int tep_data2host4(struct tep_handle *tep, unsigned int data);
index ba4f338..3ba566d 100644 (file)
@@ -4565,43 +4565,93 @@ get_bprint_format(void *data, int size __maybe_unused,
        return format;
 }
 
-static void print_mac_arg(struct trace_seq *s, int mac, void *data, int size,
-                         struct tep_event *event, struct tep_print_arg *arg)
+static int print_mac_arg(struct trace_seq *s, const char *format,
+                        void *data, int size, struct tep_event *event,
+                        struct tep_print_arg *arg)
 {
-       unsigned char *buf;
        const char *fmt = "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x";
+       bool reverse = false;
+       unsigned char *buf;
+       int ret = 0;
 
        if (arg->type == TEP_PRINT_FUNC) {
                process_defined_func(s, data, size, event, arg);
-               return;
+               return 0;
        }
 
        if (arg->type != TEP_PRINT_FIELD) {
                trace_seq_printf(s, "ARG TYPE NOT FIELD BUT %d",
                                 arg->type);
-               return;
+               return 0;
        }
 
-       if (mac == 'm')
+       if (format[0] == 'm') {
                fmt = "%.2x%.2x%.2x%.2x%.2x%.2x";
+       } else if (format[0] == 'M' && format[1] == 'F') {
+               fmt = "%.2x-%.2x-%.2x-%.2x-%.2x-%.2x";
+               ret++;
+       }
+       if (format[1] == 'R') {
+               reverse = true;
+               ret++;
+       }
+
        if (!arg->field.field) {
                arg->field.field =
                        tep_find_any_field(event, arg->field.name);
                if (!arg->field.field) {
                        do_warning_event(event, "%s: field %s not found",
                                         __func__, arg->field.name);
-                       return;
+                       return ret;
                }
        }
        if (arg->field.field->size != 6) {
                trace_seq_printf(s, "INVALIDMAC");
-               return;
+               return ret;
        }
+
        buf = data + arg->field.field->offset;
-       trace_seq_printf(s, fmt, buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);
+       if (reverse)
+               trace_seq_printf(s, fmt, buf[5], buf[4], buf[3], buf[2], buf[1], buf[0]);
+       else
+               trace_seq_printf(s, fmt, buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);
+
+       return ret;
 }
 
-static void print_ip4_addr(struct trace_seq *s, char i, unsigned char *buf)
+static int parse_ip4_print_args(struct tep_handle *tep,
+                               const char *ptr, bool *reverse)
+{
+       int ret = 0;
+
+       *reverse = false;
+
+       /* hnbl */
+       switch (*ptr) {
+       case 'h':
+               if (tep->file_bigendian)
+                       *reverse = false;
+               else
+                       *reverse = true;
+               ret++;
+               break;
+       case 'l':
+               *reverse = true;
+               ret++;
+               break;
+       case 'n':
+       case 'b':
+               ret++;
+               /* fall through */
+       default:
+               *reverse = false;
+               break;
+       }
+
+       return ret;
+}
+
+static void print_ip4_addr(struct trace_seq *s, char i, bool reverse, unsigned char *buf)
 {
        const char *fmt;
 
@@ -4610,7 +4660,11 @@ static void print_ip4_addr(struct trace_seq *s, char i, unsigned char *buf)
        else
                fmt = "%d.%d.%d.%d";
 
-       trace_seq_printf(s, fmt, buf[0], buf[1], buf[2], buf[3]);
+       if (reverse)
+               trace_seq_printf(s, fmt, buf[3], buf[2], buf[1], buf[0]);
+       else
+               trace_seq_printf(s, fmt, buf[0], buf[1], buf[2], buf[3]);
+
 }
 
 static inline bool ipv6_addr_v4mapped(const struct in6_addr *a)
@@ -4693,7 +4747,7 @@ static void print_ip6c_addr(struct trace_seq *s, unsigned char *addr)
        if (useIPv4) {
                if (needcolon)
                        trace_seq_printf(s, ":");
-               print_ip4_addr(s, 'I', &in6.s6_addr[12]);
+               print_ip4_addr(s, 'I', false, &in6.s6_addr[12]);
        }
 
        return;
@@ -4722,16 +4776,20 @@ static int print_ipv4_arg(struct trace_seq *s, const char *ptr, char i,
                          void *data, int size, struct tep_event *event,
                          struct tep_print_arg *arg)
 {
+       bool reverse = false;
        unsigned char *buf;
+       int ret;
+
+       ret = parse_ip4_print_args(event->tep, ptr, &reverse);
 
        if (arg->type == TEP_PRINT_FUNC) {
                process_defined_func(s, data, size, event, arg);
-               return 0;
+               return ret;
        }
 
        if (arg->type != TEP_PRINT_FIELD) {
                trace_seq_printf(s, "ARG TYPE NOT FIELD BUT %d", arg->type);
-               return 0;
+               return ret;
        }
 
        if (!arg->field.field) {
@@ -4740,7 +4798,7 @@ static int print_ipv4_arg(struct trace_seq *s, const char *ptr, char i,
                if (!arg->field.field) {
                        do_warning("%s: field %s not found",
                                   __func__, arg->field.name);
-                       return 0;
+                       return ret;
                }
        }
 
@@ -4748,11 +4806,12 @@ static int print_ipv4_arg(struct trace_seq *s, const char *ptr, char i,
 
        if (arg->field.field->size != 4) {
                trace_seq_printf(s, "INVALIDIPv4");
-               return 0;
+               return ret;
        }
-       print_ip4_addr(s, i, buf);
 
-       return 0;
+       print_ip4_addr(s, i, reverse, buf);
+       return ret;
+
 }
 
 static int print_ipv6_arg(struct trace_seq *s, const char *ptr, char i,
@@ -4812,7 +4871,9 @@ static int print_ipsa_arg(struct trace_seq *s, const char *ptr, char i,
        char have_c = 0, have_p = 0;
        unsigned char *buf;
        struct sockaddr_storage *sa;
+       bool reverse = false;
        int rc = 0;
+       int ret;
 
        /* pISpc */
        if (i == 'I') {
@@ -4827,6 +4888,9 @@ static int print_ipsa_arg(struct trace_seq *s, const char *ptr, char i,
                        rc++;
                }
        }
+       ret = parse_ip4_print_args(event->tep, ptr, &reverse);
+       ptr += ret;
+       rc += ret;
 
        if (arg->type == TEP_PRINT_FUNC) {
                process_defined_func(s, data, size, event, arg);
@@ -4858,7 +4922,7 @@ static int print_ipsa_arg(struct trace_seq *s, const char *ptr, char i,
                        return rc;
                }
 
-               print_ip4_addr(s, i, (unsigned char *) &sa4->sin_addr);
+               print_ip4_addr(s, i, reverse, (unsigned char *) &sa4->sin_addr);
                if (have_p)
                        trace_seq_printf(s, ":%d", ntohs(sa4->sin_port));
 
@@ -4892,25 +4956,20 @@ static int print_ip_arg(struct trace_seq *s, const char *ptr,
                        struct tep_print_arg *arg)
 {
        char i = *ptr;  /* 'i' or 'I' */
-       char ver;
-       int rc = 0;
+       int rc = 1;
 
+       /* IP version */
        ptr++;
-       rc++;
 
-       ver = *ptr;
-       ptr++;
-       rc++;
-
-       switch (ver) {
+       switch (*ptr) {
        case '4':
-               rc += print_ipv4_arg(s, ptr, i, data, size, event, arg);
+               rc += print_ipv4_arg(s, ptr + 1, i, data, size, event, arg);
                break;
        case '6':
-               rc += print_ipv6_arg(s, ptr, i, data, size, event, arg);
+               rc += print_ipv6_arg(s, ptr + 1, i, data, size, event, arg);
                break;
        case 'S':
-               rc += print_ipsa_arg(s, ptr, i, data, size, event, arg);
+               rc += print_ipsa_arg(s, ptr + 1, i, data, size, event, arg);
                break;
        default:
                return 0;
@@ -4919,6 +4978,133 @@ static int print_ip_arg(struct trace_seq *s, const char *ptr,
        return rc;
 }
 
+static const int guid_index[16] = {3, 2, 1, 0, 5, 4, 7, 6, 8, 9, 10, 11, 12, 13, 14, 15};
+static const int uuid_index[16] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
+
+static int print_uuid_arg(struct trace_seq *s, const char *ptr,
+                       void *data, int size, struct tep_event *event,
+                       struct tep_print_arg *arg)
+{
+       const int *index = uuid_index;
+       char *format = "%02x";
+       int ret = 0;
+       char *buf;
+       int i;
+
+       switch (*(ptr + 1)) {
+       case 'L':
+               format = "%02X";
+               /* fall through */
+       case 'l':
+               index = guid_index;
+               ret++;
+               break;
+       case 'B':
+               format = "%02X";
+               /* fall through */
+       case 'b':
+               ret++;
+               break;
+       }
+
+       if (arg->type == TEP_PRINT_FUNC) {
+               process_defined_func(s, data, size, event, arg);
+               return ret;
+       }
+
+       if (arg->type != TEP_PRINT_FIELD) {
+               trace_seq_printf(s, "ARG TYPE NOT FIELD BUT %d", arg->type);
+               return ret;
+       }
+
+       if (!arg->field.field) {
+               arg->field.field =
+                       tep_find_any_field(event, arg->field.name);
+               if (!arg->field.field) {
+                       do_warning("%s: field %s not found",
+                                  __func__, arg->field.name);
+                       return ret;
+               }
+       }
+
+       if (arg->field.field->size != 16) {
+               trace_seq_printf(s, "INVALIDUUID");
+               return ret;
+       }
+
+       buf = data + arg->field.field->offset;
+
+       for (i = 0; i < 16; i++) {
+               trace_seq_printf(s, format, buf[index[i]] & 0xff);
+               switch (i) {
+               case 3:
+               case 5:
+               case 7:
+               case 9:
+                       trace_seq_printf(s, "-");
+                       break;
+               }
+       }
+
+       return ret;
+}
+
+static int print_raw_buff_arg(struct trace_seq *s, const char *ptr,
+                             void *data, int size, struct tep_event *event,
+                             struct tep_print_arg *arg, int print_len)
+{
+       int plen = print_len;
+       char *delim = " ";
+       int ret = 0;
+       char *buf;
+       int i;
+       unsigned long offset;
+       int arr_len;
+
+       switch (*(ptr + 1)) {
+       case 'C':
+               delim = ":";
+               ret++;
+               break;
+       case 'D':
+               delim = "-";
+               ret++;
+               break;
+       case 'N':
+               delim = "";
+               ret++;
+               break;
+       }
+
+       if (arg->type == TEP_PRINT_FUNC) {
+               process_defined_func(s, data, size, event, arg);
+               return ret;
+       }
+
+       if (arg->type != TEP_PRINT_DYNAMIC_ARRAY) {
+               trace_seq_printf(s, "ARG TYPE NOT FIELD BUT %d", arg->type);
+               return ret;
+       }
+
+       offset = tep_read_number(event->tep,
+                                data + arg->dynarray.field->offset,
+                                arg->dynarray.field->size);
+       arr_len = (unsigned long long)(offset >> 16);
+       buf = data + (offset & 0xffff);
+
+       if (arr_len < plen)
+               plen = arr_len;
+
+       if (plen < 1)
+               return ret;
+
+       trace_seq_printf(s, "%02x", buf[0] & 0xff);
+       for (i = 1; i < plen; i++)
+               trace_seq_printf(s, "%s%02x", delim, buf[i] & 0xff);
+
+       return ret;
+}
+
 static int is_printable_array(char *p, unsigned int len)
 {
        unsigned int i;
@@ -5007,264 +5193,567 @@ void tep_print_fields(struct trace_seq *s, void *data,
        }
 }
 
-static void pretty_print(struct trace_seq *s, void *data, int size, struct tep_event *event)
+static int print_function(struct trace_seq *s, const char *format,
+                         void *data, int size, struct tep_event *event,
+                         struct tep_print_arg *arg)
 {
-       struct tep_handle *tep = event->tep;
-       struct tep_print_fmt *print_fmt = &event->print_fmt;
-       struct tep_print_arg *arg = print_fmt->args;
-       struct tep_print_arg *args = NULL;
-       const char *ptr = print_fmt->format;
-       unsigned long long val;
        struct func_map *func;
-       const char *saveptr;
-       struct trace_seq p;
-       char *bprint_fmt = NULL;
-       char format[32];
-       int show_func;
-       int len_as_arg;
-       int len_arg = 0;
-       int len;
-       int ls;
+       unsigned long long val;
 
-       if (event->flags & TEP_EVENT_FL_FAILED) {
-               trace_seq_printf(s, "[FAILED TO PARSE]");
-               tep_print_fields(s, data, size, event);
-               return;
+       val = eval_num_arg(data, size, event, arg);
+       func = find_func(event->tep, val);
+       if (func) {
+               trace_seq_puts(s, func->func);
+               if (*format == 'F' || *format == 'S')
+                       trace_seq_printf(s, "+0x%llx", val - func->addr);
+       } else {
+               if (event->tep->long_size == 4)
+                       trace_seq_printf(s, "0x%lx", (long)val);
+               else
+                       trace_seq_printf(s, "0x%llx", (long long)val);
        }
 
-       if (event->flags & TEP_EVENT_FL_ISBPRINT) {
-               bprint_fmt = get_bprint_format(data, size, event);
-               args = make_bprint_args(bprint_fmt, data, size, event);
-               arg = args;
-               ptr = bprint_fmt;
+       return 0;
+}
+
+static int print_arg_pointer(struct trace_seq *s, const char *format, int plen,
+                            void *data, int size,
+                            struct tep_event *event, struct tep_print_arg *arg)
+{
+       unsigned long long val;
+       int ret = 1;
+
+       if (arg->type == TEP_PRINT_BSTRING) {
+               trace_seq_puts(s, arg->string.string);
+               return 0;
+       }
+       while (*format) {
+               if (*format == 'p') {
+                       format++;
+                       break;
+               }
+               format++;
        }
 
-       for (; *ptr; ptr++) {
-               ls = 0;
-               if (*ptr == '\\') {
-                       ptr++;
-                       switch (*ptr) {
+       switch (*format) {
+       case 'F':
+       case 'f':
+       case 'S':
+       case 's':
+               ret += print_function(s, format, data, size, event, arg);
+               break;
+       case 'M':
+       case 'm':
+               ret += print_mac_arg(s, format, data, size, event, arg);
+               break;
+       case 'I':
+       case 'i':
+               ret += print_ip_arg(s, format, data, size, event, arg);
+               break;
+       case 'U':
+               ret += print_uuid_arg(s, format, data, size, event, arg);
+               break;
+       case 'h':
+               ret += print_raw_buff_arg(s, format, data, size, event, arg, plen);
+               break;
+       default:
+               ret = 0;
+               val = eval_num_arg(data, size, event, arg);
+               trace_seq_printf(s, "%p", (void *)val);
+               break;
+       }
+
+       return ret;
+
+}
+
+static int print_arg_number(struct trace_seq *s, const char *format, int plen,
+                           void *data, int size, int ls,
+                           struct tep_event *event, struct tep_print_arg *arg)
+{
+       unsigned long long val;
+
+       val = eval_num_arg(data, size, event, arg);
+
+       switch (ls) {
+       case -2:
+               if (plen >= 0)
+                       trace_seq_printf(s, format, plen, (char)val);
+               else
+                       trace_seq_printf(s, format, (char)val);
+               break;
+       case -1:
+               if (plen >= 0)
+                       trace_seq_printf(s, format, plen, (short)val);
+               else
+                       trace_seq_printf(s, format, (short)val);
+               break;
+       case 0:
+               if (plen >= 0)
+                       trace_seq_printf(s, format, plen, (int)val);
+               else
+                       trace_seq_printf(s, format, (int)val);
+               break;
+       case 1:
+               if (plen >= 0)
+                       trace_seq_printf(s, format, plen, (long)val);
+               else
+                       trace_seq_printf(s, format, (long)val);
+               break;
+       case 2:
+               if (plen >= 0)
+                       trace_seq_printf(s, format, plen, (long long)val);
+               else
+                       trace_seq_printf(s, format, (long long)val);
+               break;
+       default:
+               do_warning_event(event, "bad count (%d)", ls);
+               event->flags |= TEP_EVENT_FL_FAILED;
+       }
+       return 0;
+}
+
+
+static void print_arg_string(struct trace_seq *s, const char *format, int plen,
+                            void *data, int size,
+                            struct tep_event *event, struct tep_print_arg *arg)
+{
+       struct trace_seq p;
+
+       /* Use helper trace_seq */
+       trace_seq_init(&p);
+       print_str_arg(&p, data, size, event,
+                     format, plen, arg);
+       trace_seq_terminate(&p);
+       trace_seq_puts(s, p.buffer);
+       trace_seq_destroy(&p);
+}
+
+static int parse_arg_format_pointer(const char *format)
+{
+       int ret = 0;
+       int index;
+       int loop;
+
+       switch (*format) {
+       case 'F':
+       case 'S':
+       case 'f':
+       case 's':
+               ret++;
+               break;
+       case 'M':
+       case 'm':
+               /* [mM]R , [mM]F */
+               switch (format[1]) {
+               case 'R':
+               case 'F':
+                       ret++;
+                       break;
+               }
+               ret++;
+               break;
+       case 'I':
+       case 'i':
+               index = 2;
+               loop = 1;
+               switch (format[1]) {
+               case 'S':
+                       /*[S][pfs]*/
+                       while (loop) {
+                               switch (format[index]) {
+                               case 'p':
+                               case 'f':
+                               case 's':
+                                       ret++;
+                                       index++;
+                                       break;
+                               default:
+                                       loop = 0;
+                                       break;
+                               }
+                       }
+                       /* fall through */
+               case '4':
+                       /* [4S][hnbl] */
+                       switch (format[index]) {
+                       case 'h':
                        case 'n':
-                               trace_seq_putc(s, '\n');
-                               break;
-                       case 't':
-                               trace_seq_putc(s, '\t');
-                               break;
-                       case 'r':
-                               trace_seq_putc(s, '\r');
-                               break;
-                       case '\\':
-                               trace_seq_putc(s, '\\');
+                       case 'l':
+                       case 'b':
+                               ret++;
+                               index++;
                                break;
-                       default:
-                               trace_seq_putc(s, *ptr);
+                       }
+                       if (format[1] == '4') {
+                               ret++;
                                break;
                        }
+                       /* fall through */
+               case '6':
+                       /* [6S]c */
+                       if (format[index] == 'c')
+                               ret++;
+                       ret++;
+                       break;
+               }
+               ret++;
+               break;
+       case 'U':
+               switch (format[1]) {
+               case 'L':
+               case 'l':
+               case 'B':
+               case 'b':
+                       ret++;
+                       break;
+               }
+               ret++;
+               break;
+       case 'h':
+               switch (format[1]) {
+               case 'C':
+               case 'D':
+               case 'N':
+                       ret++;
+                       break;
+               }
+               ret++;
+               break;
+       default:
+               break;
+       }
 
-               } else if (*ptr == '%') {
-                       saveptr = ptr;
-                       show_func = 0;
-                       len_as_arg = 0;
- cont_process:
-                       ptr++;
-                       switch (*ptr) {
-                       case '%':
-                               trace_seq_putc(s, '%');
-                               break;
-                       case '#':
-                               /* FIXME: need to handle properly */
-                               goto cont_process;
-                       case 'h':
-                               ls--;
-                               goto cont_process;
-                       case 'l':
-                               ls++;
-                               goto cont_process;
-                       case 'L':
-                               ls = 2;
-                               goto cont_process;
-                       case '*':
-                               /* The argument is the length. */
-                               if (!arg) {
-                                       do_warning_event(event, "no argument match");
-                                       event->flags |= TEP_EVENT_FL_FAILED;
-                                       goto out_failed;
-                               }
-                               len_arg = eval_num_arg(data, size, event, arg);
-                               len_as_arg = 1;
-                               arg = arg->next;
-                               goto cont_process;
-                       case '.':
-                       case 'z':
-                       case 'Z':
-                       case '0' ... '9':
-                       case '-':
-                               goto cont_process;
-                       case 'p':
-                               if (tep->long_size == 4)
-                                       ls = 1;
-                               else
-                                       ls = 2;
+       return ret;
+}
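
The return value of parse_arg_format_pointer() is the number of characters after the 'p' that belong to the pointer extension, so the caller can skip over them. A few cases traced by hand through the switch above (illustration only, not a table from the source):

    /*
     * text after 'p'   return   printk extension
     * "S"              1        %pS   - symbol name
     * "MR"             2        %pMR  - MAC address, reversed byte order
     * "I4"             2        %pI4  - IPv4 address
     * "I6c"            3        %pI6c - compressed IPv6 address
     */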
 
-                               if (isalnum(ptr[1]))
-                                       ptr++;
+static void free_parse_args(struct tep_print_parse *arg)
+{
+       struct tep_print_parse *del;
 
-                               if (arg->type == TEP_PRINT_BSTRING) {
-                                       trace_seq_puts(s, arg->string.string);
-                                       arg = arg->next;
-                                       break;
-                               }
+       while (arg) {
+               del = arg;
+               arg = del->next;
+               free(del->format);
+               free(del);
+       }
+}
 
-                               if (*ptr == 'F' || *ptr == 'f' ||
-                                   *ptr == 'S' || *ptr == 's') {
-                                       show_func = *ptr;
-                               } else if (*ptr == 'M' || *ptr == 'm') {
-                                       print_mac_arg(s, *ptr, data, size, event, arg);
-                                       arg = arg->next;
-                                       break;
-                               } else if (*ptr == 'I' || *ptr == 'i') {
-                                       int n;
+static int parse_arg_add(struct tep_print_parse **parse, char *format,
+                        enum tep_print_parse_type type,
+                        struct tep_print_arg *arg,
+                        struct tep_print_arg *len_as_arg,
+                        int ls)
+{
+       struct tep_print_parse *parg = NULL;
 
-                                       n = print_ip_arg(s, ptr, data, size, event, arg);
-                                       if (n > 0) {
-                                               ptr += n - 1;
-                                               arg = arg->next;
-                                               break;
-                                       }
-                               }
+       parg = calloc(1, sizeof(*parg));
+       if (!parg)
+               goto error;
+       parg->format = strdup(format);
+       if (!parg->format)
+               goto error;
+       parg->type = type;
+       parg->arg = arg;
+       parg->len_as_arg = len_as_arg;
+       parg->ls = ls;
+       *parse = parg;
+       return 0;
+error:
+       if (parg) {
+               free(parg->format);
+               free(parg);
+       }
+       return -1;
+}
 
-                               /* fall through */
-                       case 'd':
-                       case 'u':
-                       case 'i':
-                       case 'x':
-                       case 'X':
-                       case 'o':
-                               if (!arg) {
-                                       do_warning_event(event, "no argument match");
-                                       event->flags |= TEP_EVENT_FL_FAILED;
-                                       goto out_failed;
-                               }
+static int parse_arg_format(struct tep_print_parse **parse,
+                           struct tep_event *event,
+                           const char *format, struct tep_print_arg **arg)
+{
+       struct tep_print_arg *len_arg = NULL;
+       char print_format[32];
+       const char *start = format;
+       int ret = 0;
+       int ls = 0;
+       int res;
+       int len;
 
-                               len = ((unsigned long)ptr + 1) -
-                                       (unsigned long)saveptr;
+       format++;
+       ret++;
+       for (; *format; format++) {
+               switch (*format) {
+               case '#':
+                       /* FIXME: need to handle properly */
+                       break;
+               case 'h':
+                       ls--;
+                       break;
+               case 'l':
+                       ls++;
+                       break;
+               case 'L':
+                       ls = 2;
+                       break;
+               case '.':
+               case 'z':
+               case 'Z':
+               case '0' ... '9':
+               case '-':
+                       break;
+               case '*':
+                       /* The argument is the length. */
+                       if (!*arg) {
+                               do_warning_event(event, "no argument match");
+                               event->flags |= TEP_EVENT_FL_FAILED;
+                               goto out_failed;
+                       }
+                       if (len_arg) {
+                               do_warning_event(event, "argument already matched");
+                               event->flags |= TEP_EVENT_FL_FAILED;
+                               goto out_failed;
+                       }
+                       len_arg = *arg;
+                       *arg = (*arg)->next;
+                       break;
+               case 'p':
+                       if (!*arg) {
+                               do_warning_event(event, "no argument match");
+                               event->flags |= TEP_EVENT_FL_FAILED;
+                               goto out_failed;
+                       }
+                       res = parse_arg_format_pointer(format + 1);
+                       if (res > 0) {
+                               format += res;
+                               ret += res;
+                       }
+                       len = ((unsigned long)format + 1) -
+                               (unsigned long)start;
+                       /* should never happen */
+                       if (len > 31) {
+                               do_warning_event(event, "bad format!");
+                               event->flags |= TEP_EVENT_FL_FAILED;
+                               len = 31;
+                       }
+                       memcpy(print_format, start, len);
+                       print_format[len] = 0;
 
-                               /* should never happen */
-                               if (len > 31) {
-                                       do_warning_event(event, "bad format!");
-                                       event->flags |= TEP_EVENT_FL_FAILED;
-                                       len = 31;
-                               }
+                       parse_arg_add(parse, print_format,
+                                     PRINT_FMT_ARG_POINTER, *arg, len_arg, ls);
+                       *arg = (*arg)->next;
+                       ret++;
+                       return ret;
+               case 'd':
+               case 'u':
+               case 'i':
+               case 'x':
+               case 'X':
+               case 'o':
+                       if (!*arg) {
+                               do_warning_event(event, "no argument match");
+                               event->flags |= TEP_EVENT_FL_FAILED;
+                               goto out_failed;
+                       }
 
-                               memcpy(format, saveptr, len);
-                               format[len] = 0;
+                       len = ((unsigned long)format + 1) -
+                               (unsigned long)start;
 
-                               val = eval_num_arg(data, size, event, arg);
-                               arg = arg->next;
+                       /* should never happen */
+                       if (len > 30) {
+                               do_warning_event(event, "bad format!");
+                               event->flags |= TEP_EVENT_FL_FAILED;
+                               len = 31;
+                       }
+                       memcpy(print_format, start, len);
+                       print_format[len] = 0;
 
-                               if (show_func) {
-                                       func = find_func(tep, val);
-                                       if (func) {
-                                               trace_seq_puts(s, func->func);
-                                               if (show_func == 'F')
-                                                       trace_seq_printf(s,
-                                                              "+0x%llx",
-                                                              val - func->addr);
-                                               break;
-                                       }
-                               }
-                               if (tep->long_size == 8 && ls == 1 &&
-                                   sizeof(long) != 8) {
-                                       char *p;
-
-                                       /* make %l into %ll */
-                                       if (ls == 1 && (p = strchr(format, 'l')))
-                                               memmove(p+1, p, strlen(p)+1);
-                                       else if (strcmp(format, "%p") == 0)
-                                               strcpy(format, "0x%llx");
-                                       ls = 2;
-                               }
-                               switch (ls) {
-                               case -2:
-                                       if (len_as_arg)
-                                               trace_seq_printf(s, format, len_arg, (char)val);
-                                       else
-                                               trace_seq_printf(s, format, (char)val);
-                                       break;
-                               case -1:
-                                       if (len_as_arg)
-                                               trace_seq_printf(s, format, len_arg, (short)val);
-                                       else
-                                               trace_seq_printf(s, format, (short)val);
-                                       break;
-                               case 0:
-                                       if (len_as_arg)
-                                               trace_seq_printf(s, format, len_arg, (int)val);
-                                       else
-                                               trace_seq_printf(s, format, (int)val);
-                                       break;
-                               case 1:
-                                       if (len_as_arg)
-                                               trace_seq_printf(s, format, len_arg, (long)val);
-                                       else
-                                               trace_seq_printf(s, format, (long)val);
-                                       break;
-                               case 2:
-                                       if (len_as_arg)
-                                               trace_seq_printf(s, format, len_arg,
-                                                                (long long)val);
-                                       else
-                                               trace_seq_printf(s, format, (long long)val);
-                                       break;
-                               default:
-                                       do_warning_event(event, "bad count (%d)", ls);
-                                       event->flags |= TEP_EVENT_FL_FAILED;
-                               }
-                               break;
-                       case 's':
-                               if (!arg) {
-                                       do_warning_event(event, "no matching argument");
-                                       event->flags |= TEP_EVENT_FL_FAILED;
-                                       goto out_failed;
-                               }
+                       if (event->tep->long_size == 8 && ls == 1 &&
+                           sizeof(long) != 8) {
+                               char *p;
+
+                               /* make %l into %ll */
+                               if (ls == 1 && (p = strchr(print_format, 'l')))
+                                       memmove(p+1, p, strlen(p)+1);
+                               ls = 2;
+                       }
+                       if (ls < -2 || ls > 2) {
+                               do_warning_event(event, "bad count (%d)", ls);
+                               event->flags |= TEP_EVENT_FL_FAILED;
+                       }
+                       parse_arg_add(parse, print_format,
+                                     PRINT_FMT_ARG_DIGIT, *arg, len_arg, ls);
+                       *arg = (*arg)->next;
+                       ret++;
+                       return ret;
+               case 's':
+                       if (!*arg) {
+                               do_warning_event(event, "no matching argument");
+                               event->flags |= TEP_EVENT_FL_FAILED;
+                               goto out_failed;
+                       }
 
-                               len = ((unsigned long)ptr + 1) -
-                                       (unsigned long)saveptr;
+                       len = ((unsigned long)format + 1) -
+                               (unsigned long)start;
 
-                               /* should never happen */
-                               if (len > 31) {
-                                       do_warning_event(event, "bad format!");
-                                       event->flags |= TEP_EVENT_FL_FAILED;
-                                       len = 31;
-                               }
+                       /* should never happen */
+                       if (len > 31) {
+                               do_warning_event(event, "bad format!");
+                               event->flags |= TEP_EVENT_FL_FAILED;
+                               len = 31;
+                       }
+
+                       memcpy(print_format, start, len);
+                       print_format[len] = 0;
+
+                       parse_arg_add(parse, print_format,
+                                       PRINT_FMT_ARG_STRING, *arg, len_arg, 0);
+                       *arg = (*arg)->next;
+                       ret++;
+                       return ret;
+               default:
+                       snprintf(print_format, 32, ">%c<", *format);
+                       parse_arg_add(parse, print_format,
+                                       PRINT_FMT_STRING, NULL, NULL, 0);
+                       ret++;
+                       return ret;
+               }
+               ret++;
+       }
+
+out_failed:
+       return ret;
 
-                               memcpy(format, saveptr, len);
-                               format[len] = 0;
-                               if (!len_as_arg)
-                                       len_arg = -1;
-                               /* Use helper trace_seq */
-                               trace_seq_init(&p);
-                               print_str_arg(&p, data, size, event,
-                                             format, len_arg, arg);
-                               trace_seq_terminate(&p);
-                               trace_seq_puts(s, p.buffer);
-                               trace_seq_destroy(&p);
-                               arg = arg->next;
+}
+
+static int parse_arg_string(struct tep_print_parse **parse, const char *format)
+{
+       struct trace_seq s;
+       int ret = 0;
+
+       trace_seq_init(&s);
+       for (; *format; format++) {
+               if (*format == '\\') {
+                       format++;
+                       ret++;
+                       switch (*format) {
+                       case 'n':
+                               trace_seq_putc(&s, '\n');
+                               break;
+                       case 't':
+                               trace_seq_putc(&s, '\t');
+                               break;
+                       case 'r':
+                               trace_seq_putc(&s, '\r');
+                               break;
+                       case '\\':
+                               trace_seq_putc(&s, '\\');
                                break;
                        default:
-                               trace_seq_printf(s, ">%c<", *ptr);
-
+                               trace_seq_putc(&s, *format);
+                               break;
                        }
+               } else if (*format == '%') {
+                       if (*(format + 1) == '%') {
+                               trace_seq_putc(&s, '%');
+                               format++;
+                               ret++;
+                       } else
+                               break;
                } else
-                       trace_seq_putc(s, *ptr);
+                       trace_seq_putc(&s, *format);
+
+               ret++;
+       }
+       trace_seq_terminate(&s);
+       parse_arg_add(parse, s.buffer, PRINT_FMT_STRING, NULL, NULL, 0);
+       trace_seq_destroy(&s);
+
+       return ret;
+}
+
+static struct tep_print_parse *
+parse_args(struct tep_event *event, const char *format, struct tep_print_arg *arg)
+{
+       struct tep_print_parse *parse_ret = NULL;
+       struct tep_print_parse **parse = NULL;
+       int ret;
+       int len;
+
+       len = strlen(format);
+       while (*format) {
+               if (!parse_ret)
+                       parse = &parse_ret;
+               if (*format == '%' && *(format + 1) != '%')
+                       ret = parse_arg_format(parse, event, format, &arg);
+               else
+                       ret = parse_arg_string(parse, format);
+               if (*parse)
+                       parse = &((*parse)->next);
+
+               len -= ret;
+               if (len > 0)
+                       format += ret;
+               else
+                       break;
+       }
+       return parse_ret;
+}
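
parse_args() walks a print format once and caches it as a list of tep_print_parse entries: literal chunks, numeric conversions, pointer conversions and string conversions, each already paired with its tep_print_arg. A hand-worked illustration of that decomposition, using a made-up format string:

    /*
     * format: "comm=%s pid=%d flags=0x%08lx"
     *
     * resulting print_cache list:
     *   PRINT_FMT_STRING      "comm="
     *   PRINT_FMT_ARG_STRING  "%s"      -> first print arg
     *   PRINT_FMT_STRING      " pid="
     *   PRINT_FMT_ARG_DIGIT   "%d"      -> second print arg, ls = 0
     *   PRINT_FMT_STRING      " flags=0x"
     *   PRINT_FMT_ARG_DIGIT   "%08lx"   -> third print arg, ls = 1
     *
     * print_event_cache() below then walks this list for every record
     * instead of re-parsing the format string each time.
     */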
+
+static void print_event_cache(struct tep_print_parse *parse, struct trace_seq *s,
+                             void *data, int size, struct tep_event *event)
+{
+       int len_arg;
+
+       while (parse) {
+               if (parse->len_as_arg)
+                       len_arg = eval_num_arg(data, size, event, parse->len_as_arg);
+               switch (parse->type) {
+               case PRINT_FMT_ARG_DIGIT:
+                       print_arg_number(s, parse->format,
+                                       parse->len_as_arg ? len_arg : -1, data,
+                                        size, parse->ls, event, parse->arg);
+                       break;
+               case PRINT_FMT_ARG_POINTER:
+                       print_arg_pointer(s, parse->format,
+                                         parse->len_as_arg ? len_arg : 1,
+                                         data, size, event, parse->arg);
+                       break;
+               case PRINT_FMT_ARG_STRING:
+                       print_arg_string(s, parse->format,
+                                        parse->len_as_arg ? len_arg : -1,
+                                        data, size, event, parse->arg);
+                       break;
+               case PRINT_FMT_STRING:
+               default:
+                       trace_seq_printf(s, "%s", parse->format);
+                       break;
+               }
+               parse = parse->next;
        }
+}
+
+static void pretty_print(struct trace_seq *s, void *data, int size, struct tep_event *event)
+{
+       struct tep_print_parse *parse = event->print_fmt.print_cache;
+       struct tep_print_arg *args = NULL;
+       char *bprint_fmt = NULL;
 
        if (event->flags & TEP_EVENT_FL_FAILED) {
-out_failed:
                trace_seq_printf(s, "[FAILED TO PARSE]");
+               tep_print_fields(s, data, size, event);
+               return;
        }
 
-       if (args) {
+       if (event->flags & TEP_EVENT_FL_ISBPRINT) {
+               bprint_fmt = get_bprint_format(data, size, event);
+               args = make_bprint_args(bprint_fmt, data, size, event);
+               parse = parse_args(event, bprint_fmt, args);
+       }
+
+       print_event_cache(parse, s, data, size, event);
+
+       if (event->flags & TEP_EVENT_FL_ISBPRINT) {
+               free_parse_args(parse);
                free_args(args);
                free(bprint_fmt);
        }
@@ -6363,9 +6852,13 @@ enum tep_errno __tep_parse_format(struct tep_event **eventp,
                        *list = arg;
                        list = &arg->next;
                }
-               return 0;
        }
 
+       if (!(event->flags & TEP_EVENT_FL_ISBPRINT))
+               event->print_fmt.print_cache = parse_args(event,
+                                                         event->print_fmt.format,
+                                                         event->print_fmt.args);
+
        return 0;
 
  event_parse_failed:
@@ -7032,7 +7525,7 @@ void tep_free_event(struct tep_event *event)
 
        free(event->print_fmt.format);
        free_args(event->print_fmt.args);
-
+       free_parse_args(event->print_fmt.print_cache);
        free(event);
 }
 
@@ -7120,6 +7613,7 @@ void tep_free(struct tep_handle *tep)
        free(tep->events);
        free(tep->sort_events);
        free(tep->func_resolver);
+       tep_free_plugin_paths(tep);
 
        free(tep);
 }
index ad7799c..c29b693 100644 (file)
@@ -1,21 +1,7 @@
+/* SPDX-License-Identifier: LGPL-2.1 */
 /*
  * Copyright (C) 2009, 2010 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
  *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation;
- * version 2.1 of the License (not later!)
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this program; if not,  see <http://www.gnu.org/licenses>
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 #ifndef _PARSE_EVENTS_H
 #define _PARSE_EVENTS_H
@@ -272,9 +258,12 @@ struct tep_print_arg {
        };
 };
 
+struct tep_print_parse;
+
 struct tep_print_fmt {
        char                    *format;
        struct tep_print_arg    *args;
+       struct tep_print_parse  *print_cache;
 };
 
 struct tep_event {
@@ -393,14 +382,29 @@ struct tep_plugin_list;
 
 #define INVALID_PLUGIN_LIST_OPTION     ((char **)((unsigned long)-1))
 
+enum tep_plugin_load_priority {
+       TEP_PLUGIN_FIRST,
+       TEP_PLUGIN_LAST,
+};
+
+int tep_add_plugin_path(struct tep_handle *tep, char *path,
+                       enum tep_plugin_load_priority prio);
 struct tep_plugin_list *tep_load_plugins(struct tep_handle *tep);
 void tep_unload_plugins(struct tep_plugin_list *plugin_list,
                        struct tep_handle *tep);
+void tep_load_plugins_hook(struct tep_handle *tep, const char *suffix,
+                          void (*load_plugin)(struct tep_handle *tep,
+                                              const char *path,
+                                              const char *name,
+                                              void *data),
+                          void *data);
 char **tep_plugin_list_options(void);
 void tep_plugin_free_options_list(char **list);
 int tep_plugin_add_options(const char *name,
                           struct tep_plugin_option *options);
+int tep_plugin_add_option(const char *name, const char *val);
 void tep_plugin_remove_options(struct tep_plugin_option *options);
+void tep_plugin_print_options(struct trace_seq *s);
 void tep_print_plugins(struct trace_seq *s,
                        const char *prefix, const char *suffix,
                        const struct tep_plugin_list *list);
index e1f7ddd..e7c2acb 100644 (file)
@@ -13,6 +13,7 @@
 #include <sys/stat.h>
 #include <unistd.h>
 #include <dirent.h>
+#include <errno.h>
 #include "event-parse.h"
 #include "event-parse-local.h"
 #include "event-utils.h"
@@ -38,6 +39,12 @@ struct tep_plugin_list {
        void                    *handle;
 };
 
+struct tep_plugins_dir {
+       struct tep_plugins_dir          *next;
+       char                            *path;
+       enum tep_plugin_load_priority   prio;
+};
+
 static void lower_case(char *str)
 {
        if (!str)
@@ -247,6 +254,170 @@ void tep_plugin_remove_options(struct tep_plugin_option *options)
        }
 }
 
+static int parse_option_name(char **option, char **plugin)
+{
+       char *p;
+
+       *plugin = NULL;
+
+       if ((p = strstr(*option, ":"))) {
+               *plugin = *option;
+               *p = '\0';
+               *option = strdup(p + 1);
+               if (!*option)
+                       return -1;
+       }
+       return 0;
+}
+
+static struct tep_plugin_option *
+find_registered_option(const char *plugin, const char *option)
+{
+       struct registered_plugin_options *reg;
+       struct tep_plugin_option *op;
+       const char *op_plugin;
+
+       for (reg = registered_options; reg; reg = reg->next) {
+               for (op = reg->options; op->name; op++) {
+                       if (op->plugin_alias)
+                               op_plugin = op->plugin_alias;
+                       else
+                               op_plugin = op->file;
+
+                       if (plugin && strcmp(plugin, op_plugin) != 0)
+                               continue;
+                       if (strcmp(option, op->name) != 0)
+                               continue;
+
+                       return op;
+               }
+       }
+
+       return NULL;
+}
+
+static int process_option(const char *plugin, const char *option, const char *val)
+{
+       struct tep_plugin_option *op;
+
+       op = find_registered_option(plugin, option);
+       if (!op)
+               return 0;
+
+       return update_option_value(op, val);
+}
+
+/**
+ * tep_plugin_add_option - add an option/val pair to set plugin options
+ * @name: The name of the option (format: <plugin>:<option> or just <option>)
+ * @val: (optional) the value for the option
+ *
+ * Modify a plugin option. If @val is given, then the value of the option
+ * is set (note, some options just take a boolean, so @val must be either
+ * "1" or "0" or "true" or "false").
+ */
+int tep_plugin_add_option(const char *name, const char *val)
+{
+       struct trace_plugin_options *op;
+       char *option_str;
+       char *plugin;
+
+       option_str = strdup(name);
+       if (!option_str)
+               return -ENOMEM;
+
+       if (parse_option_name(&option_str, &plugin) < 0)
+               return -ENOMEM;
+
+       /* If the option exists, update the val */
+       for (op = trace_plugin_options; op; op = op->next) {
+               /* Both must be NULL or not NULL */
+               if ((!plugin || !op->plugin) && plugin != op->plugin)
+                       continue;
+               if (plugin && strcmp(plugin, op->plugin) != 0)
+                       continue;
+               if (strcmp(op->option, option_str) != 0)
+                       continue;
+
+               /* update option */
+               free(op->value);
+               if (val) {
+                       op->value = strdup(val);
+                       if (!op->value)
+                               goto out_free;
+               } else
+                       op->value = NULL;
+
+               /* plugin and option_str don't get freed at the end */
+               free(plugin);
+               free(option_str);
+
+               plugin = op->plugin;
+               option_str = op->option;
+               break;
+       }
+
+       /* If not found, create */
+       if (!op) {
+               op = malloc(sizeof(*op));
+               if (!op)
+                       goto out_free;
+               memset(op, 0, sizeof(*op));
+               op->plugin = plugin;
+               op->option = option_str;
+               if (val) {
+                       op->value = strdup(val);
+                       if (!op->value) {
+                               free(op);
+                               goto out_free;
+                       }
+               }
+               op->next = trace_plugin_options;
+               trace_plugin_options = op;
+       }
+
+       return process_option(plugin, option_str, val);
+
+out_free:
+       free(plugin);
+       free(option_str);
+       return -ENOMEM;
+}
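
A minimal usage sketch of the new call (the "ftrace:offset" option is the one added to the function plugin later in this patch; error handling trimmed, helper name made up):

    #include "event-parse.h"

    /* Set a plugin option, then load the plugins that consume it. */
    static struct tep_plugin_list *load_with_offsets(struct tep_handle *tep)
    {
            /* "<plugin>:<option>" form; boolean options take "1"/"0" or "true"/"false" */
            if (tep_plugin_add_option("ftrace:offset", "1") < 0)
                    return NULL;

            return tep_load_plugins(tep);
    }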
+
+static void print_op_data(struct trace_seq *s, const char *name,
+                         const char *op)
+{
+       if (op)
+               trace_seq_printf(s, "%8s:\t%s\n", name, op);
+}
+
+/**
+ * tep_plugin_print_options - print out the registered plugin options
+ * @s: The trace_seq descriptor to write the plugin options into
+ *
+ * Writes a list of options into trace_seq @s.
+ */
+void tep_plugin_print_options(struct trace_seq *s)
+{
+       struct registered_plugin_options *reg;
+       struct tep_plugin_option *op;
+
+       for (reg = registered_options; reg; reg = reg->next) {
+               if (reg != registered_options)
+                       trace_seq_printf(s, "============\n");
+               for (op = reg->options; op->name; op++) {
+                       if (op != reg->options)
+                               trace_seq_printf(s, "------------\n");
+                       print_op_data(s, "file", op->file);
+                       print_op_data(s, "plugin", op->plugin_alias);
+                       print_op_data(s, "option", op->name);
+                       print_op_data(s, "desc", op->description);
+                       print_op_data(s, "value", op->value);
+                       trace_seq_printf(s, "%8s:\t%d\n", "set", op->set);
+               }
+       }
+}
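
A small sketch of dumping the registered options to stdout; the trace_seq handling mirrors what the library does internally above (illustration only, header names as in the library tree):

    #include <stdio.h>
    #include "event-parse.h"
    #include "trace-seq.h"

    /* Print every registered plugin option. */
    static void dump_plugin_options(void)
    {
            struct trace_seq s;

            trace_seq_init(&s);
            tep_plugin_print_options(&s);
            trace_seq_terminate(&s);
            printf("%s", s.buffer);
            trace_seq_destroy(&s);
    }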
+
 /**
  * tep_print_plugins - print out the list of plugins loaded
 * @s: the trace_seq descriptor to write to
@@ -273,6 +444,7 @@ load_plugin(struct tep_handle *tep, const char *path,
            const char *file, void *data)
 {
        struct tep_plugin_list **plugin_list = data;
+       struct tep_plugin_option *options;
        tep_plugin_load_func func;
        struct tep_plugin_list *list;
        const char *alias;
@@ -297,6 +469,16 @@ load_plugin(struct tep_handle *tep, const char *path,
        if (!alias)
                alias = file;
 
+       options = dlsym(handle, TEP_PLUGIN_OPTIONS_NAME);
+       if (options) {
+               while (options->name) {
+                       ret = update_option(alias, options);
+                       if (ret < 0)
+                               goto out_free;
+                       options++;
+               }
+       }
+
        func = dlsym(handle, TEP_PLUGIN_LOADER_NAME);
        if (!func) {
                warning("could not find func '%s' in plugin '%s'\n%s\n",
@@ -365,28 +547,53 @@ load_plugins_dir(struct tep_handle *tep, const char *suffix,
        closedir(dir);
 }
 
-static void
-load_plugins(struct tep_handle *tep, const char *suffix,
-            void (*load_plugin)(struct tep_handle *tep,
-                                const char *path,
-                                const char *name,
-                                void *data),
-            void *data)
+/**
+ * tep_load_plugins_hook - call a user specified callback to load a plugin
+ * @tep: a handle to the trace event context
+ * @suffix: filter only plugin files with given suffix
+ * @load_plugin: user specified callback, called for each plugin file
+ * @data: custom context, passed to @load_plugin
+ *
+ * Searches for traceevent plugin files and calls @load_plugin for each one found.
+ * The order of the plugin search is:
+ *  - Directories, specified in @tep->plugins_dir and priority TEP_PLUGIN_FIRST
+ *  - Directory, specified at compile time with PLUGIN_TRACEEVENT_DIR
+ *  - Directory, specified by environment variable TRACEEVENT_PLUGIN_DIR
+ *  - In user's home: ~/.local/lib/traceevent/plugins/
+ *  - Directories, specified in @tep->plugins_dir and priority TEP_PLUGIN_LAST
+ *
+ */
+void tep_load_plugins_hook(struct tep_handle *tep, const char *suffix,
+                          void (*load_plugin)(struct tep_handle *tep,
+                                              const char *path,
+                                              const char *name,
+                                              void *data),
+                          void *data)
 {
+       struct tep_plugins_dir *dir = NULL;
        char *home;
        char *path;
        char *envdir;
        int ret;
 
-       if (tep->flags & TEP_DISABLE_PLUGINS)
+       if (tep && tep->flags & TEP_DISABLE_PLUGINS)
                return;
 
+       if (tep)
+               dir = tep->plugins_dir;
+       while (dir) {
+               if (dir->prio == TEP_PLUGIN_FIRST)
+                       load_plugins_dir(tep, suffix, dir->path,
+                                        load_plugin, data);
+               dir = dir->next;
+       }
+
        /*
         * If a system plugin directory was defined,
         * check that first.
         */
 #ifdef PLUGIN_DIR
-       if (!(tep->flags & TEP_DISABLE_SYS_PLUGINS))
+       if (!tep || !(tep->flags & TEP_DISABLE_SYS_PLUGINS))
                load_plugins_dir(tep, suffix, PLUGIN_DIR,
                                 load_plugin, data);
 #endif
@@ -415,6 +622,15 @@ load_plugins(struct tep_handle *tep, const char *suffix,
 
        load_plugins_dir(tep, suffix, path, load_plugin, data);
 
+       if (tep)
+               dir = tep->plugins_dir;
+       while (dir) {
+               if (dir->prio == TEP_PLUGIN_LAST)
+                       load_plugins_dir(tep, suffix, dir->path,
+                                        load_plugin, data);
+               dir = dir->next;
+       }
+
        free(path);
 }
 
@@ -423,10 +639,59 @@ tep_load_plugins(struct tep_handle *tep)
 {
        struct tep_plugin_list *list = NULL;
 
-       load_plugins(tep, ".so", load_plugin, &list);
+       tep_load_plugins_hook(tep, ".so", load_plugin, &list);
        return list;
 }
 
+/**
+ * tep_add_plugin_path - Add a new plugin directory.
+ * @tep: Trace event handler.
+ * @path: Path to a directory. All plugin files in that
+ *       directory will be loaded.
+ * @prio: Load priority of the plugins in that directory.
+ *
+ * Returns -1 in case of an error, 0 otherwise.
+ */
+int tep_add_plugin_path(struct tep_handle *tep, char *path,
+                       enum tep_plugin_load_priority prio)
+{
+       struct tep_plugins_dir *dir;
+
+       if (!tep || !path)
+               return -1;
+
+       dir = calloc(1, sizeof(*dir));
+       if (!dir)
+               return -1;
+
+       dir->path = strdup(path);
+       if (!dir->path) {
+               free(dir);
+               return -1;
+       }
+       dir->prio = prio;
+       dir->next = tep->plugins_dir;
+       tep->plugins_dir = dir;
+
+       return 0;
+}
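
Typical use of the new path registration, done before tep_load_plugins() so the extra directory is scanned ahead of the standard locations (directory name made up for the example):

    #include "event-parse.h"

    /* Register an application-private plugin directory. */
    static int register_app_plugins(struct tep_handle *tep)
    {
            return tep_add_plugin_path(tep, "/usr/lib/myapp/traceevent-plugins",
                                       TEP_PLUGIN_FIRST);
    }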
+
+void tep_free_plugin_paths(struct tep_handle *tep)
+{
+       struct tep_plugins_dir *dir;
+
+       if (!tep)
+               return;
+
+       dir = tep->plugins_dir;
+       while (dir) {
+               tep->plugins_dir = tep->plugins_dir->next;
+               free(dir->path);
+               free(dir);
+               dir = tep->plugins_dir;
+       }
+}
+
 void
 tep_unload_plugins(struct tep_plugin_list *plugin_list, struct tep_handle *tep)
 {
index 5fa8292..a2b5220 100644 (file)
@@ -1,22 +1,7 @@
+/* SPDX-License-Identifier: LGPL-2.1 */
 /*
  * Copyright (C) 2012 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
  *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation;
- * version 2.1 of the License (not later!)
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 #ifndef _KBUFFER_H
 #define _KBUFFER_H
index 210d269..dd4da82 100644 (file)
@@ -5,6 +5,8 @@ plugin_kvm-y          += plugin_kvm.o
 plugin_mac80211-y     += plugin_mac80211.o
 plugin_sched_switch-y += plugin_sched_switch.o
 plugin_function-y     += plugin_function.o
+plugin_futex-y        += plugin_futex.o
 plugin_xen-y          += plugin_xen.o
 plugin_scsi-y         += plugin_scsi.o
 plugin_cfg80211-y     += plugin_cfg80211.o
+plugin_tlb-y          += plugin_tlb.o
\ No newline at end of file
index 680d883..47e8025 100644 (file)
@@ -134,9 +134,11 @@ PLUGINS += plugin_kvm.so
 PLUGINS += plugin_mac80211.so
 PLUGINS += plugin_sched_switch.so
 PLUGINS += plugin_function.so
+PLUGINS += plugin_futex.so
 PLUGINS += plugin_xen.so
 PLUGINS += plugin_scsi.so
 PLUGINS += plugin_cfg80211.so
+PLUGINS += plugin_tlb.so
 
 PLUGINS    := $(addprefix $(OUTPUT),$(PLUGINS))
 PLUGINS_IN := $(PLUGINS:.so=-in.o)
index 7770fcb..807b16e 100644 (file)
@@ -1,21 +1,6 @@
+// SPDX-License-Identifier: LGPL-2.1
 /*
  * Copyright (C) 2009, 2010 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation;
- * version 2.1 of the License (not later!)
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this program; if not,  see <http://www.gnu.org/licenses>
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 #include <stdio.h>
 #include <stdlib.h>
@@ -50,12 +35,20 @@ struct tep_plugin_option plugin_options[] =
                .set = 1,
        },
        {
+               .name = "offset",
+               .plugin_alias = "ftrace",
+               .description =
+               "Show function names as well as their offsets",
+               .set = 0,
+       },
+       {
                .name = NULL,
        }
 };
 
 static struct tep_plugin_option *ftrace_parent = &plugin_options[0];
 static struct tep_plugin_option *ftrace_indent = &plugin_options[1];
+static struct tep_plugin_option *ftrace_offset = &plugin_options[2];
 
 static void add_child(struct func_stack *stack, const char *child, int pos)
 {
@@ -123,6 +116,18 @@ static int add_and_get_index(const char *parent, const char *child, int cpu)
        return 0;
 }
 
+static void show_function(struct trace_seq *s, struct tep_handle *tep,
+                         const char *func, unsigned long long function)
+{
+       unsigned long long offset;
+
+       trace_seq_printf(s, "%s", func);
+       if (ftrace_offset->set) {
+               offset = tep_find_function_address(tep, function);
+               trace_seq_printf(s, "+0x%x ", (int)(function - offset));
+       }
+}
+
 static int function_handler(struct trace_seq *s, struct tep_record *record,
                            struct tep_event *event, void *context)
 {
@@ -149,14 +154,14 @@ static int function_handler(struct trace_seq *s, struct tep_record *record,
        trace_seq_printf(s, "%*s", index*3, "");
 
        if (func)
-               trace_seq_printf(s, "%s", func);
+               show_function(s, tep, func, function);
        else
                trace_seq_printf(s, "0x%llx", function);
 
        if (ftrace_parent->set) {
                trace_seq_printf(s, " <-- ");
                if (parent)
-                       trace_seq_printf(s, "%s", parent);
+                       show_function(s, tep, parent, pfunction);
                else
                        trace_seq_printf(s, "0x%llx", pfunction);
        }
@@ -164,11 +169,93 @@ static int function_handler(struct trace_seq *s, struct tep_record *record,
        return 0;
 }
 
+static int
+trace_stack_handler(struct trace_seq *s, struct tep_record *record,
+                   struct tep_event *event, void *context)
+{
+       struct tep_format_field *field;
+       unsigned long long addr;
+       const char *func;
+       int long_size;
+       void *data = record->data;
+
+       field = tep_find_any_field(event, "caller");
+       if (!field) {
+               trace_seq_printf(s, "<CANT FIND FIELD %s>", "caller");
+               return 0;
+       }
+
+       trace_seq_puts(s, "<stack trace >\n");
+
+       long_size = tep_get_long_size(event->tep);
+
+       for (data += field->offset; data < record->data + record->size;
+            data += long_size) {
+               addr = tep_read_number(event->tep, data, long_size);
+
+               if ((long_size == 8 && addr == (unsigned long long)-1) ||
+                   ((int)addr == -1))
+                       break;
+
+               func = tep_find_function(event->tep, addr);
+               if (func)
+                       trace_seq_printf(s, "=> %s (%llx)\n", func, addr);
+               else
+                       trace_seq_printf(s, "=> %llx\n", addr);
+       }
+
+       return 0;
+}
+
+static int
+trace_raw_data_handler(struct trace_seq *s, struct tep_record *record,
+                   struct tep_event *event, void *context)
+{
+       struct tep_format_field *field;
+       unsigned long long id;
+       int long_size;
+       void *data = record->data;
+
+       if (tep_get_field_val(s, event, "id", record, &id, 1))
+               return trace_seq_putc(s, '!');
+
+       trace_seq_printf(s, "# %llx", id);
+
+       field = tep_find_any_field(event, "buf");
+       if (!field) {
+               trace_seq_printf(s, "<CANT FIND FIELD %s>", "buf");
+               return 0;
+       }
+
+       long_size = tep_get_long_size(event->tep);
+
+       for (data += field->offset; data < record->data + record->size;
+            data += long_size) {
+               int size = sizeof(long);
+               int left = (record->data + record->size) - data;
+               int i;
+
+               if (size > left)
+                       size = left;
+
+               for (i = 0; i < size; i++)
+                       trace_seq_printf(s, " %02x", *(unsigned char *)(data + i));
+       }
+
+       return 0;
+}
+
 int TEP_PLUGIN_LOADER(struct tep_handle *tep)
 {
        tep_register_event_handler(tep, -1, "ftrace", "function",
                                   function_handler, NULL);
 
+       tep_register_event_handler(tep, -1, "ftrace", "kernel_stack",
+                                     trace_stack_handler, NULL);
+
+       tep_register_event_handler(tep, -1, "ftrace", "raw_data",
+                                     trace_raw_data_handler, NULL);
+
        tep_plugin_add_options("ftrace", plugin_options);
 
        return 0;
diff --git a/tools/lib/traceevent/plugins/plugin_futex.c b/tools/lib/traceevent/plugins/plugin_futex.c
new file mode 100644 (file)
index 0000000..eb7c9f8
--- /dev/null
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: LGPL-2.1
+/*
+ * Copyright (C) 2017 National Instruments Corp.
+ *
+ * Author: Julia Cartwright <julia@ni.com>
+ *
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <linux/futex.h>
+
+#include "event-parse.h"
+
+#define ARRAY_SIZE(_a) (sizeof(_a) / sizeof((_a)[0]))
+
+struct futex_args {
+       unsigned long long      uaddr;
+       unsigned long long      op;
+       unsigned long long      val;
+       unsigned long long      utime; /* or val2 */
+       unsigned long long      uaddr2;
+       unsigned long long      val3;
+};
+
+struct futex_op {
+       const char      *name;
+       const char      *fmt_val;
+       const char      *fmt_utime;
+       const char      *fmt_uaddr2;
+       const char      *fmt_val3;
+};
+
+static const struct futex_op futex_op_tbl[] = {
+       {            "FUTEX_WAIT", " val=0x%08llx", " utime=0x%08llx",               NULL,             NULL },
+       {            "FUTEX_WAKE",     " val=%llu",              NULL,               NULL,             NULL },
+       {              "FUTEX_FD",     " val=%llu",              NULL,               NULL,             NULL },
+       {         "FUTEX_REQUEUE",     " val=%llu",      " val2=%llu", " uaddr2=0x%08llx",             NULL },
+       {     "FUTEX_CMP_REQUEUE",     " val=%llu",      " val2=%llu", " uaddr2=0x%08llx", " val3=0x%08llx" },
+       {         "FUTEX_WAKE_OP",     " val=%llu",      " val2=%llu", " uaddr2=0x%08llx", " val3=0x%08llx" },
+       {         "FUTEX_LOCK_PI",            NULL, " utime=0x%08llx",               NULL,             NULL },
+       {       "FUTEX_UNLOCK_PI",            NULL,              NULL,               NULL,             NULL },
+       {      "FUTEX_TRYLOCK_PI",            NULL,              NULL,               NULL,             NULL },
+       {     "FUTEX_WAIT_BITSET", " val=0x%08llx", " utime=0x%08llx",               NULL, " val3=0x%08llx" },
+       {     "FUTEX_WAKE_BITSET",     " val=%llu",              NULL,               NULL, " val3=0x%08llx" },
+       { "FUTEX_WAIT_REQUEUE_PI", " val=0x%08llx", " utime=0x%08llx", " uaddr2=0x%08llx", " val3=0x%08llx" },
+       {  "FUTEX_CMP_REQUEUE_PI",     " val=%llu",      " val2=%llu", " uaddr2=0x%08llx", " val3=0x%08llx" },
+};
+
+
+static void futex_print(struct trace_seq *s, const struct futex_args *args,
+                       const struct futex_op *fop)
+{
+       trace_seq_printf(s, " uaddr=0x%08llx", args->uaddr);
+
+       if (fop->fmt_val)
+               trace_seq_printf(s, fop->fmt_val, args->val);
+
+       if (fop->fmt_utime)
+               trace_seq_printf(s, fop->fmt_utime, args->utime);
+
+       if (fop->fmt_uaddr2)
+               trace_seq_printf(s, fop->fmt_uaddr2, args->uaddr2);
+
+       if (fop->fmt_val3)
+               trace_seq_printf(s, fop->fmt_val3, args->val3);
+}
+
+static int futex_handler(struct trace_seq *s, struct tep_record *record,
+                        struct tep_event *event, void *context)
+{
+       const struct futex_op *fop;
+       struct futex_args args;
+       unsigned long long cmd;
+
+       if (tep_get_field_val(s, event, "uaddr", record, &args.uaddr, 1))
+               return 1;
+
+       if (tep_get_field_val(s, event, "op", record, &args.op, 1))
+               return 1;
+
+       if (tep_get_field_val(s, event, "val", record, &args.val, 1))
+               return 1;
+
+       if (tep_get_field_val(s, event, "utime", record, &args.utime, 1))
+               return 1;
+
+       if (tep_get_field_val(s, event, "uaddr2", record, &args.uaddr2, 1))
+               return 1;
+
+       if (tep_get_field_val(s, event, "val3", record, &args.val3, 1))
+               return 1;
+
+       cmd = args.op & FUTEX_CMD_MASK;
+       if (cmd >= ARRAY_SIZE(futex_op_tbl))
+               return 1;
+
+       fop = &futex_op_tbl[cmd];
+
+       trace_seq_printf(s, "op=%s", fop->name);
+
+       if (args.op & FUTEX_PRIVATE_FLAG)
+               trace_seq_puts(s, "|FUTEX_PRIVATE_FLAG");
+
+       if (args.op & FUTEX_CLOCK_REALTIME)
+               trace_seq_puts(s, "|FUTEX_CLOCK_REALTIME");
+
+       futex_print(s, &args, fop);
+       return 0;
+}
+
+int TEP_PLUGIN_LOADER(struct tep_handle *tep)
+{
+       tep_register_event_handler(tep, -1, "syscalls", "sys_enter_futex",
+                                  futex_handler, NULL);
+       return 0;
+}
+
+void TEP_PLUGIN_UNLOADER(struct tep_handle *tep)
+{
+       tep_unregister_event_handler(tep, -1, "syscalls", "sys_enter_futex",
+                                    futex_handler, NULL);
+}
index bb434e0..d984667 100644 (file)
@@ -1,22 +1,7 @@
+// SPDX-License-Identifier: LGPL-2.1
 /*
  * Copyright (C) 2009 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
  * Copyright (C) 2009 Johannes Berg <johannes@sipsolutions.net>
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation;
- * version 2.1 of the License (not later!)
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this program; if not,  see <http://www.gnu.org/licenses>
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 #include <stdio.h>
 #include <stdlib.h>
index 04fc125..69111a6 100644 (file)
@@ -1,21 +1,6 @@
+// SPDX-License-Identifier: LGPL-2.1
 /*
  * Copyright (C) 2010 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation;
- * version 2.1 of the License (not later!)
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this program; if not,  see <http://www.gnu.org/licenses>
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 #include <stdio.h>
 #include <stdlib.h>
index edaec5d..4b4f7f9 100644 (file)
@@ -1,21 +1,6 @@
+// SPDX-License-Identifier: LGPL-2.1
 /*
  * Copyright (C) 2009 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation;
- * version 2.1 of the License (not later!)
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this program; if not,  see <http://www.gnu.org/licenses>
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 #include <stdio.h>
 #include <stdlib.h>
index c8e6230..51ceeb9 100644 (file)
@@ -1,21 +1,6 @@
+// SPDX-License-Identifier: LGPL-2.1
 /*
  * Copyright (C) 2009 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation;
- * version 2.1 of the License (not later!)
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this program; if not,  see <http://www.gnu.org/licenses>
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 #include <stdio.h>
 #include <stdlib.h>
@@ -155,7 +140,23 @@ static const char *disassemble(unsigned char *insn, int len, uint64_t rip,
        _ER(EXIT_WRITE_DR5,     0x035)          \
        _ER(EXIT_WRITE_DR6,     0x036)          \
        _ER(EXIT_WRITE_DR7,     0x037)          \
-       _ER(EXIT_EXCP_BASE,     0x040)          \
+       _ER(EXIT_EXCP_DE,       0x040)          \
+       _ER(EXIT_EXCP_DB,       0x041)          \
+       _ER(EXIT_EXCP_BP,       0x043)          \
+       _ER(EXIT_EXCP_OF,       0x044)          \
+       _ER(EXIT_EXCP_BR,       0x045)          \
+       _ER(EXIT_EXCP_UD,       0x046)          \
+       _ER(EXIT_EXCP_NM,       0x047)          \
+       _ER(EXIT_EXCP_DF,       0x048)          \
+       _ER(EXIT_EXCP_TS,       0x04a)          \
+       _ER(EXIT_EXCP_NP,       0x04b)          \
+       _ER(EXIT_EXCP_SS,       0x04c)          \
+       _ER(EXIT_EXCP_GP,       0x04d)          \
+       _ER(EXIT_EXCP_PF,       0x04e)          \
+       _ER(EXIT_EXCP_MF,       0x050)          \
+       _ER(EXIT_EXCP_AC,       0x051)          \
+       _ER(EXIT_EXCP_MC,       0x052)          \
+       _ER(EXIT_EXCP_XF,       0x053)          \
        _ER(EXIT_INTR,          0x060)          \
        _ER(EXIT_NMI,           0x061)          \
        _ER(EXIT_SMI,           0x062)          \
@@ -201,7 +202,10 @@ static const char *disassemble(unsigned char *insn, int len, uint64_t rip,
        _ER(EXIT_MONITOR,       0x08a)          \
        _ER(EXIT_MWAIT,         0x08b)          \
        _ER(EXIT_MWAIT_COND,    0x08c)          \
-       _ER(EXIT_NPF,           0x400)          \
+       _ER(EXIT_XSETBV,        0x08d)          \
+       _ER(EXIT_NPF,           0x400)          \
+       _ER(EXIT_AVIC_INCOMPLETE_IPI,           0x401)  \
+       _ER(EXIT_AVIC_UNACCELERATED_ACCESS,     0x402)  \
        _ER(EXIT_ERR,           -1)
 
 #define _ER(reason, val)       { #reason, val },
@@ -241,7 +245,7 @@ static const char *find_exit_reason(unsigned isa, int val)
                }
        if (!strings)
                return "UNKNOWN-ISA";
-       for (i = 0; strings[i].val >= 0; i++)
+       for (i = 0; strings[i].str; i++)
                if (strings[i].val == val)
                        break;
 
index 884303c..f48071e 100644 (file)
@@ -1,21 +1,6 @@
+// SPDX-License-Identifier: LGPL-2.1
 /*
  * Copyright (C) 2009 Johannes Berg <johannes@sipsolutions.net>
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation;
- * version 2.1 of the License (not later!)
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this program; if not,  see <http://www.gnu.org/licenses>
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 #include <stdio.h>
 #include <stdlib.h>
index 957389a..e12fa10 100644 (file)
@@ -1,21 +1,6 @@
+// SPDX-License-Identifier: LGPL-2.1
 /*
  * Copyright (C) 2009, 2010 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation;
- * version 2.1 of the License (not later!)
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this program; if not,  see <http://www.gnu.org/licenses>
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 #include <stdio.h>
 #include <stdlib.h>
diff --git a/tools/lib/traceevent/plugins/plugin_tlb.c b/tools/lib/traceevent/plugins/plugin_tlb.c
new file mode 100644 (file)
index 0000000..43657fb
--- /dev/null
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: LGPL-2.1
+/*
+ * Copyright (C) 2015 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "event-parse.h"
+
+enum tlb_flush_reason {
+       TLB_FLUSH_ON_TASK_SWITCH,
+       TLB_REMOTE_SHOOTDOWN,
+       TLB_LOCAL_SHOOTDOWN,
+       TLB_LOCAL_MM_SHOOTDOWN,
+       NR_TLB_FLUSH_REASONS,
+};
+
+static int tlb_flush_handler(struct trace_seq *s, struct tep_record *record,
+                            struct tep_event *event, void *context)
+{
+       unsigned long long val;
+
+       trace_seq_printf(s, "pages=");
+
+       tep_print_num_field(s, "%ld", event, "pages", record, 1);
+
+       if (tep_get_field_val(s, event, "reason", record, &val, 1) < 0)
+               return -1;
+
+       trace_seq_puts(s, " reason=");
+
+       switch (val) {
+       case TLB_FLUSH_ON_TASK_SWITCH:
+               trace_seq_puts(s, "flush on task switch");
+               break;
+       case TLB_REMOTE_SHOOTDOWN:
+               trace_seq_puts(s, "remote shootdown");
+               break;
+       case TLB_LOCAL_SHOOTDOWN:
+               trace_seq_puts(s, "local shootdown");
+               break;
+       case TLB_LOCAL_MM_SHOOTDOWN:
+               trace_seq_puts(s, "local mm shootdown");
+               break;
+       }
+
+       trace_seq_printf(s, " (%lld)", val);
+
+       return 0;
+}
+
+int TEP_PLUGIN_LOADER(struct tep_handle *tep)
+{
+       tep_register_event_handler(tep, -1, "tlb", "tlb_flush",
+                                  tlb_flush_handler, NULL);
+
+       return 0;
+}
+
+void TEP_PLUGIN_UNLOADER(struct tep_handle *tep)
+{
+       tep_unregister_event_handler(tep, -1,
+                                    "tlb", "tlb_flush",
+                                    tlb_flush_handler, NULL);
+}
index e817179..d3740c8 100644 (file)
@@ -18,6 +18,7 @@
                l       synthesize last branch entries (use with i or x)
                L       synthesize last branch entries on existing event records
                s       skip initial number of events
+               q       quicker (less detailed) decoding
 
        The default is all events i.e. the same as --itrace=ibxwpe,
        except for perf script where it is --itrace=ce
        --itrace=i0nss1000000
 
        skips the first million instructions.
+
+       The 'e' option may be followed by flags which affect what errors will or
+       will not be reported. Each flag must be preceded by either '+' or '-'.
+       The flags are:
+               o       overflow
+               l       trace data lost
+
+       If supported, the 'd' option may be followed by flags which affect what
+       debug messages will or will not be logged. Each flag must be preceded
+       by either '+' or '-'. The flags are:
+               a       all perf events
+
+       If supported, the 'q' option may be repeated to increase the effect.
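As a rough illustration of the flag syntax described above (assuming an AUX-trace recording such as Intel PT already exists in perf.data), the modifiers compose with the existing --itrace letters; these invocations are hypothetical examples:

	perf script --itrace=qi        # quicker, less detailed instruction decoding
	perf script --itrace=e-o-l     # report decoder errors, but suppress overflow and data-lost errors
	perf script --itrace=d+a       # write the decoder debug log, including all perf events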
index bad1651..a0529c7 100644 (file)
@@ -49,6 +49,9 @@ SUBSYSTEM
 'sched'::
        Scheduler and IPC mechanisms.
 
+'syscall'::
+       System call performance (throughput).
+
 'mem'::
        Memory access performance.
 
@@ -137,6 +140,14 @@ Example of *pipe*
                 59004 ops/sec
 ---------------------
 
+SUITES FOR 'syscall'
+~~~~~~~~~~~~~~~~~~~~
+*basic*::
+Suite for evaluating the throughput of a basic system call (reported as both usecs/op and ops/sec).
+It uses a single thread repeatedly calling getppid(2), a simple syscall whose result is not
+cached by glibc.
+
+
 SUITES FOR 'mem'
 ~~~~~~~~~~~~~~~~
 *memcpy*::
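Assuming the 'syscall' collection documented above is wired into perf bench (see the builtin-bench and bench/syscall.c changes further below), the suite would be invoked along these lines:

	perf bench syscall basic
	perf bench syscall all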
index c7d3df5..76408d9 100644 (file)
@@ -614,8 +614,9 @@ trace.*::
 
 ftrace.*::
        ftrace.tracer::
-               Can be used to select the default tracer. Possible values are
-               'function' and 'function_graph'.
+               Can be used to select the default tracer when neither the -G nor
+               the -F option is specified. Possible values are 'function' and
+               'function_graph'.
 
 llvm.*::
        llvm.clang-path::
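For reference, the ftrace.tracer value described above can presumably be set with the usual perf-config key=value syntax, e.g.:

	perf config ftrace.tracer=function_graph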
index c871807..726b9bc 100644 (file)
@@ -27,6 +27,9 @@ OPTIONS for 'convert'
 --to-ctf::
        Triggers the CTF conversion, specify the path of CTF data directory.
 
+--tod::
+       Convert time to wall clock time.
+
 -i::
        Specify input perf data file path.
 
index b80c843..78358af 100644 (file)
@@ -24,16 +24,28 @@ OPTIONS
 
 -t::
 --tracer=::
-       Tracer to use: function_graph or function.
+       Tracer to use when neither the -G nor the -F option is
+       specified: function_graph or function.
 
 -v::
 --verbose=::
         Verbosity level.
 
+-F::
+--funcs::
+        List all available functions to trace.
+
 -p::
 --pid=::
        Trace on existing process id (comma separated list).
 
+--tid=::
+       Trace on existing thread id (comma separated list).
+
+-D::
+--delay::
+       Time (ms) to wait before starting tracing after program start.
+
 -a::
 --all-cpus::
        Force system-wide collection.  Scripts run without a <command>
@@ -48,39 +60,58 @@ OPTIONS
        Ranges of CPUs are specified with -: 0-2.
        Default is to trace on all online CPUs.
 
+-m::
+--buffer-size::
+       Set the size of per-cpu tracing buffer, <size> is expected to
+       be a number with appended unit character - B/K/M/G.
+
+--inherit::
+       Trace child processes spawned by our target.
+
 -T::
 --trace-funcs=::
-       Only trace functions given by the argument.  Multiple functions
-       can be given by using this option more than once.  The function
-       argument also can be a glob pattern.  It will be passed to
-       'set_ftrace_filter' in tracefs.
+       Select function tracer and set function filter on the given
+       function (or a glob pattern). Multiple functions can be given
+       by using this option more than once. The function argument also
+       can be a glob pattern. It will be passed to 'set_ftrace_filter'
+       in tracefs.
 
 -N::
 --notrace-funcs=::
-       Do not trace functions given by the argument.  Like -T option,
-       this can be used more than once to specify multiple functions
-       (or glob patterns).  It will be passed to 'set_ftrace_notrace'
-       in tracefs.
+       Select function tracer and do not trace functions given by the
+       argument.  Like -T option, this can be used more than once to
+       specify multiple functions (or glob patterns).  It will be
+       passed to 'set_ftrace_notrace' in tracefs.
+
+--func-opts::
+       List of options allowed to set:
+         call-graph - Display kernel stack trace for function tracer.
+         irq-info   - Display irq context info for function tracer.
 
 -G::
 --graph-funcs=::
-       Set graph filter on the given function (or a glob pattern).
-       This is useful for the function_graph tracer only and enables
-       tracing for functions executed from the given function.
-       This can be used more than once to specify multiple functions.
-       It will be passed to 'set_graph_function' in tracefs.
+       Select function_graph tracer and set graph filter on the given
+       function (or a glob pattern). This is useful to trace
+       functions executed from the given function. This can be used more
+       than once to specify multiple functions. It will be passed to
+       'set_graph_function' in tracefs.
 
 -g::
 --nograph-funcs=::
-       Set graph notrace filter on the given function (or a glob pattern).
-       Like -G option, this is useful for the function_graph tracer only
-       and disables tracing for function executed from the given function.
-       This can be used more than once to specify multiple functions.
-       It will be passed to 'set_graph_notrace' in tracefs.
+       Select function_graph tracer and set graph notrace filter on the
+       given function (or a glob pattern). Like the -G option, this is useful
+       for the function_graph tracer only and disables tracing for functions
+       executed from the given function. This can be used more than once to
+       specify multiple functions. It will be passed to 'set_graph_notrace'
+       in tracefs.
 
--D::
---graph-depth=::
-       Set max depth for function graph tracer to follow
+--graph-opts::
+       List of options allowed to set:
+         nosleep-time - Measure on-CPU time only for function_graph tracer.
+         noirqs       - Ignore functions that happen inside interrupt.
+         verbose      - Show process names, PIDs, timestamps, etc.
+         thresh=<n>   - Setup trace duration threshold in microseconds.
+         depth=<n>    - Set max depth for function graph tracer to follow.
 
 SEE ALSO
 --------
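Putting the new perf-ftrace options together, a couple of hypothetical sessions (the function globs, buffer size, delay and PID are examples only):

	perf ftrace -G 'vfs_*' --graph-opts depth=5,thresh=100 -m 16M -p 1234
	perf ftrace -T 'schedule*' --func-opts call-graph -D 500 --inherit -- sleep 1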
index f4cd49a..d5a266d 100644 (file)
@@ -825,6 +825,7 @@ The letters are:
        l       synthesize last branch entries (use with i or x)
        L       synthesize last branch entries on existing event records
        s       skip initial number of events
+       q       quicker (less detailed) decoding
 
 "Instructions" events look like they were recorded by "perf record -e
 instructions".
@@ -871,11 +872,24 @@ Developer Manuals.
 
 Error events show where the decoder lost the trace.  Error events
 are quite important.  Users must know if what they are seeing is a complete
-picture or not.
+picture or not. The "e" option may be followed by flags which affect what errors
+will or will not be reported.  Each flag must be preceded by either '+' or '-'.
+The flags supported by Intel PT are:
+               -o      Suppress overflow errors
+               -l      Suppress trace data lost errors
+For example, for errors but not overflow or data lost errors:
+
+       --itrace=e-o-l
 
 The "d" option will cause the creation of a file "intel_pt.log" containing all
 decoded packets and instructions.  Note that this option slows down the decoder
-and that the resulting file may be very large.
+and that the resulting file may be very large.  The "d" option may be followed
+by flags which affect what debug messages will or will not be logged. Each flag
+must be preceded by either '+' or '-'. The flags supported by Intel PT are:
+               -a      Suppress logging of perf events
+               +a      Log all perf events
+By default, logged perf events are filtered by any specified time ranges, but
+flag +a overrides that.
 
 In addition, the period of the "instructions" event can be specified. e.g.
 
@@ -956,6 +970,51 @@ at the beginning. This is useful to ignore initialization code.
 
 skips the first million instructions.
 
+The q option changes the way the trace is decoded.  The decoding is much faster
+but much less detailed.  Specifically, with the q option, the decoder does not
+decode TNT packets, and does not walk object code, but gets the ip from FUP and
+TIP packets.  The q option can be used with the b and i options but the period
+is not used.  The q option decodes more quickly, but is useful only if the
+control flow of interest is represented or indicated by FUP, TIP, TIP.PGE, or
+TIP.PGD packets (refer below).  However, the q option could be used to find time
+ranges that could then be decoded fully using the --time option.
+
+What will *not* be decoded with the (single) q option:
+
+       - direct calls and jmps
+       - conditional branches
+       - non-branch instructions
+
+What *will* be decoded with the (single) q option:
+
+       - asynchronous branches such as interrupts
+       - indirect branches
+       - function return target address *if* the noretcomp config term (refer
+       config terms section) was used
+       - start of (control-flow) tracing
+       - end of (control-flow) tracing, if it is not out of context
+       - power events, ptwrite, transaction start and abort
+       - instruction pointer associated with PSB packets
+
+Note the q option does not specify what events will be synthesized, e.g. the p
+option must also be used to show power events.
+
+Repeating the q option (double-q i.e. qq) results in even faster decoding and even
+less detail.  The decoder decodes only extended PSB (PSB+) packets, getting the
+instruction pointer if there is a FUP packet within PSB+ (i.e. between PSB and
+PSBEND).  Note PSB packets occur regularly in the trace based on the psb_period
+config term (refer config terms section).  There will be a FUP packet if the
+PSB+ occurs while control flow is being traced.
+
+What will *not* be decoded with the qq option:
+
+       - everything except instruction pointer associated with PSB packets
+
+What *will* be decoded with the qq option:
+
+       - instruction pointer associated with PSB packets
+
+
 dump option
 ~~~~~~~~~~~
 
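As suggested above, a plausible workflow is to use qq decoding to locate an interesting region cheaply and then decode that region in full; the time values below are placeholders:

	perf script --itrace=qqi --ns
	perf script --itrace=ibxwpe --time 1234.567890,1234.678901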
index 376a50b..10ed539 100644 (file)
@@ -119,6 +119,7 @@ It's also possible to use pmu syntax:
 
  perf record -e r1a8 -a sleep 1
  perf record -e cpu/r1a8/ ...
+ perf record -e cpu/r0x1a8/ ...
 
 You should refer to the processor specific documentation for getting these
 details. Some of them are referenced in the SEE ALSO section below.
index fa8a5fc..3f72d8e 100644 (file)
@@ -407,8 +407,9 @@ if combined with -a or -C options.
 
 -D::
 --delay=::
-After starting the program, wait msecs before measuring. This is useful to
-filter out the startup phase of the program, which is often very different.
+After starting the program, wait msecs before measuring (-1: start with events
+disabled). This is useful to filter out the startup phase of the program, which
+is often very different.
 
 -I::
 --intr-regs::
@@ -626,6 +627,45 @@ option. The -e option and this one can be mixed and matched.  Events
 can be grouped using the {} notation.
 endif::HAVE_LIBPFM[]
 
+--control fd:ctl-fd[,ack-fd]
+Listen on ctl-fd descriptor for command to control measurement ('enable': enable events,
+'disable': disable events). Measurements can be started with events disabled using
+--delay=-1 option. Optionally send control command completion ('ack\n') to ack-fd descriptor
+to synchronize with the controlling process. Example of bash shell script to enable and
+disable events during measurements:
+
+#!/bin/bash
+
+ctl_dir=/tmp/
+
+ctl_fifo=${ctl_dir}perf_ctl.fifo
+test -p ${ctl_fifo} && unlink ${ctl_fifo}
+mkfifo ${ctl_fifo}
+exec {ctl_fd}<>${ctl_fifo}
+
+ctl_ack_fifo=${ctl_dir}perf_ctl_ack.fifo
+test -p ${ctl_ack_fifo} && unlink ${ctl_ack_fifo}
+mkfifo ${ctl_ack_fifo}
+exec {ctl_fd_ack}<>${ctl_ack_fifo}
+
+perf record -D -1 -e cpu-cycles -a               \
+            --control fd:${ctl_fd},${ctl_fd_ack} \
+            -- sleep 30 &
+perf_pid=$!
+
+sleep 5  && echo 'enable' >&${ctl_fd} && read -u ${ctl_fd_ack} e1 && echo "enabled(${e1})"
+sleep 10 && echo 'disable' >&${ctl_fd} && read -u ${ctl_fd_ack} d1 && echo "disabled(${d1})"
+
+exec {ctl_fd_ack}>&-
+unlink ${ctl_ack_fifo}
+
+exec {ctl_fd}>&-
+unlink ${ctl_fifo}
+
+wait -n ${perf_pid}
+exit $?
+
+
 SEE ALSO
 --------
 linkperf:perf-stat[1], linkperf:perf-list[1], linkperf:perf-intel-pt[1]
index 372dfd1..4f712fb 100644 (file)
@@ -322,6 +322,10 @@ OPTIONS
 --show-cgroup-events
        Display cgroup events i.e. events of type PERF_RECORD_CGROUP.
 
+--show-text-poke-events
+       Display text poke events i.e. events of type PERF_RECORD_TEXT_POKE and
+       PERF_RECORD_KSYMBOL.
+
 --demangle::
        Demangle symbol names to human readable form. It's enabled by default,
        disable with --no-demangle.
index b029ee7..c9bfefc 100644 (file)
@@ -176,6 +176,45 @@ with it.  --append may be used here.  Examples:
      3>results  perf stat --log-fd 3          -- $cmd
      3>>results perf stat --log-fd 3 --append -- $cmd
 
+--control fd:ctl-fd[,ack-fd]
+Listen on ctl-fd descriptor for command to control measurement ('enable': enable events,
+'disable': disable events). Measurements can be started with events disabled using
+--delay=-1 option. Optionally send control command completion ('ack\n') to ack-fd descriptor
+to synchronize with the controlling process. Example of bash shell script to enable and
+disable events during measurements:
+
+#!/bin/bash
+
+ctl_dir=/tmp/
+
+ctl_fifo=${ctl_dir}perf_ctl.fifo
+test -p ${ctl_fifo} && unlink ${ctl_fifo}
+mkfifo ${ctl_fifo}
+exec {ctl_fd}<>${ctl_fifo}
+
+ctl_ack_fifo=${ctl_dir}perf_ctl_ack.fifo
+test -p ${ctl_ack_fifo} && unlink ${ctl_ack_fifo}
+mkfifo ${ctl_ack_fifo}
+exec {ctl_fd_ack}<>${ctl_ack_fifo}
+
+perf stat -D -1 -e cpu-cycles -a -I 1000       \
+          --control fd:${ctl_fd},${ctl_fd_ack} \
+          -- sleep 30 &
+perf_pid=$!
+
+sleep 5  && echo 'enable' >&${ctl_fd} && read -u ${ctl_fd_ack} e1 && echo "enabled(${e1})"
+sleep 10 && echo 'disable' >&${ctl_fd} && read -u ${ctl_fd_ack} d1 && echo "disabled(${d1})"
+
+exec {ctl_fd_ack}>&-
+unlink ${ctl_ack_fifo}
+
+exec {ctl_fd}>&-
+unlink ${ctl_fifo}
+
+wait -n ${perf_pid}
+exit $?
+
+
 --pre::
 --post::
        Pre and post measurement hooks, e.g.:
@@ -238,8 +277,9 @@ mode, use --per-node in addition to -a. (system-wide).
 
 -D msecs::
 --delay msecs::
-After starting the program, wait msecs before measuring. This is useful to
-filter out the startup phase of the program, which is often very different.
+After starting the program, wait msecs before measuring (-1: start with events
+disabled). This is useful to filter out the startup phase of the program,
+which is often very different.
 
 -T::
 --transaction::
index b6472e4..9ee9664 100644 (file)
@@ -389,6 +389,19 @@ struct {
 Example:
  cpu pmu capabilities: branches=32, max_precise=3, pmu_name=icelake
 
+       HEADER_CLOCK_DATA = 29,
+
+       Contains clock id and its reference time together with wall clock
+       time taken at the 'same time'; both values are in nanoseconds.
+       The format of the data is as below.
+
+struct {
+       u32 version;  /* version = 1 */
+       u32 clockid;
+       u64 wall_clock_ns;
+       u64 clockid_time_ns;
+};
+
        other bits are reserved and should ignored for now
        HEADER_FEAT_BITS        = 256,
 
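Given the description above (a clockid timestamp and a wall-clock timestamp captured at essentially the same instant), converting a sample timestamp taken on that clockid to time of day presumably reduces to applying the recorded offset:

	wall_time_ns = wall_clock_ns + (sample_time_ns - clockid_time_ns)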
index 5136338..190be4f 100644 (file)
@@ -501,6 +501,14 @@ ifndef NO_LIBELF
     CFLAGS += -DHAVE_ELF_GETSHDRSTRNDX_SUPPORT
   endif
 
+  ifndef NO_LIBDEBUGINFOD
+    $(call feature_check,libdebuginfod)
+    ifeq ($(feature-libdebuginfod), 1)
+      CFLAGS += -DHAVE_DEBUGINFOD_SUPPORT
+      EXTLIBS += -ldebuginfod
+    endif
+  endif
+
   ifndef NO_DWARF
     ifeq ($(origin PERF_HAVE_DWARF_REGS), undefined)
       msg := $(warning DWARF register mappings have not been defined for architecture $(SRCARCH), DWARF support disabled);
index 86dbb51..6031167 100644 (file)
@@ -124,6 +124,8 @@ include ../scripts/utilities.mak
 #
 # Define LIBPFM4 to enable libpfm4 events extension.
 #
+# Define NO_LIBDEBUGINFOD if you do not want debuginfod support
+#
 
 # As per kernel Makefile, avoid funny character set dependencies
 unexport LC_ALL
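Assuming the usual perf build conventions for NO_* knobs, the debuginfod feature introduced above would be disabled on the make command line, e.g.:

	make -C tools/perf NO_LIBDEBUGINFOD=1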
@@ -418,6 +420,7 @@ export INSTALL SHELL_PATH
 
 SHELL = $(SHELL_PATH)
 
+beauty_linux_dir := $(srctree)/tools/perf/trace/beauty/include/linux/
 linux_uapi_dir := $(srctree)/tools/include/uapi/linux
 asm_generic_uapi_dir := $(srctree)/tools/include/uapi/asm-generic
 arch_asm_uapi_dir := $(srctree)/tools/arch/$(SRCARCH)/include/uapi/asm/
@@ -501,6 +504,12 @@ socket_ipproto_tbl := $(srctree)/tools/perf/trace/beauty/socket_ipproto.sh
 $(socket_ipproto_array): $(linux_uapi_dir)/in.h $(socket_ipproto_tbl)
        $(Q)$(SHELL) '$(socket_ipproto_tbl)' $(linux_uapi_dir) > $@
 
+socket_arrays := $(beauty_outdir)/socket_arrays.c
+socket_tbl := $(srctree)/tools/perf/trace/beauty/socket.sh
+
+$(socket_arrays): $(beauty_linux_dir)/socket.h $(socket_tbl)
+       $(Q)$(SHELL) '$(socket_tbl)' $(beauty_linux_dir) > $@
+
 vhost_virtio_ioctl_array := $(beauty_ioctl_outdir)/vhost_virtio_ioctl_array.c
 vhost_virtio_hdr_dir := $(srctree)/tools/include/uapi/linux
 vhost_virtio_ioctl_tbl := $(srctree)/tools/perf/trace/beauty/vhost_virtio_ioctl.sh
@@ -697,6 +706,7 @@ prepare: $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h archheaders $(drm_ioc
        $(kcmp_type_array) \
        $(kvm_ioctl_array) \
        $(socket_ipproto_array) \
+       $(socket_arrays) \
        $(vhost_virtio_ioctl_array) \
        $(madvise_behavior_array) \
        $(mmap_flags_array) \
@@ -1006,6 +1016,7 @@ clean:: $(LIBTRACEEVENT)-clean $(LIBAPI)-clean $(LIBBPF)-clean $(LIBSUBCMD)-clea
                $(OUTPUT)$(kvm_ioctl_array) \
                $(OUTPUT)$(kcmp_type_array) \
                $(OUTPUT)$(socket_ipproto_array) \
+               $(OUTPUT)$(socket_arrays) \
                $(OUTPUT)$(vhost_virtio_ioctl_array) \
                $(OUTPUT)$(perf_ioctl_array) \
                $(OUTPUT)$(prctl_option_array) \
index 28a5d0c..b187bdd 100644 (file)
@@ -57,17 +57,15 @@ struct auxtrace_record
        struct evsel *evsel;
        bool found_etm = false;
        struct perf_pmu *found_spe = NULL;
-       static struct perf_pmu **arm_spe_pmus = NULL;
-       static int nr_spes = 0;
+       struct perf_pmu **arm_spe_pmus = NULL;
+       int nr_spes = 0;
        int i = 0;
 
        if (!evlist)
                return NULL;
 
        cs_etm_pmu = perf_pmu__find(CORESIGHT_ETM_PMU_NAME);
-
-       if (!arm_spe_pmus)
-               arm_spe_pmus = find_all_arm_spe_pmus(&nr_spes, err);
+       arm_spe_pmus = find_all_arm_spe_pmus(&nr_spes, err);
 
        evlist__for_each_entry(evlist, evsel) {
                if (cs_etm_pmu &&
@@ -84,6 +82,7 @@ struct auxtrace_record
                        }
                }
        }
+       free(arm_spe_pmus);
 
        if (found_etm && found_spe) {
                pr_err("Concurrent ARM Coresight ETM and SPE operation not currently supported\n");
index cea5e33..cad7bf7 100644 (file)
@@ -243,10 +243,10 @@ static int cs_etm_set_sink_attr(struct perf_pmu *pmu,
        }
 
        /*
-        * No sink was provided on the command line - for _now_ treat
-        * this as an error.
+        * No sink was provided on the command line - allow the CoreSight
+        * system to look for a default
         */
-       return ret;
+       return 0;
 }
 
 static int cs_etm_recording_options(struct auxtrace_record *itr,
index b190f2e..3ca6fe0 100644 (file)
 146    common  writev                          sys_writev                      compat_sys_writev
 147    common  getsid                          sys_getsid
 148    common  fdatasync                       sys_fdatasync
-149    nospu   _sysctl                         sys_sysctl                      compat_sys_sysctl
+149    nospu   _sysctl                         sys_ni_syscall
 150    common  mlock                           sys_mlock
 151    common  munlock                         sys_munlock
 152    common  mlockall                        sys_mlockall
index e18a355..63f3ac9 100644 (file)
@@ -64,7 +64,13 @@ static const char *reg_names[] = {
        [PERF_REG_POWERPC_DAR] = "dar",
        [PERF_REG_POWERPC_DSISR] = "dsisr",
        [PERF_REG_POWERPC_SIER] = "sier",
-       [PERF_REG_POWERPC_MMCRA] = "mmcra"
+       [PERF_REG_POWERPC_MMCRA] = "mmcra",
+       [PERF_REG_POWERPC_MMCR0] = "mmcr0",
+       [PERF_REG_POWERPC_MMCR1] = "mmcr1",
+       [PERF_REG_POWERPC_MMCR2] = "mmcr2",
+       [PERF_REG_POWERPC_MMCR3] = "mmcr3",
+       [PERF_REG_POWERPC_SIER2] = "sier2",
+       [PERF_REG_POWERPC_SIER3] = "sier3",
 };
 
 static inline const char *perf_reg_name(int id)
index d487007..1a95017 100644 (file)
@@ -7,17 +7,10 @@
 #include <string.h>
 #include <linux/stringify.h>
 #include "header.h"
+#include "utils_header.h"
 #include "metricgroup.h"
 #include <api/fs/fs.h>
 
-#define mfspr(rn)       ({unsigned long rval; \
-                        asm volatile("mfspr %0," __stringify(rn) \
-                                     : "=r" (rval)); rval; })
-
-#define SPRN_PVR        0x11F  /* Processor Version Register */
-#define PVR_VER(pvr)    (((pvr) >>  16) & 0xFFFF) /* Version field */
-#define PVR_REV(pvr)    (((pvr) >>   0) & 0xFFFF) /* Revison field */
-
 int
 get_cpuid(char *buffer, size_t sz)
 {
index 0a52429..2b6d470 100644 (file)
@@ -6,9 +6,16 @@
 
 #include "../../../util/perf_regs.h"
 #include "../../../util/debug.h"
+#include "../../../util/event.h"
+#include "../../../util/header.h"
+#include "../../../perf-sys.h"
+#include "utils_header.h"
 
 #include <linux/kernel.h>
 
+#define PVR_POWER9             0x004E
+#define PVR_POWER10            0x0080
+
 const struct sample_reg sample_reg_masks[] = {
        SMPL_REG(r0, PERF_REG_POWERPC_R0),
        SMPL_REG(r1, PERF_REG_POWERPC_R1),
@@ -55,6 +62,12 @@ const struct sample_reg sample_reg_masks[] = {
        SMPL_REG(dsisr, PERF_REG_POWERPC_DSISR),
        SMPL_REG(sier, PERF_REG_POWERPC_SIER),
        SMPL_REG(mmcra, PERF_REG_POWERPC_MMCRA),
+       SMPL_REG(mmcr0, PERF_REG_POWERPC_MMCR0),
+       SMPL_REG(mmcr1, PERF_REG_POWERPC_MMCR1),
+       SMPL_REG(mmcr2, PERF_REG_POWERPC_MMCR2),
+       SMPL_REG(mmcr3, PERF_REG_POWERPC_MMCR3),
+       SMPL_REG(sier2, PERF_REG_POWERPC_SIER2),
+       SMPL_REG(sier3, PERF_REG_POWERPC_SIER3),
        SMPL_REG_END
 };
 
@@ -163,3 +176,45 @@ int arch_sdt_arg_parse_op(char *old_op, char **new_op)
 
        return SDT_ARG_VALID;
 }
+
+uint64_t arch__intr_reg_mask(void)
+{
+       struct perf_event_attr attr = {
+               .type                   = PERF_TYPE_HARDWARE,
+               .config                 = PERF_COUNT_HW_CPU_CYCLES,
+               .sample_type            = PERF_SAMPLE_REGS_INTR,
+               .precise_ip             = 1,
+               .disabled               = 1,
+               .exclude_kernel         = 1,
+       };
+       int fd;
+       u32 version;
+       u64 extended_mask = 0, mask = PERF_REGS_MASK;
+
+       /*
+        * Get the PVR value to set the extended
+        * mask specific to platform.
+        */
+       version = (((mfspr(SPRN_PVR)) >>  16) & 0xFFFF);
+       if (version == PVR_POWER9)
+               extended_mask = PERF_REG_PMU_MASK_300;
+       else if (version == PVR_POWER10)
+               extended_mask = PERF_REG_PMU_MASK_31;
+       else
+               return mask;
+
+       attr.sample_regs_intr = extended_mask;
+       attr.sample_period = 1;
+       event_attr_init(&attr);
+
+       /*
+        * check if the pmu supports perf extended regs, before
+        * returning the register mask to sample.
+        */
+       fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
+       if (fd != -1) {
+               close(fd);
+               mask |= extended_mask;
+       }
+       return mask;
+}
diff --git a/tools/perf/arch/powerpc/util/utils_header.h b/tools/perf/arch/powerpc/util/utils_header.h
new file mode 100644 (file)
index 0000000..5788eb1
--- /dev/null
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __PERF_UTIL_HEADER_H
+#define __PERF_UTIL_HEADER_H
+
+#include <linux/stringify.h>
+
+#define mfspr(rn)       ({unsigned long rval; \
+                       asm volatile("mfspr %0," __stringify(rn) \
+                               : "=r" (rval)); rval; })
+
+#define SPRN_PVR        0x11F   /* Processor Version Register */
+#define PVR_VER(pvr)    (((pvr) >>  16) & 0xFFFF) /* Version field */
+#define PVR_REV(pvr)    (((pvr) >>   0) & 0xFFFF) /* Revision field */
+
+#endif /* __PERF_UTIL_HEADER_H */
index 56ae24b..6a0bbea 100644 (file)
 146  common    writev                  sys_writev                      compat_sys_writev
 147  common    getsid                  sys_getsid                      sys_getsid
 148  common    fdatasync               sys_fdatasync                   sys_fdatasync
-149  common    _sysctl                 sys_sysctl                      compat_sys_sysctl
+149  common    _sysctl                 -                               -
 150  common    mlock                   sys_mlock                       compat_sys_mlock
 151  common    munlock                 sys_munlock                     compat_sys_munlock
 152  common    mlockall                sys_mlockall                    sys_mlockall
index e008d63..f30d6ae 100644 (file)
 153    common  vhangup                 sys_vhangup
 154    common  modify_ldt              sys_modify_ldt
 155    common  pivot_root              sys_pivot_root
-156    64      _sysctl                 sys_sysctl
+156    64      _sysctl                 sys_ni_syscall
 157    common  prctl                   sys_prctl
 158    common  arch_prctl              sys_arch_prctl
 159    common  adjtimex                sys_adjtimex
 433    common  fspick                  sys_fspick
 434    common  pidfd_open              sys_pidfd_open
 435    common  clone3                  sys_clone3
+436    common  close_range             sys_close_range
 437    common  openat2                 sys_openat2
 438    common  pidfd_getfd             sys_pidfd_getfd
 439    common  faccessat2              sys_faccessat2
index 6ce4512..082e5f2 100644 (file)
@@ -837,6 +837,10 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
                }
        }
 
+       if (have_timing_info && !intel_pt_evsel->core.attr.exclude_kernel &&
+           perf_can_record_text_poke_events() && perf_can_record_cpu_wide())
+               opts->text_poke = true;
+
        if (intel_pt_evsel) {
                /*
                 * To obtain the auxtrace buffer file descriptor, the auxtrace
index 768e408..dd68a40 100644 (file)
@@ -1,5 +1,6 @@
 perf-y += sched-messaging.o
 perf-y += sched-pipe.o
+perf-y += syscall.o
 perf-y += mem-functions.o
 perf-y += futex-hash.o
 perf-y += futex-wake.o
@@ -10,6 +11,7 @@ perf-y += epoll-wait.o
 perf-y += epoll-ctl.o
 perf-y += synthesize.o
 perf-y += kallsyms-parse.o
+perf-y += find-bit-bench.o
 
 perf-$(CONFIG_X86_64) += mem-memcpy-x86-64-lib.o
 perf-$(CONFIG_X86_64) += mem-memcpy-x86-64-asm.o
index 61cae49..2804812 100644 (file)
@@ -33,8 +33,10 @@ extern struct timeval bench__start, bench__end, bench__runtime;
 int bench_numa(int argc, const char **argv);
 int bench_sched_messaging(int argc, const char **argv);
 int bench_sched_pipe(int argc, const char **argv);
+int bench_syscall_basic(int argc, const char **argv);
 int bench_mem_memcpy(int argc, const char **argv);
 int bench_mem_memset(int argc, const char **argv);
+int bench_mem_find_bit(int argc, const char **argv);
 int bench_futex_hash(int argc, const char **argv);
 int bench_futex_wake(int argc, const char **argv);
 int bench_futex_wake_parallel(int argc, const char **argv);
diff --git a/tools/perf/bench/find-bit-bench.c b/tools/perf/bench/find-bit-bench.c
new file mode 100644 (file)
index 0000000..73b5bcc
--- /dev/null
@@ -0,0 +1,135 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Benchmark find_next_bit and related bit operations.
+ *
+ * Copyright 2020 Google LLC.
+ */
+#include <stdlib.h>
+#include "bench.h"
+#include "../util/stat.h"
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+#include <linux/time64.h>
+#include <subcmd/parse-options.h>
+
+static unsigned int outer_iterations = 5;
+static unsigned int inner_iterations = 100000;
+
+static const struct option options[] = {
+       OPT_UINTEGER('i', "outer-iterations", &outer_iterations,
+               "Number of outer iterations used"),
+       OPT_UINTEGER('j', "inner-iterations", &inner_iterations,
+               "Number of inner iterations used"),
+       OPT_END()
+};
+
+static const char *const bench_usage[] = {
+       "perf bench mem find_bit <options>",
+       NULL
+};
+
+static unsigned int accumulator;
+static unsigned int use_of_val;
+
+static noinline void workload(int val)
+{
+       use_of_val += val;
+       accumulator++;
+}
+
+#if (defined(__i386__) || defined(__x86_64__)) && defined(__GCC_ASM_FLAG_OUTPUTS__)
+static bool asm_test_bit(long nr, const unsigned long *addr)
+{
+       bool oldbit;
+
+       asm volatile("bt %2,%1"
+                    : "=@ccc" (oldbit)
+                    : "m" (*(unsigned long *)addr), "Ir" (nr) : "memory");
+
+       return oldbit;
+}
+#else
+#define asm_test_bit test_bit
+#endif
+
+static int do_for_each_set_bit(unsigned int num_bits)
+{
+       unsigned long *to_test = bitmap_alloc(num_bits);
+       struct timeval start, end, diff;
+       u64 runtime_us;
+       struct stats fb_time_stats, tb_time_stats;
+       double time_average, time_stddev;
+       unsigned int bit, i, j;
+       unsigned int set_bits, skip;
+       unsigned int old;
+
+       init_stats(&fb_time_stats);
+       init_stats(&tb_time_stats);
+
+       for (set_bits = 1; set_bits <= num_bits; set_bits <<= 1) {
+               bitmap_zero(to_test, num_bits);
+               skip = num_bits / set_bits;
+               for (i = 0; i < num_bits; i += skip)
+                       set_bit(i, to_test);
+
+               for (i = 0; i < outer_iterations; i++) {
+                       old = accumulator;
+                       gettimeofday(&start, NULL);
+                       for (j = 0; j < inner_iterations; j++) {
+                               for_each_set_bit(bit, to_test, num_bits)
+                                       workload(bit);
+                       }
+                       gettimeofday(&end, NULL);
+                       assert(old + (inner_iterations * set_bits) == accumulator);
+                       timersub(&end, &start, &diff);
+                       runtime_us = diff.tv_sec * USEC_PER_SEC + diff.tv_usec;
+                       update_stats(&fb_time_stats, runtime_us);
+
+                       old = accumulator;
+                       gettimeofday(&start, NULL);
+                       for (j = 0; j < inner_iterations; j++) {
+                               for (bit = 0; bit < num_bits; bit++) {
+                                       if (asm_test_bit(bit, to_test))
+                                               workload(bit);
+                               }
+                       }
+                       gettimeofday(&end, NULL);
+                       assert(old + (inner_iterations * set_bits) == accumulator);
+                       timersub(&end, &start, &diff);
+                       runtime_us = diff.tv_sec * USEC_PER_SEC + diff.tv_usec;
+                       update_stats(&tb_time_stats, runtime_us);
+               }
+
+               printf("%d operations %d bits set of %d bits\n",
+                       inner_iterations, set_bits, num_bits);
+               time_average = avg_stats(&fb_time_stats);
+               time_stddev = stddev_stats(&fb_time_stats);
+               printf("  Average for_each_set_bit took: %.3f usec (+- %.3f usec)\n",
+                       time_average, time_stddev);
+               time_average = avg_stats(&tb_time_stats);
+               time_stddev = stddev_stats(&tb_time_stats);
+               printf("  Average test_bit loop took:    %.3f usec (+- %.3f usec)\n",
+                       time_average, time_stddev);
+
+               if (use_of_val == accumulator)  /* Try to avoid compiler tricks. */
+                       printf("\n");
+       }
+       bitmap_free(to_test);
+       return 0;
+}
+
+int bench_mem_find_bit(int argc, const char **argv)
+{
+       int err = 0, i;
+
+       argc = parse_options(argc, argv, options, bench_usage, 0);
+       if (argc) {
+               usage_with_options(bench_usage, options);
+               exit(EXIT_FAILURE);
+       }
+
+       for (i = 1; i <= 2048; i <<= 1)
+               do_for_each_set_bit(i);
+
+       return err;
+}
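A hypothetical invocation of the new benchmark, exercising the -i/-j iteration options defined above:

	perf bench mem find_bit
	perf bench mem find_bit -i 10 -j 200000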
index 9235b76..19d45c3 100644 (file)
@@ -223,12 +223,8 @@ static int bench_mem_common(int argc, const char **argv, struct bench_mem_info *
        return 0;
 }
 
-static u64 do_memcpy_cycles(const struct function *r, size_t size, void *src, void *dst)
+static void memcpy_prefault(memcpy_t fn, size_t size, void *src, void *dst)
 {
-       u64 cycle_start = 0ULL, cycle_end = 0ULL;
-       memcpy_t fn = r->fn.memcpy;
-       int i;
-
        /* Make sure to always prefault zero pages even if MMAP_THRESH is crossed: */
        memset(src, 0, size);
 
@@ -237,6 +233,15 @@ static u64 do_memcpy_cycles(const struct function *r, size_t size, void *src, vo
         * to not measure page fault overhead:
         */
        fn(dst, src, size);
+}
+
+static u64 do_memcpy_cycles(const struct function *r, size_t size, void *src, void *dst)
+{
+       u64 cycle_start = 0ULL, cycle_end = 0ULL;
+       memcpy_t fn = r->fn.memcpy;
+       int i;
+
+       memcpy_prefault(fn, size, src, dst);
 
        cycle_start = get_cycles();
        for (i = 0; i < nr_loops; ++i)
@@ -252,11 +257,7 @@ static double do_memcpy_gettimeofday(const struct function *r, size_t size, void
        memcpy_t fn = r->fn.memcpy;
        int i;
 
-       /*
-        * We prefault the freshly allocated memory range here,
-        * to not measure page fault overhead:
-        */
-       fn(dst, src, size);
+       memcpy_prefault(fn, size, src, dst);
 
        BUG_ON(gettimeofday(&tv_start, NULL));
        for (i = 0; i < nr_loops; ++i)
index 5797253..f85bcec 100644 (file)
@@ -247,17 +247,22 @@ static int is_node_present(int node)
  */
 static bool node_has_cpus(int node)
 {
-       struct bitmask *cpu = numa_allocate_cpumask();
-       unsigned int i;
+       struct bitmask *cpumask = numa_allocate_cpumask();
+       bool ret = false; /* fall back to nocpus */
+       int cpu;
 
-       if (cpu && !numa_node_to_cpus(node, cpu)) {
-               for (i = 0; i < cpu->size; i++) {
-                       if (numa_bitmask_isbitset(cpu, i))
-                               return true;
+       BUG_ON(!cpumask);
+       if (!numa_node_to_cpus(node, cpumask)) {
+               for (cpu = 0; cpu < (int)cpumask->size; cpu++) {
+                       if (numa_bitmask_isbitset(cpumask, cpu)) {
+                               ret = true;
+                               break;
+                       }
                }
        }
+       numa_free_cpumask(cpumask);
 
-       return false; /* lets fall back to nocpus safely */
+       return ret;
 }
 
 static cpu_set_t bind_to_cpu(int target_cpu)
@@ -288,14 +293,10 @@ static cpu_set_t bind_to_cpu(int target_cpu)
 
 static cpu_set_t bind_to_node(int target_node)
 {
-       int cpus_per_node = g->p.nr_cpus / nr_numa_nodes();
        cpu_set_t orig_mask, mask;
        int cpu;
        int ret;
 
-       BUG_ON(cpus_per_node * nr_numa_nodes() != g->p.nr_cpus);
-       BUG_ON(!cpus_per_node);
-
        ret = sched_getaffinity(0, sizeof(orig_mask), &orig_mask);
        BUG_ON(ret);
 
@@ -305,13 +306,16 @@ static cpu_set_t bind_to_node(int target_node)
                for (cpu = 0; cpu < g->p.nr_cpus; cpu++)
                        CPU_SET(cpu, &mask);
        } else {
-               int cpu_start = (target_node + 0) * cpus_per_node;
-               int cpu_stop  = (target_node + 1) * cpus_per_node;
+               struct bitmask *cpumask = numa_allocate_cpumask();
 
-               BUG_ON(cpu_stop > g->p.nr_cpus);
-
-               for (cpu = cpu_start; cpu < cpu_stop; cpu++)
-                       CPU_SET(cpu, &mask);
+               BUG_ON(!cpumask);
+               if (!numa_node_to_cpus(target_node, cpumask)) {
+                       for (cpu = 0; cpu < (int)cpumask->size; cpu++) {
+                               if (numa_bitmask_isbitset(cpumask, cpu))
+                                       CPU_SET(cpu, &mask);
+                       }
+               }
+               numa_free_cpumask(cpumask);
        }
 
        ret = sched_setaffinity(0, sizeof(mask), &mask);
@@ -729,8 +733,6 @@ static int parse_nodes_opt(const struct option *opt __maybe_unused,
                return -1;
 
        return parse_node_list(arg);
-
-       return 0;
 }
 
 #define BIT(x) (1ul << x)
@@ -813,12 +815,12 @@ static u64 do_work(u8 *__data, long bytes, int nr, int nr_max, int loop, u64 val
                        }
                }
        } else if (!g->p.data_backwards || (nr + loop) & 1) {
+               /* Process data forwards: */
 
                d0 = data + off;
                d  = data + off + 1;
                d1 = data + words;
 
-               /* Process data forwards: */
                for (;;) {
                        if (unlikely(d >= d1))
                                d = data;
@@ -836,7 +838,6 @@ static u64 do_work(u8 *__data, long bytes, int nr, int nr_max, int loop, u64 val
                d  = data + off - 1;
                d1 = data + words;
 
-               /* Process data forwards: */
                for (;;) {
                        if (unlikely(d < data))
                                d = data + words-1;
@@ -1733,12 +1734,12 @@ err:
  */
 static const char *tests[][MAX_ARGS] = {
    /* Basic single-stream NUMA bandwidth measurements: */
-   { "RAM-bw-local,",    "mem",  "-p",  "1",  "-t",  "1", "-P", "1024",
+   { "RAM-bw-local,",     "mem",  "-p",  "1",  "-t",  "1", "-P", "1024",
                          "-C" ,   "0", "-M",   "0", OPT_BW_RAM },
    { "RAM-bw-local-NOTHP,",
                          "mem",  "-p",  "1",  "-t",  "1", "-P", "1024",
                          "-C" ,   "0", "-M",   "0", OPT_BW_RAM_NOTHP },
-   { "RAM-bw-remote,",   "mem",  "-p",  "1",  "-t",  "1", "-P", "1024",
+   { "RAM-bw-remote,",    "mem",  "-p",  "1",  "-t",  "1", "-P", "1024",
                          "-C" ,   "0", "-M",   "1", OPT_BW_RAM },
 
    /* 2-stream NUMA bandwidth measurements: */
@@ -1755,7 +1756,7 @@ static const char *tests[][MAX_ARGS] = {
    { " 1x3-convergence,", "mem",  "-p",  "1", "-t",  "3", "-P",  "512", OPT_CONV },
    { " 1x4-convergence,", "mem",  "-p",  "1", "-t",  "4", "-P",  "512", OPT_CONV },
    { " 1x6-convergence,", "mem",  "-p",  "1", "-t",  "6", "-P", "1020", OPT_CONV },
-   { " 2x3-convergence,", "mem",  "-p",  "3", "-t",  "3", "-P", "1020", OPT_CONV },
+   { " 2x3-convergence,", "mem",  "-p",  "2", "-t",  "3", "-P", "1020", OPT_CONV },
    { " 3x3-convergence,", "mem",  "-p",  "3", "-t",  "3", "-P", "1020", OPT_CONV },
    { " 4x4-convergence,", "mem",  "-p",  "4", "-t",  "4", "-P",  "512", OPT_CONV },
    { " 4x4-convergence-NOTHP,",
@@ -1780,24 +1781,24 @@ static const char *tests[][MAX_ARGS] = {
                          "mem",  "-p",  "8", "-t",  "1", "-P", " 512", OPT_BW_NOTHP },
    { "16x1-bw-process,",  "mem",  "-p", "16", "-t",  "1", "-P",  "256", OPT_BW },
 
-   { " 4x1-bw-thread,",          "mem",  "-p",  "1", "-t",  "4", "-T",  "256", OPT_BW },
-   { " 8x1-bw-thread,",          "mem",  "-p",  "1", "-t",  "8", "-T",  "256", OPT_BW },
-   { "16x1-bw-thread,",   "mem",  "-p",  "1", "-t", "16", "-T",  "128", OPT_BW },
-   { "32x1-bw-thread,",   "mem",  "-p",  "1", "-t", "32", "-T",   "64", OPT_BW },
+   { " 1x4-bw-thread,",   "mem",  "-p",  "1", "-t",  "4", "-T",  "256", OPT_BW },
+   { " 1x8-bw-thread,",   "mem",  "-p",  "1", "-t",  "8", "-T",  "256", OPT_BW },
+   { "1x16-bw-thread,",   "mem",  "-p",  "1", "-t", "16", "-T",  "128", OPT_BW },
+   { "1x32-bw-thread,",   "mem",  "-p",  "1", "-t", "32", "-T",   "64", OPT_BW },
 
-   { " 2x3-bw-thread,",          "mem",  "-p",  "2", "-t",  "3", "-P",  "512", OPT_BW },
-   { " 4x4-bw-thread,",          "mem",  "-p",  "4", "-t",  "4", "-P",  "512", OPT_BW },
-   { " 4x6-bw-thread,",          "mem",  "-p",  "4", "-t",  "6", "-P",  "512", OPT_BW },
-   { " 4x8-bw-thread,",          "mem",  "-p",  "4", "-t",  "8", "-P",  "512", OPT_BW },
-   { " 4x8-bw-thread-NOTHP,",
+   { " 2x3-bw-process,",  "mem",  "-p",  "2", "-t",  "3", "-P",  "512", OPT_BW },
+   { " 4x4-bw-process,",  "mem",  "-p",  "4", "-t",  "4", "-P",  "512", OPT_BW },
+   { " 4x6-bw-process,",  "mem",  "-p",  "4", "-t",  "6", "-P",  "512", OPT_BW },
+   { " 4x8-bw-process,",  "mem",  "-p",  "4", "-t",  "8", "-P",  "512", OPT_BW },
+   { " 4x8-bw-process-NOTHP,",
                          "mem",  "-p",  "4", "-t",  "8", "-P",  "512", OPT_BW_NOTHP },
-   { " 3x3-bw-thread,",          "mem",  "-p",  "3", "-t",  "3", "-P",  "512", OPT_BW },
-   { " 5x5-bw-thread,",          "mem",  "-p",  "5", "-t",  "5", "-P",  "512", OPT_BW },
+   { " 3x3-bw-process,",  "mem",  "-p",  "3", "-t",  "3", "-P",  "512", OPT_BW },
+   { " 5x5-bw-process,",  "mem",  "-p",  "5", "-t",  "5", "-P",  "512", OPT_BW },
 
-   { "2x16-bw-thread,",   "mem",  "-p",  "2", "-t", "16", "-P",  "512", OPT_BW },
-   { "1x32-bw-thread,",   "mem",  "-p",  "1", "-t", "32", "-P", "2048", OPT_BW },
+   { "2x16-bw-process,",  "mem",  "-p",  "2", "-t", "16", "-P",  "512", OPT_BW },
+   { "1x32-bw-process,",  "mem",  "-p",  "1", "-t", "32", "-P", "2048", OPT_BW },
 
-   { "numa02-bw,",       "mem",  "-p",  "1", "-t", "32", "-T",   "32", OPT_BW },
+   { "numa02-bw,",        "mem",  "-p",  "1", "-t", "32", "-T",   "32", OPT_BW },
    { "numa02-bw-NOTHP,",  "mem",  "-p",  "1", "-t", "32", "-T",   "32", OPT_BW_NOTHP },
    { "numa01-bw-thread,", "mem",  "-p",  "2", "-t", "16", "-T",  "192", OPT_BW },
    { "numa01-bw-thread-NOTHP,",
diff --git a/tools/perf/bench/syscall.c b/tools/perf/bench/syscall.c
new file mode 100644 (file)
index 0000000..5fe621c
--- /dev/null
@@ -0,0 +1,81 @@
+/*
+ *
+ * syscall.c
+ *
+ * syscall: Benchmark for system call performance
+ */
+#include "../perf.h"
+#include "../util/util.h"
+#include <subcmd/parse-options.h>
+#include "../builtin.h"
+#include "bench.h"
+
+#include <stdio.h>
+#include <sys/time.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <stdlib.h>
+
+#define LOOPS_DEFAULT 10000000
+static int loops = LOOPS_DEFAULT;
+
+static const struct option options[] = {
+       OPT_INTEGER('l', "loop",        &loops,         "Specify number of loops"),
+       OPT_END()
+};
+
+static const char * const bench_syscall_usage[] = {
+       "perf bench syscall <options>",
+       NULL
+};
+
+int bench_syscall_basic(int argc, const char **argv)
+{
+       struct timeval start, stop, diff;
+       unsigned long long result_usec = 0;
+       int i;
+
+       argc = parse_options(argc, argv, options, bench_syscall_usage, 0);
+
+       gettimeofday(&start, NULL);
+
+       for (i = 0; i < loops; i++)
+               getppid();
+
+       gettimeofday(&stop, NULL);
+       timersub(&stop, &start, &diff);
+
+       switch (bench_format) {
+       case BENCH_FORMAT_DEFAULT:
+               printf("# Executed %'d getppid() calls\n", loops);
+
+               result_usec = diff.tv_sec * 1000000;
+               result_usec += diff.tv_usec;
+
+               printf(" %14s: %lu.%03lu [sec]\n\n", "Total time",
+                      diff.tv_sec,
+                      (unsigned long) (diff.tv_usec/1000));
+
+               printf(" %14lf usecs/op\n",
+                      (double)result_usec / (double)loops);
+               printf(" %'14d ops/sec\n",
+                      (int)((double)loops /
+                            ((double)result_usec / (double)1000000)));
+               break;
+
+       case BENCH_FORMAT_SIMPLE:
+               printf("%lu.%03lu\n",
+                      diff.tv_sec,
+                      (unsigned long) (diff.tv_usec / 1000));
+               break;
+
+       default:
+               /* reaching here is a disaster: unknown bench format */
+               fprintf(stderr, "Unknown format:%d\n", bench_format);
+               exit(1);
+               break;
+       }
+
+       return 0;
+}
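The loop count defaults to 10,000,000 getppid() calls; assuming the option wiring above, it can be overridden with -l/--loop, and terse output is selected with the generic perf bench format switch:

	perf bench syscall basic -l 1000000
	perf bench -f simple syscall basic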
index cad31b1..4f17603 100644 (file)
@@ -11,6 +11,7 @@
  * Available benchmark collection list:
  *
  *  sched ... scheduler and IPC performance
+ *  syscall ... System call performance
  *  mem   ... memory access performance
  *  numa  ... NUMA scheduling and MM performance
  *  futex ... Futex performance
@@ -49,9 +50,16 @@ static struct bench sched_benchmarks[] = {
        { NULL,         NULL,                                           NULL                    }
 };
 
+static struct bench syscall_benchmarks[] = {
+       { "basic",      "Benchmark for basic getppid(2) calls",         bench_syscall_basic     },
+       { "all",        "Run all syscall benchmarks",                   NULL                    },
+       { NULL,         NULL,                                           NULL                    },
+};
+
 static struct bench mem_benchmarks[] = {
        { "memcpy",     "Benchmark for memcpy() functions",             bench_mem_memcpy        },
        { "memset",     "Benchmark for memset() functions",             bench_mem_memset        },
+       { "find_bit",   "Benchmark for find_bit() functions",           bench_mem_find_bit      },
        { "all",        "Run all memory access benchmarks",             NULL                    },
        { NULL,         NULL,                                           NULL                    }
 };
@@ -90,6 +98,7 @@ struct collection {
 
 static struct collection collections[] = {
        { "sched",      "Scheduler and IPC benchmarks",                 sched_benchmarks        },
+       { "syscall",    "System call benchmarks",                       syscall_benchmarks      },
        { "mem",        "Memory access benchmarks",                     mem_benchmarks          },
 #ifdef HAVE_LIBNUMA_SUPPORT
        { "numa",       "NUMA scheduling and MM benchmarks",            numa_benchmarks         },
index d617d56..5938b10 100644 (file)
@@ -2582,7 +2582,7 @@ parse_callchain_opt(const struct option *opt, const char *arg, int unset)
 
 static int setup_callchain(struct evlist *evlist)
 {
-       u64 sample_type = perf_evlist__combined_sample_type(evlist);
+       u64 sample_type = evlist__combined_sample_type(evlist);
        enum perf_call_graph_mode mode = CALLCHAIN_NONE;
 
        if ((sample_type & PERF_SAMPLE_REGS_USER) &&
index ca2fb44..8d23b8d 100644 (file)
@@ -65,6 +65,7 @@ static int cmd_data_convert(int argc, const char **argv)
                OPT_STRING('i', "input", &input_name, "file", "input file name"),
 #ifdef HAVE_LIBBABELTRACE_SUPPORT
                OPT_STRING(0, "to-ctf", &to_ctf, NULL, "Convert to CTF format"),
+               OPT_BOOLEAN(0, "tod", &opts.tod, "Convert time to wall clock time"),
 #endif
                OPT_BOOLEAN('f', "force", &opts.force, "don't complain, do it"),
                OPT_BOOLEAN(0, "all", &opts.all, "Convert all events"),
index 2bfc1b0..1d44bc2 100644 (file)
@@ -3,6 +3,7 @@
  * builtin-ftrace.c
  *
  * Copyright (c) 2013  LG Electronics,  Namhyung Kim <namhyung@kernel.org>
+ * Copyright (c) 2020  Changbin Du <changbin.du@gmail.com>, significant enhancement.
  */
 
 #include "builtin.h"
@@ -26,6 +27,8 @@
 #include "thread_map.h"
 #include "util/cap.h"
 #include "util/config.h"
+#include "util/units.h"
+#include "util/parse-sublevel-options.h"
 
 #define DEFAULT_TRACER  "function_graph"
 
@@ -33,11 +36,21 @@ struct perf_ftrace {
        struct evlist           *evlist;
        struct target           target;
        const char              *tracer;
+       bool                    list_avail_functions;
        struct list_head        filters;
        struct list_head        notrace;
        struct list_head        graph_funcs;
        struct list_head        nograph_funcs;
        int                     graph_depth;
+       unsigned long           percpu_buffer_size;
+       bool                    inherit;
+       int                     func_stack_trace;
+       int                     func_irq_info;
+       int                     graph_nosleep_time;
+       int                     graph_noirqs;
+       int                     graph_verbose;
+       int                     graph_thresh;
+       unsigned int            initial_delay;
 };
 
 struct filter_entry {
@@ -128,9 +141,85 @@ static int append_tracing_file(const char *name, const char *val)
        return __write_tracing_file(name, val, true);
 }
 
+static int read_tracing_file_to_stdout(const char *name)
+{
+       char buf[4096];
+       char *file;
+       int fd;
+       int ret = -1;
+
+       file = get_tracing_file(name);
+       if (!file) {
+               pr_debug("cannot get tracing file: %s\n", name);
+               return -1;
+       }
+
+       fd = open(file, O_RDONLY);
+       if (fd < 0) {
+               pr_debug("cannot open tracing file: %s: %s\n",
+                        name, str_error_r(errno, buf, sizeof(buf)));
+               goto out;
+       }
+
+       /* read contents to stdout */
+       while (true) {
+               int n = read(fd, buf, sizeof(buf));
+               if (n == 0)
+                       break;
+               else if (n < 0)
+                       goto out_close;
+
+               if (fwrite(buf, n, 1, stdout) != 1)
+                       goto out_close;
+       }
+       ret = 0;
+
+out_close:
+       close(fd);
+out:
+       put_tracing_file(file);
+       return ret;
+}
+
+static int write_tracing_file_int(const char *name, int value)
+{
+       char buf[16];
+
+       snprintf(buf, sizeof(buf), "%d", value);
+       if (write_tracing_file(name, buf) < 0)
+               return -1;
+
+       return 0;
+}
+
+static int write_tracing_option_file(const char *name, const char *val)
+{
+       char *file;
+       int ret;
+
+       if (asprintf(&file, "options/%s", name) < 0)
+               return -1;
+
+       ret = __write_tracing_file(file, val, false);
+       free(file);
+       return ret;
+}
+
 static int reset_tracing_cpu(void);
 static void reset_tracing_filters(void);
 
+static void reset_tracing_options(struct perf_ftrace *ftrace __maybe_unused)
+{
+       write_tracing_option_file("function-fork", "0");
+       write_tracing_option_file("func_stack_trace", "0");
+       write_tracing_option_file("sleep-time", "1");
+       write_tracing_option_file("funcgraph-irqs", "1");
+       write_tracing_option_file("funcgraph-proc", "0");
+       write_tracing_option_file("funcgraph-abstime", "0");
+       write_tracing_option_file("latency-format", "0");
+       write_tracing_option_file("irq-info", "0");
+}
+
 static int reset_tracing_files(struct perf_ftrace *ftrace __maybe_unused)
 {
        if (write_tracing_file("tracing_on", "0") < 0)
@@ -148,7 +237,11 @@ static int reset_tracing_files(struct perf_ftrace *ftrace __maybe_unused)
        if (write_tracing_file("max_graph_depth", "0") < 0)
                return -1;
 
+       if (write_tracing_file("tracing_thresh", "0") < 0)
+               return -1;
+
        reset_tracing_filters();
+       reset_tracing_options(ftrace);
        return 0;
 }
 
@@ -204,6 +297,28 @@ static int set_tracing_cpu(struct perf_ftrace *ftrace)
        return set_tracing_cpumask(cpumap);
 }
 
+static int set_tracing_func_stack_trace(struct perf_ftrace *ftrace)
+{
+       if (!ftrace->func_stack_trace)
+               return 0;
+
+       if (write_tracing_option_file("func_stack_trace", "1") < 0)
+               return -1;
+
+       return 0;
+}
+
+static int set_tracing_func_irqinfo(struct perf_ftrace *ftrace)
+{
+       if (!ftrace->func_irq_info)
+               return 0;
+
+       if (write_tracing_option_file("irq-info", "1") < 0)
+               return -1;
+
+       return 0;
+}
+
 static int reset_tracing_cpu(void)
 {
        struct perf_cpu_map *cpumap = perf_cpu_map__new(NULL);
@@ -258,8 +373,6 @@ static void reset_tracing_filters(void)
 
 static int set_tracing_depth(struct perf_ftrace *ftrace)
 {
-       char buf[16];
-
        if (ftrace->graph_depth == 0)
                return 0;
 
@@ -268,10 +381,152 @@ static int set_tracing_depth(struct perf_ftrace *ftrace)
                return -1;
        }
 
-       snprintf(buf, sizeof(buf), "%d", ftrace->graph_depth);
+       if (write_tracing_file_int("max_graph_depth", ftrace->graph_depth) < 0)
+               return -1;
+
+       return 0;
+}
+
+static int set_tracing_percpu_buffer_size(struct perf_ftrace *ftrace)
+{
+       int ret;
+
+       if (ftrace->percpu_buffer_size == 0)
+               return 0;
+
+       ret = write_tracing_file_int("buffer_size_kb",
+                                    ftrace->percpu_buffer_size / 1024);
+       if (ret < 0)
+               return ret;
+
+       return 0;
+}
+
+static int set_tracing_trace_inherit(struct perf_ftrace *ftrace)
+{
+       if (!ftrace->inherit)
+               return 0;
+
+       if (write_tracing_option_file("function-fork", "1") < 0)
+               return -1;
+
+       return 0;
+}
+
+static int set_tracing_sleep_time(struct perf_ftrace *ftrace)
+{
+       if (!ftrace->graph_nosleep_time)
+               return 0;
+
+       if (write_tracing_option_file("sleep-time", "0") < 0)
+               return -1;
+
+       return 0;
+}
+
+static int set_tracing_funcgraph_irqs(struct perf_ftrace *ftrace)
+{
+       if (!ftrace->graph_noirqs)
+               return 0;
+
+       if (write_tracing_option_file("funcgraph-irqs", "0") < 0)
+               return -1;
+
+       return 0;
+}
+
+static int set_tracing_funcgraph_verbose(struct perf_ftrace *ftrace)
+{
+       if (!ftrace->graph_verbose)
+               return 0;
+
+       if (write_tracing_option_file("funcgraph-proc", "1") < 0)
+               return -1;
+
+       if (write_tracing_option_file("funcgraph-abstime", "1") < 0)
+               return -1;
+
+       if (write_tracing_option_file("latency-format", "1") < 0)
+               return -1;
+
+       return 0;
+}
+
+static int set_tracing_thresh(struct perf_ftrace *ftrace)
+{
+       int ret;
+
+       if (ftrace->graph_thresh == 0)
+               return 0;
+
+       ret = write_tracing_file_int("tracing_thresh", ftrace->graph_thresh);
+       if (ret < 0)
+               return ret;
+
+       return 0;
+}
+
+static int set_tracing_options(struct perf_ftrace *ftrace)
+{
+       if (set_tracing_pid(ftrace) < 0) {
+               pr_err("failed to set ftrace pid\n");
+               return -1;
+       }
+
+       if (set_tracing_cpu(ftrace) < 0) {
+               pr_err("failed to set tracing cpumask\n");
+               return -1;
+       }
+
+       if (set_tracing_func_stack_trace(ftrace) < 0) {
+               pr_err("failed to set tracing option func_stack_trace\n");
+               return -1;
+       }
+
+       if (set_tracing_func_irqinfo(ftrace) < 0) {
+               pr_err("failed to set tracing option irq-info\n");
+               return -1;
+       }
+
+       if (set_tracing_filters(ftrace) < 0) {
+               pr_err("failed to set tracing filters\n");
+               return -1;
+       }
+
+       if (set_tracing_depth(ftrace) < 0) {
+               pr_err("failed to set graph depth\n");
+               return -1;
+       }
+
+       if (set_tracing_percpu_buffer_size(ftrace) < 0) {
+               pr_err("failed to set tracing per-cpu buffer size\n");
+               return -1;
+       }
+
+       if (set_tracing_trace_inherit(ftrace) < 0) {
+               pr_err("failed to set tracing option function-fork\n");
+               return -1;
+       }
+
+       if (set_tracing_sleep_time(ftrace) < 0) {
+               pr_err("failed to set tracing option sleep-time\n");
+               return -1;
+       }
+
+       if (set_tracing_funcgraph_irqs(ftrace) < 0) {
+               pr_err("failed to set tracing option funcgraph-irqs\n");
+               return -1;
+       }
 
-       if (write_tracing_file("max_graph_depth", buf) < 0)
+       if (set_tracing_funcgraph_verbose(ftrace) < 0) {
+               pr_err("failed to set tracing option funcgraph-proc/funcgraph-abstime\n");
                return -1;
+       }
+
+       if (set_tracing_thresh(ftrace) < 0) {
+               pr_err("failed to set tracing thresh\n");
+               return -1;
+       }
 
        return 0;
 }
@@ -302,6 +557,9 @@ static int __cmd_ftrace(struct perf_ftrace *ftrace, int argc, const char **argv)
        signal(SIGCHLD, sig_handler);
        signal(SIGPIPE, sig_handler);
 
+       if (ftrace->list_avail_functions)
+               return read_tracing_file_to_stdout("available_filter_functions");
+
        if (reset_tracing_files(ftrace) < 0) {
                pr_err("failed to reset ftrace\n");
                goto out;
@@ -317,25 +575,8 @@ static int __cmd_ftrace(struct perf_ftrace *ftrace, int argc, const char **argv)
                goto out;
        }
 
-       if (set_tracing_pid(ftrace) < 0) {
-               pr_err("failed to set ftrace pid\n");
+       if (set_tracing_options(ftrace) < 0)
                goto out_reset;
-       }
-
-       if (set_tracing_cpu(ftrace) < 0) {
-               pr_err("failed to set tracing cpumask\n");
-               goto out_reset;
-       }
-
-       if (set_tracing_filters(ftrace) < 0) {
-               pr_err("failed to set tracing filters\n");
-               goto out_reset;
-       }
-
-       if (set_tracing_depth(ftrace) < 0) {
-               pr_err("failed to set graph depth\n");
-               goto out_reset;
-       }
 
        if (write_tracing_file("current_tracer", ftrace->tracer) < 0) {
                pr_err("failed to set current_tracer to %s\n", ftrace->tracer);
@@ -362,13 +603,26 @@ static int __cmd_ftrace(struct perf_ftrace *ftrace, int argc, const char **argv)
        fcntl(trace_fd, F_SETFL, O_NONBLOCK);
        pollfd.fd = trace_fd;
 
-       if (write_tracing_file("tracing_on", "1") < 0) {
-               pr_err("can't enable tracing\n");
-               goto out_close_fd;
+       /* display column headers */
+       read_tracing_file_to_stdout("trace");
+
+       if (!ftrace->initial_delay) {
+               if (write_tracing_file("tracing_on", "1") < 0) {
+                       pr_err("can't enable tracing\n");
+                       goto out_close_fd;
+               }
        }
 
        perf_evlist__start_workload(ftrace->evlist);
 
+       if (ftrace->initial_delay) {
+               usleep(ftrace->initial_delay * 1000);
+               if (write_tracing_file("tracing_on", "1") < 0) {
+                       pr_err("can't enable tracing\n");
+                       goto out_close_fd;
+               }
+       }
+
        while (!done) {
                if (poll(&pollfd, 1, -1) < 0)
                        break;
@@ -455,6 +709,99 @@ static void delete_filter_func(struct list_head *head)
        }
 }
 
+static int parse_buffer_size(const struct option *opt,
+                            const char *str, int unset)
+{
+       unsigned long *s = (unsigned long *)opt->value;
+       static struct parse_tag tags_size[] = {
+               { .tag  = 'B', .mult = 1       },
+               { .tag  = 'K', .mult = 1 << 10 },
+               { .tag  = 'M', .mult = 1 << 20 },
+               { .tag  = 'G', .mult = 1 << 30 },
+               { .tag  = 0 },
+       };
+       unsigned long val;
+
+       if (unset) {
+               *s = 0;
+               return 0;
+       }
+
+       val = parse_tag_value(str, tags_size);
+       if (val != (unsigned long) -1) {
+               if (val < 1024) {
+                       pr_err("buffer size too small, must be larger than 1KB.");
+                       return -1;
+               }
+               *s = val;
+               return 0;
+       }
+
+       return -1;
+}
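
parse_tag_value() comes from perf's option-parsing helpers; a simplified, self-contained
take on the same B/K/M/G suffix handling, shown only to illustrate what -m/--buffer-size
accepts (parse_size_suffix() is a made-up name, not the perf code):

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>

/* "16M" -> 16777216; returns -1 on malformed input or a missing suffix. */
static long long parse_size_suffix(const char *str)
{
        char *end;
        long long val = strtoll(str, &end, 10);

        if (end == str || val < 0 || *end == '\0' || end[1] != '\0')
                return -1;

        switch (toupper(*end)) {
        case 'B':               break;
        case 'K': val <<= 10;   break;
        case 'M': val <<= 20;   break;
        case 'G': val <<= 30;   break;
        default:  return -1;
        }
        return val;
}

int main(void)
{
        printf("%lld\n", parse_size_suffix("16M"));     /* prints 16777216 */
        return 0;
}
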
+
+static int parse_func_tracer_opts(const struct option *opt,
+                                 const char *str, int unset)
+{
+       int ret;
+       struct perf_ftrace *ftrace = (struct perf_ftrace *) opt->value;
+       struct sublevel_option func_tracer_opts[] = {
+               { .name = "call-graph", .value_ptr = &ftrace->func_stack_trace },
+               { .name = "irq-info",   .value_ptr = &ftrace->func_irq_info },
+               { .name = NULL, }
+       };
+
+       if (unset)
+               return 0;
+
+       ret = perf_parse_sublevel_options(str, func_tracer_opts);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+static int parse_graph_tracer_opts(const struct option *opt,
+                                 const char *str, int unset)
+{
+       int ret;
+       struct perf_ftrace *ftrace = (struct perf_ftrace *) opt->value;
+       struct sublevel_option graph_tracer_opts[] = {
+               { .name = "nosleep-time",       .value_ptr = &ftrace->graph_nosleep_time },
+               { .name = "noirqs",             .value_ptr = &ftrace->graph_noirqs },
+               { .name = "verbose",            .value_ptr = &ftrace->graph_verbose },
+               { .name = "thresh",             .value_ptr = &ftrace->graph_thresh },
+               { .name = "depth",              .value_ptr = &ftrace->graph_depth },
+               { .name = NULL, }
+       };
+
+       if (unset)
+               return 0;
+
+       ret = perf_parse_sublevel_options(str, graph_tracer_opts);
+       if (ret)
+               return ret;
+
+       return 0;
+}
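
perf_parse_sublevel_options() lives in perf's util code; the general shape of the job —
splitting a comma list such as "noirqs,thresh=100,depth=3" into name/value pairs — can be
sketched with plain strtok_r(), purely to illustrate the --graph-opts/--func-opts syntax
(parse_sublevel() below is a hypothetical helper, not the perf implementation):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void parse_sublevel(char *opts)
{
        char *saveptr = NULL;
        char *tok = strtok_r(opts, ",", &saveptr);

        while (tok) {
                char *eq = strchr(tok, '=');
                int val = 1;                    /* a bare name means "enabled" */

                if (eq) {
                        *eq = '\0';
                        val = atoi(eq + 1);
                }
                printf("option '%s' = %d\n", tok, val);
                tok = strtok_r(NULL, ",", &saveptr);
        }
}

int main(void)
{
        char opts[] = "noirqs,thresh=100,depth=3";

        parse_sublevel(opts);
        return 0;
}
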
+
+static void select_tracer(struct perf_ftrace *ftrace)
+{
+       bool graph = !list_empty(&ftrace->graph_funcs) ||
+                    !list_empty(&ftrace->nograph_funcs);
+       bool func = !list_empty(&ftrace->filters) ||
+                   !list_empty(&ftrace->notrace);
+
+       /* The function_graph tracer takes priority over the function tracer. */
+       if (graph)
+               ftrace->tracer = "function_graph";
+       else if (func)
+               ftrace->tracer = "function";
+       /* Otherwise, the default tracer is used. */
+
+       pr_debug("%s tracer is used\n", ftrace->tracer);
+}
+
 int cmd_ftrace(int argc, const char **argv)
 {
        int ret;
@@ -469,25 +816,42 @@ int cmd_ftrace(int argc, const char **argv)
        };
        const struct option ftrace_options[] = {
        OPT_STRING('t', "tracer", &ftrace.tracer, "tracer",
-                  "tracer to use: function_graph(default) or function"),
+                  "Tracer to use: function_graph(default) or function"),
+       OPT_BOOLEAN('F', "funcs", &ftrace.list_avail_functions,
+                   "Show available functions to filter"),
        OPT_STRING('p', "pid", &ftrace.target.pid, "pid",
-                  "trace on existing process id"),
+                  "Trace on existing process id"),
+       /* TODO: Add short option -t after -t/--tracer can be removed. */
+       OPT_STRING(0, "tid", &ftrace.target.tid, "tid",
+                  "Trace on existing thread id (exclusive to --pid)"),
        OPT_INCR('v', "verbose", &verbose,
-                "be more verbose"),
+                "Be more verbose"),
        OPT_BOOLEAN('a', "all-cpus", &ftrace.target.system_wide,
-                   "system-wide collection from all CPUs"),
+                   "System-wide collection from all CPUs"),
        OPT_STRING('C', "cpu", &ftrace.target.cpu_list, "cpu",
-                   "list of cpus to monitor"),
+                   "List of cpus to monitor"),
        OPT_CALLBACK('T', "trace-funcs", &ftrace.filters, "func",
-                    "trace given functions only", parse_filter_func),
+                    "Trace given functions using function tracer",
+                    parse_filter_func),
        OPT_CALLBACK('N', "notrace-funcs", &ftrace.notrace, "func",
-                    "do not trace given functions", parse_filter_func),
+                    "Do not trace given functions", parse_filter_func),
+       OPT_CALLBACK(0, "func-opts", &ftrace, "options",
+                    "Function tracer options, available options: call-graph,irq-info",
+                    parse_func_tracer_opts),
        OPT_CALLBACK('G', "graph-funcs", &ftrace.graph_funcs, "func",
-                    "Set graph filter on given functions", parse_filter_func),
+                    "Trace given functions using function_graph tracer",
+                    parse_filter_func),
        OPT_CALLBACK('g', "nograph-funcs", &ftrace.nograph_funcs, "func",
                     "Set nograph filter on given functions", parse_filter_func),
-       OPT_INTEGER('D', "graph-depth", &ftrace.graph_depth,
-                   "Max depth for function graph tracer"),
+       OPT_CALLBACK(0, "graph-opts", &ftrace, "options",
+                    "Graph tracer options, available options: nosleep-time,noirqs,verbose,thresh=<n>,depth=<n>",
+                    parse_graph_tracer_opts),
+       OPT_CALLBACK('m', "buffer-size", &ftrace.percpu_buffer_size, "size",
+                    "Size of per-cpu buffer, needs to use a B, K, M or G suffix.", parse_buffer_size),
+       OPT_BOOLEAN(0, "inherit", &ftrace.inherit,
+                   "Trace children processes"),
+       OPT_UINTEGER('D', "delay", &ftrace.initial_delay,
+                    "Number of milliseconds to wait before starting tracing after program start"),
        OPT_END()
        };
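
Taken together, the new options allow invocations along the lines of
perf ftrace -G '*kmalloc*' --graph-opts depth=3,thresh=100 -m 16M --inherit -D 500 sleep 1
(an illustrative command line, not taken from the patch): -G selects the function_graph
tracer, --graph-opts limits the graph depth and sets a duration threshold, -m enlarges the
per-cpu buffer, --inherit follows child processes, and -D delays the start of tracing by
500 ms after the workload is launched.
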
 
@@ -505,6 +869,8 @@ int cmd_ftrace(int argc, const char **argv)
        if (!argc && target__none(&ftrace.target))
                ftrace.target.system_wide = true;
 
+       select_tracer(&ftrace);
+
        ret = target__validate(&ftrace.target);
        if (ret) {
                char errbuf[512];
index 4a6de4b..6d2f410 100644 (file)
@@ -292,7 +292,7 @@ static int perf_event__jit_repipe_mmap(struct perf_tool *tool,
         * if jit marker, then inject jit mmaps and generate ELF images
         */
        ret = jit_process(inject->session, &inject->output, machine,
-                         event->mmap.filename, sample->pid, &n);
+                         event->mmap.filename, event->mmap.pid, &n);
        if (ret < 0)
                return ret;
        if (ret) {
@@ -330,7 +330,7 @@ static int perf_event__jit_repipe_mmap2(struct perf_tool *tool,
         * if jit marker, then inject jit mmaps and generate ELF images
         */
        ret = jit_process(inject->session, &inject->output, machine,
-                         event->mmap2.filename, sample->pid, &n);
+                         event->mmap2.filename, event->mmap2.pid, &n);
        if (ret < 0)
                return ret;
        if (ret) {
index 38a5ab6..a50dae2 100644 (file)
@@ -1933,7 +1933,8 @@ int cmd_kmem(int argc, const char **argv)
                return ret;
 
        argc = parse_options_subcommand(argc, argv, kmem_options,
-                                       kmem_subcommands, kmem_usage, 0);
+                                       kmem_subcommands, kmem_usage,
+                                       PARSE_OPT_STOP_AT_NON_OPTION);
 
        if (!argc)
                usage_with_options(kmem_usage, kmem_options);
index 95a7705..460945d 100644 (file)
@@ -1319,7 +1319,7 @@ static struct evlist *kvm_live_event_list(void)
                *name = '\0';
                name++;
 
-               if (perf_evlist__add_newtp(evlist, sys, name, NULL)) {
+               if (evlist__add_newtp(evlist, sys, name, NULL)) {
                        pr_err("Failed to add %s tracepoint to the list\n", *events_tp);
                        free(tp);
                        goto out;
index a37e791..f91352f 100644 (file)
@@ -46,6 +46,7 @@
 #include "util/bpf-event.h"
 #include "util/util.h"
 #include "util/pfm.h"
+#include "util/clockid.h"
 #include "asm/bug.h"
 #include "perf.h"
 
@@ -70,6 +71,7 @@
 #include <linux/time64.h>
 #include <linux/zalloc.h>
 #include <linux/bitmap.h>
+#include <sys/time.h>
 
 struct switch_output {
        bool             enabled;
@@ -765,6 +767,43 @@ static int record__auxtrace_init(struct record *rec __maybe_unused)
 
 #endif
 
+static int record__config_text_poke(struct evlist *evlist)
+{
+       struct evsel *evsel;
+       int err;
+
+       /* Nothing to do if text poke is already configured */
+       evlist__for_each_entry(evlist, evsel) {
+               if (evsel->core.attr.text_poke)
+                       return 0;
+       }
+
+       err = parse_events(evlist, "dummy:u", NULL);
+       if (err)
+               return err;
+
+       evsel = evlist__last(evlist);
+
+       evsel->core.attr.freq = 0;
+       evsel->core.attr.sample_period = 1;
+       evsel->core.attr.text_poke = 1;
+       evsel->core.attr.ksymbol = 1;
+
+       evsel->core.system_wide = true;
+       evsel->no_aux_samples = true;
+       evsel->immediate = true;
+
+       /* Text poke must be collected on all CPUs */
+       perf_cpu_map__put(evsel->core.own_cpus);
+       evsel->core.own_cpus = perf_cpu_map__new(NULL);
+       perf_cpu_map__put(evsel->core.cpus);
+       evsel->core.cpus = perf_cpu_map__get(evsel->core.own_cpus);
+
+       evsel__set_sample_bit(evsel, TIME);
+
+       return 0;
+}
+
 static bool record__kcore_readable(struct machine *machine)
 {
        char kcore[PATH_MAX];
@@ -855,7 +894,7 @@ static int record__open(struct record *rec)
                pos = perf_evlist__get_tracking_event(evlist);
                if (!evsel__is_dummy_event(pos)) {
                        /* Set up dummy event. */
-                       if (perf_evlist__add_dummy(evlist))
+                       if (evlist__add_dummy(evlist))
                                return -ENOMEM;
                        pos = evlist__last(evlist);
                        perf_evlist__set_tracking_event(evlist, pos);
@@ -1166,6 +1205,9 @@ static void record__init_features(struct record *rec)
        if (!(rec->opts.use_clockid && rec->opts.clockid_res_ns))
                perf_header__clear_feat(&session->header, HEADER_CLOCKID);
 
+       if (!rec->opts.use_clockid)
+               perf_header__clear_feat(&session->header, HEADER_CLOCK_DATA);
+
        perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT);
        if (!record__comp_enabled(rec))
                perf_header__clear_feat(&session->header, HEADER_COMPRESSED);
@@ -1489,7 +1531,7 @@ static int record__setup_sb_evlist(struct record *rec)
                evlist__set_cb(rec->sb_evlist, record__process_signal_event, rec);
                rec->thread_id = pthread_self();
        }
-
+#ifdef HAVE_LIBBPF_SUPPORT
        if (!opts->no_bpf_event) {
                if (rec->sb_evlist == NULL) {
                        rec->sb_evlist = evlist__new();
@@ -1505,7 +1547,7 @@ static int record__setup_sb_evlist(struct record *rec)
                        return -1;
                }
        }
-
+#endif
        if (perf_evlist__start_sb_thread(rec->sb_evlist, &rec->opts.target)) {
                pr_debug("Couldn't start the BPF side band thread:\nBPF programs starting from now on won't be annotatable\n");
                opts->no_bpf_event = true;
@@ -1514,6 +1556,43 @@ static int record__setup_sb_evlist(struct record *rec)
        return 0;
 }
 
+static int record__init_clock(struct record *rec)
+{
+       struct perf_session *session = rec->session;
+       struct timespec ref_clockid;
+       struct timeval ref_tod;
+       u64 ref;
+
+       if (!rec->opts.use_clockid)
+               return 0;
+
+       if (rec->opts.use_clockid && rec->opts.clockid_res_ns)
+               session->header.env.clock.clockid_res_ns = rec->opts.clockid_res_ns;
+
+       session->header.env.clock.clockid = rec->opts.clockid;
+
+       if (gettimeofday(&ref_tod, NULL) != 0) {
+               pr_err("gettimeofday failed, cannot set reference time.\n");
+               return -1;
+       }
+
+       if (clock_gettime(rec->opts.clockid, &ref_clockid)) {
+               pr_err("clock_gettime failed, cannot set reference time.\n");
+               return -1;
+       }
+
+       ref = (u64) ref_tod.tv_sec * NSEC_PER_SEC +
+             (u64) ref_tod.tv_usec * NSEC_PER_USEC;
+
+       session->header.env.clock.tod_ns = ref;
+
+       ref = (u64) ref_clockid.tv_sec * NSEC_PER_SEC +
+             (u64) ref_clockid.tv_nsec;
+
+       session->header.env.clock.clockid_ns = ref;
+       return 0;
+}
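
The reference here is just two clock readings taken back to back at record time; a
stripped-down, standalone version of the same capture (libc only, made-up variable names,
CLOCK_MONOTONIC standing in for the user-selected clockid) might look like:

#include <stdio.h>
#include <stdint.h>
#include <sys/time.h>
#include <time.h>

#define NSEC_PER_SEC  1000000000ULL
#define NSEC_PER_USEC 1000ULL

int main(void)
{
        struct timeval tod;
        struct timespec mono;
        uint64_t tod_ns, mono_ns;

        /* Read wall-clock time and the sampling clock as close together as possible. */
        gettimeofday(&tod, NULL);
        clock_gettime(CLOCK_MONOTONIC, &mono);

        tod_ns  = (uint64_t)tod.tv_sec * NSEC_PER_SEC + (uint64_t)tod.tv_usec * NSEC_PER_USEC;
        mono_ns = (uint64_t)mono.tv_sec * NSEC_PER_SEC + (uint64_t)mono.tv_nsec;

        /* A later timestamp T on the same clock maps to wall time as tod_ns + (T - mono_ns). */
        printf("tod_ns=%llu clockid_ns=%llu\n",
               (unsigned long long)tod_ns, (unsigned long long)mono_ns);
        return 0;
}
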
+
 static int __cmd_record(struct record *rec, int argc, const char **argv)
 {
        int err;
@@ -1527,6 +1606,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
        bool disabled = false, draining = false;
        int fd;
        float ratio = 0;
+       enum evlist_ctl_cmd cmd = EVLIST_CTL_CMD_UNSUPPORTED;
 
        atexit(record__sig_exit);
        signal(SIGCHLD, sig_handler);
@@ -1593,10 +1673,10 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
                return -1;
        }
 
-       record__init_features(rec);
+       if (record__init_clock(rec))
+               return -1;
 
-       if (rec->opts.use_clockid && rec->opts.clockid_res_ns)
-               session->header.env.clockid_res_ns = rec->opts.clockid_res_ns;
+       record__init_features(rec);
 
        if (forks) {
                err = perf_evlist__prepare_workload(rec->evlist, &opts->target,
@@ -1646,7 +1726,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
         * Normally perf_session__new would do this, but it doesn't have the
         * evlist.
         */
-       if (rec->tool.ordered_events && !perf_evlist__sample_id_all(rec->evlist)) {
+       if (rec->tool.ordered_events && !evlist__sample_id_all(rec->evlist)) {
                pr_warning("WARNING: No sample_id_all support, falling back to unordered processing\n");
                rec->tool.ordered_events = false;
        }
@@ -1748,9 +1828,16 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
                perf_evlist__start_workload(rec->evlist);
        }
 
+       if (evlist__initialize_ctlfd(rec->evlist, opts->ctl_fd, opts->ctl_fd_ack))
+               goto out_child;
+
        if (opts->initial_delay) {
-               usleep(opts->initial_delay * USEC_PER_MSEC);
-               evlist__enable(rec->evlist);
+               pr_info(EVLIST_DISABLED_MSG);
+               if (opts->initial_delay > 0) {
+                       usleep(opts->initial_delay * USEC_PER_MSEC);
+                       evlist__enable(rec->evlist);
+                       pr_info(EVLIST_ENABLED_MSG);
+               }
        }
 
        trigger_ready(&auxtrace_snapshot_trigger);
@@ -1842,6 +1929,21 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
                                draining = true;
                }
 
+               if (evlist__ctlfd_process(rec->evlist, &cmd) > 0) {
+                       switch (cmd) {
+                       case EVLIST_CTL_CMD_ENABLE:
+                               pr_info(EVLIST_ENABLED_MSG);
+                               break;
+                       case EVLIST_CTL_CMD_DISABLE:
+                               pr_info(EVLIST_DISABLED_MSG);
+                               break;
+                       case EVLIST_CTL_CMD_ACK:
+                       case EVLIST_CTL_CMD_UNSUPPORTED:
+                       default:
+                               break;
+                       }
+               }
+
                /*
                 * When perf is starting the traced process, at the end events
                 * die with the process and we wait for that. Thus no need to
@@ -1875,6 +1977,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
                record__synthesize_workload(rec, true);
 
 out_child:
+       evlist__finalize_ctlfd(rec->evlist);
        record__mmap_read_all(rec, true);
        record__aio_mmap_read_sync(rec);
 
@@ -2041,103 +2144,6 @@ static int perf_record_config(const char *var, const char *value, void *cb)
        return 0;
 }
 
-struct clockid_map {
-       const char *name;
-       int clockid;
-};
-
-#define CLOCKID_MAP(n, c)      \
-       { .name = n, .clockid = (c), }
-
-#define CLOCKID_END    { .name = NULL, }
-
-
-/*
- * Add the missing ones, we need to build on many distros...
- */
-#ifndef CLOCK_MONOTONIC_RAW
-#define CLOCK_MONOTONIC_RAW 4
-#endif
-#ifndef CLOCK_BOOTTIME
-#define CLOCK_BOOTTIME 7
-#endif
-#ifndef CLOCK_TAI
-#define CLOCK_TAI 11
-#endif
-
-static const struct clockid_map clockids[] = {
-       /* available for all events, NMI safe */
-       CLOCKID_MAP("monotonic", CLOCK_MONOTONIC),
-       CLOCKID_MAP("monotonic_raw", CLOCK_MONOTONIC_RAW),
-
-       /* available for some events */
-       CLOCKID_MAP("realtime", CLOCK_REALTIME),
-       CLOCKID_MAP("boottime", CLOCK_BOOTTIME),
-       CLOCKID_MAP("tai", CLOCK_TAI),
-
-       /* available for the lazy */
-       CLOCKID_MAP("mono", CLOCK_MONOTONIC),
-       CLOCKID_MAP("raw", CLOCK_MONOTONIC_RAW),
-       CLOCKID_MAP("real", CLOCK_REALTIME),
-       CLOCKID_MAP("boot", CLOCK_BOOTTIME),
-
-       CLOCKID_END,
-};
-
-static int get_clockid_res(clockid_t clk_id, u64 *res_ns)
-{
-       struct timespec res;
-
-       *res_ns = 0;
-       if (!clock_getres(clk_id, &res))
-               *res_ns = res.tv_nsec + res.tv_sec * NSEC_PER_SEC;
-       else
-               pr_warning("WARNING: Failed to determine specified clock resolution.\n");
-
-       return 0;
-}
-
-static int parse_clockid(const struct option *opt, const char *str, int unset)
-{
-       struct record_opts *opts = (struct record_opts *)opt->value;
-       const struct clockid_map *cm;
-       const char *ostr = str;
-
-       if (unset) {
-               opts->use_clockid = 0;
-               return 0;
-       }
-
-       /* no arg passed */
-       if (!str)
-               return 0;
-
-       /* no setting it twice */
-       if (opts->use_clockid)
-               return -1;
-
-       opts->use_clockid = true;
-
-       /* if its a number, we're done */
-       if (sscanf(str, "%d", &opts->clockid) == 1)
-               return get_clockid_res(opts->clockid, &opts->clockid_res_ns);
-
-       /* allow a "CLOCK_" prefix to the name */
-       if (!strncasecmp(str, "CLOCK_", 6))
-               str += 6;
-
-       for (cm = clockids; cm->name; cm++) {
-               if (!strcasecmp(str, cm->name)) {
-                       opts->clockid = cm->clockid;
-                       return get_clockid_res(opts->clockid,
-                                              &opts->clockid_res_ns);
-               }
-       }
-
-       opts->use_clockid = false;
-       ui__warning("unknown clockid %s, check man page\n", ostr);
-       return -1;
-}
 
 static int record__parse_affinity(const struct option *opt, const char *str, int unset)
 {
@@ -2224,6 +2230,33 @@ out_free:
        return ret;
 }
 
+static int parse_control_option(const struct option *opt,
+                               const char *str,
+                               int unset __maybe_unused)
+{
+       char *comma = NULL, *endptr = NULL;
+       struct record_opts *config = (struct record_opts *)opt->value;
+
+       if (strncmp(str, "fd:", 3))
+               return -EINVAL;
+
+       config->ctl_fd = strtoul(&str[3], &endptr, 0);
+       if (endptr == &str[3])
+               return -EINVAL;
+
+       comma = strchr(str, ',');
+       if (comma) {
+               if (endptr != comma)
+                       return -EINVAL;
+
+               config->ctl_fd_ack = strtoul(comma + 1, &endptr, 0);
+               if (endptr == comma + 1 || *endptr != '\0')
+                       return -EINVAL;
+       }
+
+       return 0;
+}
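
A compact standalone equivalent of the "fd:ctl-fd[,ack-fd]" parsing above (parse_fd_spec()
is an illustrative name, not the perf code):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Parse "fd:10" or "fd:10,11" into control/ack descriptors; 0 on success. */
static int parse_fd_spec(const char *str, int *ctl_fd, int *ack_fd)
{
        char *end;

        if (strncmp(str, "fd:", 3))
                return -1;

        *ctl_fd = (int)strtoul(str + 3, &end, 0);
        if (end == str + 3)
                return -1;

        *ack_fd = -1;
        if (*end == ',') {
                *ack_fd = (int)strtoul(end + 1, &end, 0);
                if (*end != '\0')
                        return -1;
        } else if (*end != '\0') {
                return -1;
        }
        return 0;
}

int main(void)
{
        int ctl, ack;

        if (!parse_fd_spec("fd:10,11", &ctl, &ack))
                printf("ctl=%d ack=%d\n", ctl, ack);    /* ctl=10 ack=11 */
        return 0;
}
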
+
 static void switch_output_size_warn(struct record *rec)
 {
        u64 wakeup_size = evlist__mmap_size(rec->opts.mmap_pages);
@@ -2360,6 +2393,8 @@ static struct record record = {
                },
                .mmap_flush          = MMAP_FLUSH_DEFAULT,
                .nr_threads_synthesize = 1,
+               .ctl_fd              = -1,
+               .ctl_fd_ack          = -1,
        },
        .tool = {
                .sample         = process_sample_event,
@@ -2462,8 +2497,8 @@ static struct option __record_options[] = {
        OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
                     "monitor event in cgroup name only",
                     parse_cgroups),
-       OPT_UINTEGER('D', "delay", &record.opts.initial_delay,
-                 "ms to wait before starting measurement after program start"),
+       OPT_INTEGER('D', "delay", &record.opts.initial_delay,
+                 "ms to wait before starting measurement after program start (-1: start with events disabled)"),
        OPT_BOOLEAN(0, "kcore", &record.opts.kcore, "copy /proc/kcore"),
        OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
                   "user to profile"),
@@ -2561,6 +2596,10 @@ static struct option __record_options[] = {
                "libpfm4 event selector. use 'perf list' to list available events",
                parse_libpfm_events_option),
 #endif
+       OPT_CALLBACK(0, "control", &record.opts, "fd:ctl-fd[,ack-fd]",
+                    "Listen on ctl-fd descriptor for command to control measurement ('enable': enable events, 'disable': disable events).\n"
+                    "\t\t\t  Optionally send control command completion ('ack\\n') to ack-fd descriptor.",
+                     parse_control_option),
        OPT_END()
 };
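
On the other side of --control, a controlling process writes the command string to ctl-fd
and, when ack-fd is given, waits for the "ack\n" completion mentioned in the help text. A
minimal sketch of such a controller, assuming the two descriptors (10 and 11 here) were set
up by the caller, e.g. as pipe or FIFO ends inherited by perf:

#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Illustrative controller: send one command, then wait for the acknowledgement. */
static int send_cmd(int ctl_fd, int ack_fd, const char *cmd)
{
        char reply[16] = { 0 };

        if (write(ctl_fd, cmd, strlen(cmd)) < 0)
                return -1;
        if (ack_fd >= 0 && read(ack_fd, reply, sizeof(reply) - 1) > 0)
                printf("reply: %s", reply);             /* expected: "ack\n" */
        return 0;
}

int main(void)
{
        /* Assumed descriptor numbers, matching a hypothetical --control fd:10,11. */
        send_cmd(10, 11, "enable");
        sleep(1);
        send_cmd(10, 11, "disable");
        return 0;
}
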
 
@@ -2722,7 +2761,7 @@ int cmd_record(int argc, const char **argv)
                record.opts.tail_synthesize = true;
 
        if (rec->evlist->core.nr_entries == 0 &&
-           __perf_evlist__add_default(rec->evlist, !record.opts.no_samples) < 0) {
+           __evlist__add_default(rec->evlist, !record.opts.no_samples) < 0) {
                pr_err("Not enough memory for event selector list\n");
                goto out;
        }
@@ -2766,6 +2805,14 @@ int cmd_record(int argc, const char **argv)
        if (rec->opts.full_auxtrace)
                rec->buildid_all = true;
 
+       if (rec->opts.text_poke) {
+               err = record__config_text_poke(rec->evlist);
+               if (err) {
+                       pr_err("record__config_text_poke failed, error %d\n", err);
+                       goto out;
+               }
+       }
+
        if (record_opts__config(&rec->opts)) {
                err = -EINVAL;
                goto out;
index 5f1d2a8..ece1cdd 100644 (file)
@@ -338,7 +338,7 @@ static int process_read_event(struct perf_tool *tool,
 static int report__setup_sample_type(struct report *rep)
 {
        struct perf_session *session = rep->session;
-       u64 sample_type = perf_evlist__combined_sample_type(session->evlist);
+       u64 sample_type = evlist__combined_sample_type(session->evlist);
        bool is_pipe = perf_data__is_pipe(session->data);
 
        if (session->itrace_synth_opts->callchain ||
@@ -410,8 +410,7 @@ static int report__setup_sample_type(struct report *rep)
        }
 
        /* ??? handle more cases than just ANY? */
-       if (!(perf_evlist__combined_branch_type(session->evlist) &
-                               PERF_SAMPLE_BRANCH_ANY))
+       if (!(evlist__combined_branch_type(session->evlist) & PERF_SAMPLE_BRANCH_ANY))
                rep->nonany_branch_mode = true;
 
 #if !defined(HAVE_LIBUNWIND_SUPPORT) && !defined(HAVE_DWARF_SUPPORT)
@@ -1093,7 +1092,7 @@ static int process_attr(struct perf_tool *tool __maybe_unused,
         * Check if we need to enable callchains based
         * on events sample_type.
         */
-       sample_type = perf_evlist__combined_sample_type(*pevlist);
+       sample_type = evlist__combined_sample_type(*pevlist);
        callchain_param_setup(sample_type);
        return 0;
 }
@@ -1389,7 +1388,7 @@ repeat:
 
        has_br_stack = perf_header__has_feat(&session->header,
                                             HEADER_BRANCH_STACK);
-       if (perf_evlist__combined_sample_type(session->evlist) & PERF_SAMPLE_STACK_USER)
+       if (evlist__combined_sample_type(session->evlist) & PERF_SAMPLE_STACK_USER)
                has_br_stack = false;
 
        setup_forced_leader(&report, session->evlist);
index 459e422..0c7d599 100644 (file)
@@ -2398,6 +2398,15 @@ static void timehist_print_wakeup_event(struct perf_sched *sched,
        printf("\n");
 }
 
+static int timehist_sched_wakeup_ignore(struct perf_tool *tool __maybe_unused,
+                                       union perf_event *event __maybe_unused,
+                                       struct evsel *evsel __maybe_unused,
+                                       struct perf_sample *sample __maybe_unused,
+                                       struct machine *machine __maybe_unused)
+{
+       return 0;
+}
+
 static int timehist_sched_wakeup_event(struct perf_tool *tool,
                                       union perf_event *event __maybe_unused,
                                       struct evsel *evsel,
@@ -2958,9 +2967,10 @@ static int timehist_check_attr(struct perf_sched *sched,
 
 static int perf_sched__timehist(struct perf_sched *sched)
 {
-       const struct evsel_str_handler handlers[] = {
+       struct evsel_str_handler handlers[] = {
                { "sched:sched_switch",       timehist_sched_switch_event, },
                { "sched:sched_wakeup",       timehist_sched_wakeup_event, },
+               { "sched:sched_waking",       timehist_sched_wakeup_event, },
                { "sched:sched_wakeup_new",   timehist_sched_wakeup_event, },
        };
        const struct evsel_str_handler migrate_handlers[] = {
@@ -3018,6 +3028,11 @@ static int perf_sched__timehist(struct perf_sched *sched)
 
        setup_pager();
 
+       /* prefer sched_waking if it is captured */
+       if (perf_evlist__find_tracepoint_by_name(session->evlist,
+                                                 "sched:sched_waking"))
+               handlers[1].handler = timehist_sched_wakeup_ignore;
+
        /* setup per-evsel handlers */
        if (perf_session__set_tracepoints_handlers(session, handlers))
                goto out;
@@ -3330,12 +3345,16 @@ static int __cmd_record(int argc, const char **argv)
                "-e", "sched:sched_stat_iowait",
                "-e", "sched:sched_stat_runtime",
                "-e", "sched:sched_process_fork",
-               "-e", "sched:sched_wakeup",
                "-e", "sched:sched_wakeup_new",
                "-e", "sched:sched_migrate_task",
        };
+       struct tep_event *waking_event;
 
-       rec_argc = ARRAY_SIZE(record_args) + argc - 1;
+       /*
+        * +2 for either "-e", "sched:sched_wakeup" or
+        * "-e", "sched:sched_waking"
+        */
+       rec_argc = ARRAY_SIZE(record_args) + 2 + argc - 1;
        rec_argv = calloc(rec_argc + 1, sizeof(char *));
 
        if (rec_argv == NULL)
@@ -3344,6 +3363,13 @@ static int __cmd_record(int argc, const char **argv)
        for (i = 0; i < ARRAY_SIZE(record_args); i++)
                rec_argv[i] = strdup(record_args[i]);
 
+       rec_argv[i++] = "-e";
+       waking_event = trace_event__tp_format("sched", "sched_waking");
+       if (!IS_ERR(waking_event))
+               rec_argv[i++] = strdup("sched:sched_waking");
+       else
+               rec_argv[i++] = strdup("sched:sched_wakeup");
+
        for (j = 1; j < (unsigned int)argc; j++, i++)
                rec_argv[i] = argv[j];
 
index 4474577..484ce60 100644 (file)
@@ -82,38 +82,64 @@ static bool                 native_arch;
 unsigned int scripting_max_stack = PERF_MAX_STACK_DEPTH;
 
 enum perf_output_field {
-       PERF_OUTPUT_COMM            = 1U << 0,
-       PERF_OUTPUT_TID             = 1U << 1,
-       PERF_OUTPUT_PID             = 1U << 2,
-       PERF_OUTPUT_TIME            = 1U << 3,
-       PERF_OUTPUT_CPU             = 1U << 4,
-       PERF_OUTPUT_EVNAME          = 1U << 5,
-       PERF_OUTPUT_TRACE           = 1U << 6,
-       PERF_OUTPUT_IP              = 1U << 7,
-       PERF_OUTPUT_SYM             = 1U << 8,
-       PERF_OUTPUT_DSO             = 1U << 9,
-       PERF_OUTPUT_ADDR            = 1U << 10,
-       PERF_OUTPUT_SYMOFFSET       = 1U << 11,
-       PERF_OUTPUT_SRCLINE         = 1U << 12,
-       PERF_OUTPUT_PERIOD          = 1U << 13,
-       PERF_OUTPUT_IREGS           = 1U << 14,
-       PERF_OUTPUT_BRSTACK         = 1U << 15,
-       PERF_OUTPUT_BRSTACKSYM      = 1U << 16,
-       PERF_OUTPUT_DATA_SRC        = 1U << 17,
-       PERF_OUTPUT_WEIGHT          = 1U << 18,
-       PERF_OUTPUT_BPF_OUTPUT      = 1U << 19,
-       PERF_OUTPUT_CALLINDENT      = 1U << 20,
-       PERF_OUTPUT_INSN            = 1U << 21,
-       PERF_OUTPUT_INSNLEN         = 1U << 22,
-       PERF_OUTPUT_BRSTACKINSN     = 1U << 23,
-       PERF_OUTPUT_BRSTACKOFF      = 1U << 24,
-       PERF_OUTPUT_SYNTH           = 1U << 25,
-       PERF_OUTPUT_PHYS_ADDR       = 1U << 26,
-       PERF_OUTPUT_UREGS           = 1U << 27,
-       PERF_OUTPUT_METRIC          = 1U << 28,
-       PERF_OUTPUT_MISC            = 1U << 29,
-       PERF_OUTPUT_SRCCODE         = 1U << 30,
-       PERF_OUTPUT_IPC             = 1U << 31,
+       PERF_OUTPUT_COMM            = 1ULL << 0,
+       PERF_OUTPUT_TID             = 1ULL << 1,
+       PERF_OUTPUT_PID             = 1ULL << 2,
+       PERF_OUTPUT_TIME            = 1ULL << 3,
+       PERF_OUTPUT_CPU             = 1ULL << 4,
+       PERF_OUTPUT_EVNAME          = 1ULL << 5,
+       PERF_OUTPUT_TRACE           = 1ULL << 6,
+       PERF_OUTPUT_IP              = 1ULL << 7,
+       PERF_OUTPUT_SYM             = 1ULL << 8,
+       PERF_OUTPUT_DSO             = 1ULL << 9,
+       PERF_OUTPUT_ADDR            = 1ULL << 10,
+       PERF_OUTPUT_SYMOFFSET       = 1ULL << 11,
+       PERF_OUTPUT_SRCLINE         = 1ULL << 12,
+       PERF_OUTPUT_PERIOD          = 1ULL << 13,
+       PERF_OUTPUT_IREGS           = 1ULL << 14,
+       PERF_OUTPUT_BRSTACK         = 1ULL << 15,
+       PERF_OUTPUT_BRSTACKSYM      = 1ULL << 16,
+       PERF_OUTPUT_DATA_SRC        = 1ULL << 17,
+       PERF_OUTPUT_WEIGHT          = 1ULL << 18,
+       PERF_OUTPUT_BPF_OUTPUT      = 1ULL << 19,
+       PERF_OUTPUT_CALLINDENT      = 1ULL << 20,
+       PERF_OUTPUT_INSN            = 1ULL << 21,
+       PERF_OUTPUT_INSNLEN         = 1ULL << 22,
+       PERF_OUTPUT_BRSTACKINSN     = 1ULL << 23,
+       PERF_OUTPUT_BRSTACKOFF      = 1ULL << 24,
+       PERF_OUTPUT_SYNTH           = 1ULL << 25,
+       PERF_OUTPUT_PHYS_ADDR       = 1ULL << 26,
+       PERF_OUTPUT_UREGS           = 1ULL << 27,
+       PERF_OUTPUT_METRIC          = 1ULL << 28,
+       PERF_OUTPUT_MISC            = 1ULL << 29,
+       PERF_OUTPUT_SRCCODE         = 1ULL << 30,
+       PERF_OUTPUT_IPC             = 1ULL << 31,
+       PERF_OUTPUT_TOD             = 1ULL << 32,
+};
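
The switch from 1U to 1ULL is what makes room for PERF_OUTPUT_TOD: bit 31 was the last one
that fits in an unsigned int, and shifting a 32-bit constant by 32 is undefined behaviour.
A tiny standalone illustration of the pitfall:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t bit31 = 1U << 31;      /* last safe bit with a 32-bit constant */
        uint64_t bit32 = 1ULL << 32;    /* bit 32 needs a 64-bit constant; 1U << 32 is UB */

        printf("bit31=%#llx bit32=%#llx\n",
               (unsigned long long)bit31, (unsigned long long)bit32);
        return 0;
}
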
+
+struct perf_script {
+       struct perf_tool        tool;
+       struct perf_session     *session;
+       bool                    show_task_events;
+       bool                    show_mmap_events;
+       bool                    show_switch_events;
+       bool                    show_namespace_events;
+       bool                    show_lost_events;
+       bool                    show_round_events;
+       bool                    show_bpf_events;
+       bool                    show_cgroup_events;
+       bool                    show_text_poke_events;
+       bool                    allocated;
+       bool                    per_event_dump;
+       bool                    stitch_lbr;
+       struct evswitch         evswitch;
+       struct perf_cpu_map     *cpus;
+       struct perf_thread_map *threads;
+       int                     name_width;
+       const char              *time_str;
+       struct perf_time_interval *ptime_range;
+       int                     range_size;
+       int                     range_num;
 };
 
 struct output_option {
@@ -152,6 +178,7 @@ struct output_option {
        {.str = "misc", .field = PERF_OUTPUT_MISC},
        {.str = "srccode", .field = PERF_OUTPUT_SRCCODE},
        {.str = "ipc", .field = PERF_OUTPUT_IPC},
+       {.str = "tod", .field = PERF_OUTPUT_TOD},
 };
 
 enum {
@@ -388,7 +415,7 @@ static int evsel__check_stype(struct evsel *evsel, u64 sample_type, const char *
        return evsel__do_check_stype(evsel, sample_type, sample_msg, field, false);
 }
 
-static int perf_evsel__check_attr(struct evsel *evsel, struct perf_session *session)
+static int evsel__check_attr(struct evsel *evsel, struct perf_session *session)
 {
        struct perf_event_attr *attr = &evsel->core.attr;
        bool allow_user_set;
@@ -443,8 +470,7 @@ static int perf_evsel__check_attr(struct evsel *evsel, struct perf_session *sess
                return -EINVAL;
        }
        if (PRINT_FIELD(BRSTACKINSN) && !allow_user_set &&
-           !(perf_evlist__combined_branch_type(session->evlist) &
-             PERF_SAMPLE_BRANCH_ANY)) {
+           !(evlist__combined_branch_type(session->evlist) & PERF_SAMPLE_BRANCH_ANY)) {
                pr_err("Display of branch stack assembler requested, but non all-branch filter set\n"
                       "Hint: run 'perf record -b ...'\n");
                return -EINVAL;
@@ -503,6 +529,7 @@ static void set_print_ip_opts(struct perf_event_attr *attr)
  */
 static int perf_session__check_output_opt(struct perf_session *session)
 {
+       bool tod = false;
        unsigned int j;
        struct evsel *evsel;
 
@@ -522,13 +549,14 @@ static int perf_session__check_output_opt(struct perf_session *session)
                }
 
                if (evsel && output[j].fields &&
-                       perf_evsel__check_attr(evsel, session))
+                       evsel__check_attr(evsel, session))
                        return -1;
 
                if (evsel == NULL)
                        continue;
 
                set_print_ip_opts(&evsel->core.attr);
+               tod |= output[j].fields & PERF_OUTPUT_TOD;
        }
 
        if (!no_callchain) {
@@ -569,13 +597,17 @@ static int perf_session__check_output_opt(struct perf_session *session)
                }
        }
 
+       if (tod && !session->header.env.clock.enabled) {
+               pr_err("Can't provide 'tod' time, missing clock data. "
+                      "Please record with -k/--clockid option.\n");
+               return -1;
+       }
 out:
        return 0;
 }
 
 static int perf_sample__fprintf_regs(struct regs_dump *regs, uint64_t mask,
-                                    FILE *fp
-)
+                                    FILE *fp)
 {
        unsigned i = 0, r;
        int printed = 0;
@@ -593,6 +625,56 @@ static int perf_sample__fprintf_regs(struct regs_dump *regs, uint64_t mask,
        return printed;
 }
 
+#define DEFAULT_TOD_FMT "%F %H:%M:%S"
+
+static char*
+tod_scnprintf(struct perf_script *script, char *buf, int buflen,
+            u64 timestamp)
+{
+       u64 tod_ns, clockid_ns;
+       struct perf_env *env;
+       unsigned long nsec;
+       struct tm ltime;
+       char date[64];
+       time_t sec;
+
+       buf[0] = '\0';
+       if (buflen < 64 || !script)
+               return buf;
+
+       env = &script->session->header.env;
+       if (!env->clock.enabled) {
+               scnprintf(buf, buflen, "disabled");
+               return buf;
+       }
+
+       clockid_ns = env->clock.clockid_ns;
+       tod_ns     = env->clock.tod_ns;
+
+       if (timestamp > clockid_ns)
+               tod_ns += timestamp - clockid_ns;
+       else
+               tod_ns -= clockid_ns - timestamp;
+
+       sec  = (time_t) (tod_ns / NSEC_PER_SEC);
+       nsec = tod_ns - sec * NSEC_PER_SEC;
+
+       if (localtime_r(&sec, &ltime) == NULL) {
+               scnprintf(buf, buflen, "failed");
+       } else {
+               strftime(date, sizeof(date), DEFAULT_TOD_FMT, &ltime);
+
+               if (symbol_conf.nanosecs) {
+                       snprintf(buf, buflen, "%s.%09lu", date, nsec);
+               } else {
+                       snprintf(buf, buflen, "%s.%06lu",
+                                date, nsec / NSEC_PER_USEC);
+               }
+       }
+
+       return buf;
+}
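
The conversion itself is only an offset between the two reference readings stored in the
header; a standalone sketch of the same arithmetic and formatting (made-up reference
values, libc only, ignoring the timestamp-older-than-reference case handled above):

#include <stdio.h>
#include <stdint.h>
#include <time.h>

#define NSEC_PER_SEC  1000000000ULL
#define NSEC_PER_USEC 1000ULL

/* Map a sample timestamp (same clock as clockid_ns) onto wall-clock time. */
static void print_tod(uint64_t tod_ns, uint64_t clockid_ns, uint64_t timestamp)
{
        uint64_t ns = tod_ns + (timestamp - clockid_ns);
        time_t sec = ns / NSEC_PER_SEC;
        unsigned long usec = (ns % NSEC_PER_SEC) / NSEC_PER_USEC;
        struct tm ltime;
        char date[64];

        if (localtime_r(&sec, &ltime)) {
                strftime(date, sizeof(date), "%F %H:%M:%S", &ltime);
                printf("%s.%06lu\n", date, usec);
        }
}

int main(void)
{
        /* Hypothetical reference pair and timestamp, for illustration only. */
        print_tod(1598000000ULL * NSEC_PER_SEC, 1000ULL * NSEC_PER_SEC,
                  1001ULL * NSEC_PER_SEC);
        return 0;
}
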
+
 static int perf_sample__fprintf_iregs(struct perf_sample *sample,
                                      struct perf_event_attr *attr, FILE *fp)
 {
@@ -607,7 +689,8 @@ static int perf_sample__fprintf_uregs(struct perf_sample *sample,
                                         attr->sample_regs_user, fp);
 }
 
-static int perf_sample__fprintf_start(struct perf_sample *sample,
+static int perf_sample__fprintf_start(struct perf_script *script,
+                                     struct perf_sample *sample,
                                      struct thread *thread,
                                      struct evsel *evsel,
                                      u32 type, FILE *fp)
@@ -616,6 +699,7 @@ static int perf_sample__fprintf_start(struct perf_sample *sample,
        unsigned long secs;
        unsigned long long nsecs;
        int printed = 0;
+       char tstr[128];
 
        if (PRINT_FIELD(COMM)) {
                if (latency_format)
@@ -684,6 +768,11 @@ static int perf_sample__fprintf_start(struct perf_sample *sample,
                printed += ret;
        }
 
+       if (PRINT_FIELD(TOD)) {
+               tod_scnprintf(script, tstr, sizeof(tstr), sample->time);
+               printed += fprintf(fp, "%s ", tstr);
+       }
+
        if (PRINT_FIELD(TIME)) {
                u64 t = sample->time;
                if (reltime) {
@@ -1668,31 +1757,7 @@ static int perf_sample__fprintf_synth(struct perf_sample *sample,
        return 0;
 }
 
-struct perf_script {
-       struct perf_tool        tool;
-       struct perf_session     *session;
-       bool                    show_task_events;
-       bool                    show_mmap_events;
-       bool                    show_switch_events;
-       bool                    show_namespace_events;
-       bool                    show_lost_events;
-       bool                    show_round_events;
-       bool                    show_bpf_events;
-       bool                    show_cgroup_events;
-       bool                    allocated;
-       bool                    per_event_dump;
-       bool                    stitch_lbr;
-       struct evswitch         evswitch;
-       struct perf_cpu_map     *cpus;
-       struct perf_thread_map *threads;
-       int                     name_width;
-       const char              *time_str;
-       struct perf_time_interval *ptime_range;
-       int                     range_size;
-       int                     range_num;
-};
-
-static int perf_evlist__max_name_len(struct evlist *evlist)
+static int evlist__max_name_len(struct evlist *evlist)
 {
        struct evsel *evsel;
        int max = 0;
@@ -1739,7 +1804,7 @@ static void script_print_metric(struct perf_stat_config *config __maybe_unused,
 
        if (!fmt)
                return;
-       perf_sample__fprintf_start(mctx->sample, mctx->thread, mctx->evsel,
+       perf_sample__fprintf_start(NULL, mctx->sample, mctx->thread, mctx->evsel,
                                   PERF_RECORD_SAMPLE, mctx->fp);
        fputs("\tmetric: ", mctx->fp);
        if (color)
@@ -1754,7 +1819,7 @@ static void script_new_line(struct perf_stat_config *config __maybe_unused,
 {
        struct metric_ctx *mctx = ctx;
 
-       perf_sample__fprintf_start(mctx->sample, mctx->thread, mctx->evsel,
+       perf_sample__fprintf_start(NULL, mctx->sample, mctx->thread, mctx->evsel,
                                   PERF_RECORD_SAMPLE, mctx->fp);
        fputs("\tmetric: ", mctx->fp);
 }
@@ -1865,7 +1930,7 @@ static void process_event(struct perf_script *script,
 
        ++es->samples;
 
-       perf_sample__fprintf_start(sample, thread, evsel,
+       perf_sample__fprintf_start(script, sample, thread, evsel,
                                   PERF_RECORD_SAMPLE, fp);
 
        if (PRINT_FIELD(PERIOD))
@@ -1875,7 +1940,7 @@ static void process_event(struct perf_script *script,
                const char *evname = evsel__name(evsel);
 
                if (!script->name_width)
-                       script->name_width = perf_evlist__max_name_len(script->session->evlist);
+                       script->name_width = evlist__max_name_len(script->session->evlist);
 
                fprintf(fp, "%*s: ", script->name_width, evname ?: "[unknown]");
        }
@@ -2120,7 +2185,7 @@ static int process_attr(struct perf_tool *tool, union perf_event *event,
        }
 
        if (evsel->core.attr.sample_type) {
-               err = perf_evsel__check_attr(evsel, scr->session);
+               err = evsel__check_attr(evsel, scr->session);
                if (err)
                        return err;
        }
@@ -2129,7 +2194,7 @@ static int process_attr(struct perf_tool *tool, union perf_event *event,
         * Check if we need to enable callchains based
         * on events sample_type.
         */
-       sample_type = perf_evlist__combined_sample_type(evlist);
+       sample_type = evlist__combined_sample_type(evlist);
        callchain_param_setup(sample_type);
 
        /* Enable fields for callchain entries */
@@ -2174,11 +2239,11 @@ static int print_event_with_time(struct perf_tool *tool,
                thread = machine__findnew_thread(machine, pid, tid);
 
        if (thread && evsel) {
-               perf_sample__fprintf_start(sample, thread, evsel,
+               perf_sample__fprintf_start(script, sample, thread, evsel,
                                           event->header.type, stdout);
        }
 
-       perf_event__fprintf(event, stdout);
+       perf_event__fprintf(event, machine, stdout);
 
        thread__put(thread);
 
@@ -2313,7 +2378,7 @@ process_finished_round_event(struct perf_tool *tool __maybe_unused,
                             struct ordered_events *oe __maybe_unused)
 
 {
-       perf_event__fprintf(event, stdout);
+       perf_event__fprintf(event, NULL, stdout);
        return 0;
 }
 
@@ -2330,6 +2395,18 @@ process_bpf_events(struct perf_tool *tool __maybe_unused,
                           sample->tid);
 }
 
+static int process_text_poke_events(struct perf_tool *tool,
+                                   union perf_event *event,
+                                   struct perf_sample *sample,
+                                   struct machine *machine)
+{
+       if (perf_event__process_text_poke(tool, event, sample, machine) < 0)
+               return -1;
+
+       return print_event(tool, event, sample, machine, sample->pid,
+                          sample->tid);
+}
+
 static void sig_handler(int sig __maybe_unused)
 {
        session_done = 1;
@@ -2438,6 +2515,10 @@ static int __cmd_script(struct perf_script *script)
                script->tool.ksymbol = process_bpf_events;
                script->tool.bpf     = process_bpf_events;
        }
+       if (script->show_text_poke_events) {
+               script->tool.ksymbol   = process_bpf_events;
+               script->tool.text_poke = process_text_poke_events;
+       }
 
        if (perf_script__setup_per_event_dump(script)) {
                pr_err("Couldn't create the per event dump files\n");
@@ -3171,7 +3252,7 @@ static int have_cmd(int argc, const char **argv)
 static void script__setup_sample_type(struct perf_script *script)
 {
        struct perf_session *session = script->session;
-       u64 sample_type = perf_evlist__combined_sample_type(session->evlist);
+       u64 sample_type = evlist__combined_sample_type(session->evlist);
 
        if (symbol_conf.use_callchain || symbol_conf.cumulate_callchain) {
                if ((sample_type & PERF_SAMPLE_REGS_USER) &&
@@ -3423,7 +3504,7 @@ int cmd_script(int argc, const char **argv)
                     "Fields: comm,tid,pid,time,cpu,event,trace,ip,sym,dso,"
                     "addr,symoff,srcline,period,iregs,uregs,brstack,"
                     "brstacksym,flags,bpf-output,brstackinsn,brstackoff,"
-                    "callindent,insn,insnlen,synth,phys_addr,metric,misc,ipc",
+                    "callindent,insn,insnlen,synth,phys_addr,metric,misc,ipc,tod",
                     parse_output_fields),
        OPT_BOOLEAN('a', "all-cpus", &system_wide,
                    "system-wide collection from all CPUs"),
@@ -3474,6 +3555,8 @@ int cmd_script(int argc, const char **argv)
                    "Show round events (if recorded)"),
        OPT_BOOLEAN('\0', "show-bpf-events", &script.show_bpf_events,
                    "Show bpf related events (if recorded)"),
+       OPT_BOOLEAN('\0', "show-text-poke-events", &script.show_text_poke_events,
+                   "Show text poke related events (if recorded)"),
        OPT_BOOLEAN('\0', "per-event-dump", &script.per_event_dump,
                    "Dump trace output to files named by the monitored events"),
        OPT_BOOLEAN('f', "force", &symbol_conf.force, "don't complain, do it"),
index 9be020e..483a28e 100644 (file)
@@ -188,6 +188,8 @@ static struct perf_stat_config stat_config = {
        .metric_only_len        = METRIC_ONLY_LEN,
        .walltime_nsecs_stats   = &walltime_nsecs_stats,
        .big_num                = true,
+       .ctl_fd                 = -1,
+       .ctl_fd_ack             = -1
 };
 
 static bool cpus_map_matched(struct evsel *a, struct evsel *b)
@@ -475,18 +477,38 @@ static void process_interval(void)
        print_counters(&rs, 0, NULL);
 }
 
+static bool handle_interval(unsigned int interval, int *times)
+{
+       if (interval) {
+               process_interval();
+               if (interval_count && !(--(*times)))
+                       return true;
+       }
+       return false;
+}
+
 static void enable_counters(void)
 {
-       if (stat_config.initial_delay)
+       if (stat_config.initial_delay < 0) {
+               pr_info(EVLIST_DISABLED_MSG);
+               return;
+       }
+
+       if (stat_config.initial_delay > 0) {
+               pr_info(EVLIST_DISABLED_MSG);
                usleep(stat_config.initial_delay * USEC_PER_MSEC);
+       }
 
        /*
         * We need to enable counters only if:
         * - we don't have tracee (attaching to task or cpu)
         * - we have initial delay configured
         */
-       if (!target__none(&target) || stat_config.initial_delay)
+       if (!target__none(&target) || stat_config.initial_delay) {
                evlist__enable(evsel_list);
+               if (stat_config.initial_delay > 0)
+                       pr_info(EVLIST_ENABLED_MSG);
+       }
 }
 
 static void disable_counters(void)
@@ -540,6 +562,86 @@ static bool is_target_alive(struct target *_target,
        return false;
 }
 
+static void process_evlist(struct evlist *evlist, unsigned int interval)
+{
+       enum evlist_ctl_cmd cmd = EVLIST_CTL_CMD_UNSUPPORTED;
+
+       if (evlist__ctlfd_process(evlist, &cmd) > 0) {
+               switch (cmd) {
+               case EVLIST_CTL_CMD_ENABLE:
+                       pr_info(EVLIST_ENABLED_MSG);
+                       if (interval)
+                               process_interval();
+                       break;
+               case EVLIST_CTL_CMD_DISABLE:
+                       if (interval)
+                               process_interval();
+                       pr_info(EVLIST_DISABLED_MSG);
+                       break;
+               case EVLIST_CTL_CMD_ACK:
+               case EVLIST_CTL_CMD_UNSUPPORTED:
+               default:
+                       break;
+               }
+       }
+}
+
+static void compute_tts(struct timespec *time_start, struct timespec *time_stop,
+                       int *time_to_sleep)
+{
+       int tts = *time_to_sleep;
+       struct timespec time_diff;
+
+       diff_timespec(&time_diff, time_stop, time_start);
+
+       tts -= time_diff.tv_sec * MSEC_PER_SEC +
+              time_diff.tv_nsec / NSEC_PER_MSEC;
+
+       if (tts < 0)
+               tts = 0;
+
+       *time_to_sleep = tts;
+}
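
diff_timespec() is a perf utility; the elapsed-milliseconds bookkeeping it feeds can be
reproduced with plain clock_gettime() arithmetic (elapsed_ms() is an illustrative helper,
not the perf code):

#include <stdio.h>
#include <time.h>

#define MSEC_PER_SEC  1000L
#define NSEC_PER_MSEC 1000000L

/* Elapsed milliseconds between two CLOCK_MONOTONIC readings. */
static long elapsed_ms(const struct timespec *start, const struct timespec *stop)
{
        return (stop->tv_sec - start->tv_sec) * MSEC_PER_SEC +
               (stop->tv_nsec - start->tv_nsec) / NSEC_PER_MSEC;
}

int main(void)
{
        struct timespec a, b;

        clock_gettime(CLOCK_MONOTONIC, &a);
        clock_gettime(CLOCK_MONOTONIC, &b);
        printf("elapsed: %ld ms\n", elapsed_ms(&a, &b));
        return 0;
}
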
+
+static int dispatch_events(bool forks, int timeout, int interval, int *times)
+{
+       int child_exited = 0, status = 0;
+       int time_to_sleep, sleep_time;
+       struct timespec time_start, time_stop;
+
+       if (interval)
+               sleep_time = interval;
+       else if (timeout)
+               sleep_time = timeout;
+       else
+               sleep_time = 1000;
+
+       time_to_sleep = sleep_time;
+
+       while (!done) {
+               if (forks)
+                       child_exited = waitpid(child_pid, &status, WNOHANG);
+               else
+                       child_exited = !is_target_alive(&target, evsel_list->core.threads) ? 1 : 0;
+
+               if (child_exited)
+                       break;
+
+               clock_gettime(CLOCK_MONOTONIC, &time_start);
+               if (!(evlist__poll(evsel_list, time_to_sleep) > 0)) { /* poll timeout or EINTR */
+                       if (timeout || handle_interval(interval, times))
+                               break;
+                       time_to_sleep = sleep_time;
+               } else { /* fd revent */
+                       process_evlist(evsel_list, interval);
+                       clock_gettime(CLOCK_MONOTONIC, &time_stop);
+                       compute_tts(&time_start, &time_stop, &time_to_sleep);
+               }
+       }
+
+       return status;
+}
+
 enum counter_recovery {
        COUNTER_SKIP,
        COUNTER_RETRY,
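A worked example of the bookkeeping in dispatch_events()/compute_tts() above may help: with a 1000 ms interval, a control command that arrives 300 ms into the poll leaves 700 ms of the current interval to sleep, while a plain poll timeout resets the budget to the full interval. The snippet below is an illustration only; it mirrors the clamping done by compute_tts() and is not the patch's code.

#include <assert.h>

/* Mirror of the compute_tts() clamping: remaining sleep budget in ms. */
static int remaining_ms(int sleep_time, int elapsed_ms)
{
	int tts = sleep_time - elapsed_ms;

	return tts < 0 ? 0 : tts;
}

int main(void)
{
	assert(remaining_ms(1000, 300) == 700);	/* fd event 300 ms into the interval */
	assert(remaining_ms(1000, 1500) == 0);	/* a late wakeup never goes negative */
	return 0;
}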
@@ -603,7 +705,6 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
        char msg[BUFSIZ];
        unsigned long long t0, t1;
        struct evsel *counter;
-       struct timespec ts;
        size_t l;
        int status = 0;
        const bool forks = (argc > 0);
@@ -612,17 +713,6 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
        int i, cpu;
        bool second_pass = false;
 
-       if (interval) {
-               ts.tv_sec  = interval / USEC_PER_MSEC;
-               ts.tv_nsec = (interval % USEC_PER_MSEC) * NSEC_PER_MSEC;
-       } else if (timeout) {
-               ts.tv_sec  = timeout / USEC_PER_MSEC;
-               ts.tv_nsec = (timeout % USEC_PER_MSEC) * NSEC_PER_MSEC;
-       } else {
-               ts.tv_sec  = 1;
-               ts.tv_nsec = 0;
-       }
-
        if (forks) {
                if (perf_evlist__prepare_workload(evsel_list, &target, argv, is_pipe,
                                                  workload_exec_failed_signal) < 0) {
@@ -779,16 +869,8 @@ try_again_reset:
                perf_evlist__start_workload(evsel_list);
                enable_counters();
 
-               if (interval || timeout) {
-                       while (!waitpid(child_pid, &status, WNOHANG)) {
-                               nanosleep(&ts, NULL);
-                               if (timeout)
-                                       break;
-                               process_interval();
-                               if (interval_count && !(--times))
-                                       break;
-                       }
-               }
+               if (interval || timeout || evlist__ctlfd_initialized(evsel_list))
+                       status = dispatch_events(forks, timeout, interval, &times);
                if (child_pid != -1) {
                        if (timeout)
                                kill(child_pid, SIGTERM);
@@ -805,18 +887,7 @@ try_again_reset:
                        psignal(WTERMSIG(status), argv[0]);
        } else {
                enable_counters();
-               while (!done) {
-                       nanosleep(&ts, NULL);
-                       if (!is_target_alive(&target, evsel_list->core.threads))
-                               break;
-                       if (timeout)
-                               break;
-                       if (interval) {
-                               process_interval();
-                               if (interval_count && !(--times))
-                                       break;
-                       }
-               }
+               status = dispatch_events(forks, timeout, interval, &times);
        }
 
        disable_counters();
@@ -970,6 +1041,33 @@ static int parse_metric_groups(const struct option *opt,
                                         &stat_config.metric_events);
 }
 
+static int parse_control_option(const struct option *opt,
+                               const char *str,
+                               int unset __maybe_unused)
+{
+       char *comma = NULL, *endptr = NULL;
+       struct perf_stat_config *config = (struct perf_stat_config *)opt->value;
+
+       if (strncmp(str, "fd:", 3))
+               return -EINVAL;
+
+       config->ctl_fd = strtoul(&str[3], &endptr, 0);
+       if (endptr == &str[3])
+               return -EINVAL;
+
+       comma = strchr(str, ',');
+       if (comma) {
+               if (endptr != comma)
+                       return -EINVAL;
+
+               config->ctl_fd_ack = strtoul(comma + 1, &endptr, 0);
+               if (endptr == comma + 1 || *endptr != '\0')
+                       return -EINVAL;
+       }
+
+       return 0;
+}
+
 static struct option stat_options[] = {
        OPT_BOOLEAN('T', "transaction", &transaction_run,
                    "hardware transaction statistics"),
@@ -1041,8 +1139,8 @@ static struct option stat_options[] = {
                     "aggregate counts per thread", AGGR_THREAD),
        OPT_SET_UINT(0, "per-node", &stat_config.aggr_mode,
                     "aggregate counts per numa node", AGGR_NODE),
-       OPT_UINTEGER('D', "delay", &stat_config.initial_delay,
-                    "ms to wait before starting measurement after program start"),
+       OPT_INTEGER('D', "delay", &stat_config.initial_delay,
+                   "ms to wait before starting measurement after program start (-1: start with events disabled)"),
        OPT_CALLBACK_NOOPT(0, "metric-only", &stat_config.metric_only, NULL,
                        "Only print computed metrics. No raw values", enable_metric_only),
        OPT_BOOLEAN(0, "metric-no-group", &stat_config.metric_no_group,
@@ -1071,6 +1169,10 @@ static struct option stat_options[] = {
                "libpfm4 event selector. use 'perf list' to list available events",
                parse_libpfm_events_option),
 #endif
+       OPT_CALLBACK(0, "control", &stat_config, "fd:ctl-fd[,ack-fd]",
+                    "Listen on ctl-fd descriptor for command to control measurement ('enable': enable events, 'disable': disable events).\n"
+                    "\t\t\t  Optionally send control command completion ('ack\\n') to ack-fd descriptor.",
+                     parse_control_option),
        OPT_END()
 };
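The --control option above, together with the now-signed -D/--delay value, lets an external process decide when counting actually happens. Below is a hypothetical controller sketch; it is not part of the patch and assumes only what the option help states: plain 'enable'/'disable' commands written to ctl-fd, 'ack\n' sent back on ack-fd, and --delay=-1 meaning the events start disabled.

/* control_perf_stat.c - hypothetical driver for 'perf stat --control fd:...' */
#include <stdio.h>
#include <unistd.h>
#include <sys/wait.h>

int main(void)
{
	int ctl[2], ack[2];
	char cmd[128], buf[8];

	if (pipe(ctl) || pipe(ack))
		return 1;

	if (fork() == 0) {
		/* Child: events start disabled (--delay=-1); perf listens on the
		 * read end of ctl and acks completions on the write end of ack. */
		snprintf(cmd, sizeof(cmd),
			 "perf stat --delay=-1 --control fd:%d,%d -- sleep 10",
			 ctl[0], ack[1]);
		execl("/bin/sh", "sh", "-c", cmd, (char *)NULL);
		_exit(127);
	}

	sleep(2);				/* leave the first 2s uncounted */
	write(ctl[1], "enable\n", 7);		/* start counting */
	read(ack[0], buf, sizeof(buf));		/* wait for "ack\n" */
	sleep(2);
	write(ctl[1], "disable\n", 8);		/* stop counting */
	read(ack[0], buf, sizeof(buf));
	wait(NULL);				/* let perf stat finish and print counts */
	return 0;
}

The same plumbing can presumably be done from a shell with pre-opened fifos passed as fd:N,M; the C version just makes the descriptor lifetimes explicit.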
 
@@ -1679,19 +1781,17 @@ static int add_default_attributes(void)
                if (target__has_cpu(&target))
                        default_attrs0[0].config = PERF_COUNT_SW_CPU_CLOCK;
 
-               if (perf_evlist__add_default_attrs(evsel_list, default_attrs0) < 0)
+               if (evlist__add_default_attrs(evsel_list, default_attrs0) < 0)
                        return -1;
                if (pmu_have_event("cpu", "stalled-cycles-frontend")) {
-                       if (perf_evlist__add_default_attrs(evsel_list,
-                                               frontend_attrs) < 0)
+                       if (evlist__add_default_attrs(evsel_list, frontend_attrs) < 0)
                                return -1;
                }
                if (pmu_have_event("cpu", "stalled-cycles-backend")) {
-                       if (perf_evlist__add_default_attrs(evsel_list,
-                                               backend_attrs) < 0)
+                       if (evlist__add_default_attrs(evsel_list, backend_attrs) < 0)
                                return -1;
                }
-               if (perf_evlist__add_default_attrs(evsel_list, default_attrs1) < 0)
+               if (evlist__add_default_attrs(evsel_list, default_attrs1) < 0)
                        return -1;
        }
 
@@ -1701,21 +1801,21 @@ static int add_default_attributes(void)
                return 0;
 
        /* Append detailed run extra attributes: */
-       if (perf_evlist__add_default_attrs(evsel_list, detailed_attrs) < 0)
+       if (evlist__add_default_attrs(evsel_list, detailed_attrs) < 0)
                return -1;
 
        if (detailed_run < 2)
                return 0;
 
        /* Append very detailed run extra attributes: */
-       if (perf_evlist__add_default_attrs(evsel_list, very_detailed_attrs) < 0)
+       if (evlist__add_default_attrs(evsel_list, very_detailed_attrs) < 0)
                return -1;
 
        if (detailed_run < 3)
                return 0;
 
        /* Append very, very detailed run extra attributes: */
-       return perf_evlist__add_default_attrs(evsel_list, very_very_detailed_attrs);
+       return evlist__add_default_attrs(evsel_list, very_very_detailed_attrs);
 }
 
 static const char * const stat_record_usage[] = {
@@ -2242,6 +2342,9 @@ int cmd_stat(int argc, const char **argv)
        signal(SIGALRM, skip_signal);
        signal(SIGABRT, skip_signal);
 
+       if (evlist__initialize_ctlfd(evsel_list, stat_config.ctl_fd, stat_config.ctl_fd_ack))
+               goto out;
+
        status = 0;
        for (run_idx = 0; forever || run_idx < stat_config.run_count; run_idx++) {
                if (stat_config.run_count != 1 && verbose > 0)
@@ -2261,6 +2364,8 @@ int cmd_stat(int argc, const char **argv)
        if (!forever && status != -1 && (!interval || stat_config.summary))
                print_counters(NULL, argc, argv);
 
+       evlist__finalize_ctlfd(evsel_list);
+
        if (STAT_RECORD) {
                /*
                 * We synthesize the kernel mmap record just so that older tools
@@ -2307,6 +2412,7 @@ out:
 
        evlist__delete(evsel_list);
 
+       metricgroup__rblist_exit(&stat_config.metric_events);
        runtime_stat_delete(&stat_config);
 
        return status;
index 13889d7..994c230 100644 (file)
@@ -1627,7 +1627,7 @@ int cmd_top(int argc, const char **argv)
                goto out_delete_evlist;
 
        if (!top.evlist->core.nr_entries &&
-           perf_evlist__add_default(top.evlist) < 0) {
+           evlist__add_default(top.evlist) < 0) {
                pr_err("Not enough memory for event selector list\n");
                goto out_delete_evlist;
        }
index 4cbb64e..bea461b 100644 (file)
@@ -3917,8 +3917,7 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
        }
 
        if (trace->sched &&
-           perf_evlist__add_newtp(evlist, "sched", "sched_stat_runtime",
-                                  trace__sched_stat_runtime))
+           evlist__add_newtp(evlist, "sched", "sched_stat_runtime", trace__sched_stat_runtime))
                goto out_error_sched_stat_runtime;
        /*
         * If a global cgroup was set, apply it to all the events without an
@@ -4150,11 +4149,11 @@ out_error_raw_syscalls:
        goto out_error;
 
 out_error_mmap:
-       perf_evlist__strerror_mmap(evlist, errno, errbuf, sizeof(errbuf));
+       evlist__strerror_mmap(evlist, errno, errbuf, sizeof(errbuf));
        goto out_error;
 
 out_error_open:
-       perf_evlist__strerror_open(evlist, errno, errbuf, sizeof(errbuf));
+       evlist__strerror_open(evlist, errno, errbuf, sizeof(errbuf));
 
 out_error:
        fprintf(trace->output, "%s\n", errbuf);
@@ -4813,7 +4812,7 @@ int cmd_trace(int argc, const char **argv)
                        "per thread proc mmap processing timeout in ms"),
        OPT_CALLBACK('G', "cgroup", &trace, "name", "monitor event in cgroup name only",
                     trace__parse_cgroups),
-       OPT_UINTEGER('D', "delay", &trace.opts.initial_delay,
+       OPT_INTEGER('D', "delay", &trace.opts.initial_delay,
                     "ms to wait before starting measurement after program "
                     "start"),
        OPTS_EVSWITCH(&trace.evswitch),
index 94c2bc2..0b4d643 100755 (executable)
@@ -128,6 +128,9 @@ check arch/x86/lib/insn.c             '-I "^#include [\"<]\(../include/\)*asm/in
 # diff non-symmetric files
 check_2 tools/perf/arch/x86/entry/syscalls/syscall_64.tbl arch/x86/entry/syscalls/syscall_64.tbl
 
+# These will require a beauty_check when we get some more like that
+check_2 tools/perf/trace/beauty/include/linux/socket.h include/linux/socket.h
+
 # check duplicated library files
 check_2 tools/perf/util/hashmap.h tools/lib/bpf/hashmap.h
 check_2 tools/perf/util/hashmap.c tools/lib/bpf/hashmap.c
index 80816d6..f8784c6 100644 (file)
@@ -60,7 +60,7 @@
     },
     {
         "BriefDescription": "Stalls due to short latency decimal floating ops.",
-        "MetricExpr": "(PM_CMPLU_STALL_DFU - PM_CMPLU_STALL_DFLONG)/PM_RUN_INST_CMPL",
+        "MetricExpr": "dfu_stall_cpi - dflong_stall_cpi",
         "MetricGroup": "cpi_breakdown",
         "MetricName": "dfu_other_stall_cpi"
     },
@@ -72,7 +72,7 @@
     },
     {
         "BriefDescription": "Completion stall by Dcache miss which resolved off node memory/cache",
-        "MetricExpr": "(PM_CMPLU_STALL_DMISS_L3MISS - PM_CMPLU_STALL_DMISS_L21_L31 - PM_CMPLU_STALL_DMISS_LMEM - PM_CMPLU_STALL_DMISS_REMOTE)/PM_RUN_INST_CMPL",
+        "MetricExpr": "dmiss_non_local_stall_cpi - dmiss_remote_stall_cpi",
         "MetricGroup": "cpi_breakdown",
         "MetricName": "dmiss_distant_stall_cpi"
     },
@@ -90,7 +90,7 @@
     },
     {
         "BriefDescription": "Completion stall due to cache miss that resolves in the L2 or L3 without conflict",
-        "MetricExpr": "(PM_CMPLU_STALL_DMISS_L2L3 - PM_CMPLU_STALL_DMISS_L2L3_CONFLICT)/PM_RUN_INST_CMPL",
+        "MetricExpr": "dmiss_l2l3_stall_cpi - dmiss_l2l3_conflict_stall_cpi",
         "MetricGroup": "cpi_breakdown",
         "MetricName": "dmiss_l2l3_noconflict_stall_cpi"
     },
     },
     {
         "BriefDescription": "Completion stall by Dcache miss which resolved outside of local memory",
-        "MetricExpr": "(PM_CMPLU_STALL_DMISS_L3MISS - PM_CMPLU_STALL_DMISS_L21_L31 - PM_CMPLU_STALL_DMISS_LMEM)/PM_RUN_INST_CMPL",
+        "MetricExpr": "dmiss_l3miss_stall_cpi - dmiss_l21_l31_stall_cpi - dmiss_lmem_stall_cpi",
         "MetricGroup": "cpi_breakdown",
         "MetricName": "dmiss_non_local_stall_cpi"
     },
     },
     {
         "BriefDescription": "Stalls due to short latency double precision ops.",
-        "MetricExpr": "(PM_CMPLU_STALL_DP - PM_CMPLU_STALL_DPLONG)/PM_RUN_INST_CMPL",
+        "MetricExpr": "dp_stall_cpi - dplong_stall_cpi",
         "MetricGroup": "cpi_breakdown",
         "MetricName": "dp_other_stall_cpi"
     },
         "MetricName": "emq_full_stall_cpi"
     },
     {
-        "MetricExpr": "(PM_CMPLU_STALL_ERAT_MISS + PM_CMPLU_STALL_EMQ_FULL)/PM_RUN_INST_CMPL",
+        "MetricExpr": "erat_miss_stall_cpi + emq_full_stall_cpi",
         "MetricGroup": "cpi_breakdown",
         "MetricName": "emq_stall_cpi"
     },
     },
     {
         "BriefDescription": "Completion stall due to execution units for other reasons.",
-        "MetricExpr": "(PM_CMPLU_STALL_EXEC_UNIT - PM_CMPLU_STALL_FXU - PM_CMPLU_STALL_DP - PM_CMPLU_STALL_DFU - PM_CMPLU_STALL_PM - PM_CMPLU_STALL_CRYPTO - PM_CMPLU_STALL_VFXU - PM_CMPLU_STALL_VDP)/PM_RUN_INST_CMPL",
+        "MetricExpr": "exec_unit_stall_cpi - scalar_stall_cpi - vector_stall_cpi",
         "MetricGroup": "cpi_breakdown",
         "MetricName": "exec_unit_other_stall_cpi"
     },
     },
     {
         "BriefDescription": "Stalls due to short latency integer ops",
-        "MetricExpr": "(PM_CMPLU_STALL_FXU - PM_CMPLU_STALL_FXLONG)/PM_RUN_INST_CMPL",
+        "MetricExpr": "fxu_stall_cpi - fxlong_stall_cpi",
         "MetricGroup": "cpi_breakdown",
         "MetricName": "fxu_other_stall_cpi"
     },
     },
     {
         "BriefDescription": "Instruction Completion Table other stalls",
-        "MetricExpr": "(PM_ICT_NOSLOT_CYC - PM_ICT_NOSLOT_IC_MISS - PM_ICT_NOSLOT_BR_MPRED_ICMISS - PM_ICT_NOSLOT_BR_MPRED - PM_ICT_NOSLOT_DISP_HELD)/PM_RUN_INST_CMPL",
+        "MetricExpr": "nothing_dispatched_cpi - ict_noslot_ic_miss_cpi - ict_noslot_br_mpred_icmiss_cpi - ict_noslot_br_mpred_cpi - ict_noslot_disp_held_cpi",
         "MetricGroup": "cpi_breakdown",
         "MetricName": "ict_noslot_cyc_other_cpi"
     },
     },
     {
         "BriefDescription": "ICT_NOSLOT_DISP_HELD_OTHER_CPI",
-        "MetricExpr": "(PM_ICT_NOSLOT_DISP_HELD - PM_ICT_NOSLOT_DISP_HELD_HB_FULL - PM_ICT_NOSLOT_DISP_HELD_SYNC - PM_ICT_NOSLOT_DISP_HELD_TBEGIN - PM_ICT_NOSLOT_DISP_HELD_ISSQ)/PM_RUN_INST_CMPL",
+        "MetricExpr": "ict_noslot_disp_held_cpi - ict_noslot_disp_held_hb_full_cpi - ict_noslot_disp_held_sync_cpi - ict_noslot_disp_held_tbegin_cpi - ict_noslot_disp_held_issq_cpi",
         "MetricGroup": "cpi_breakdown",
         "MetricName": "ict_noslot_disp_held_other_cpi"
     },
     },
     {
         "BriefDescription": "ICT_NOSLOT_IC_L2_CPI",
-        "MetricExpr": "(PM_ICT_NOSLOT_IC_MISS - PM_ICT_NOSLOT_IC_L3 - PM_ICT_NOSLOT_IC_L3MISS)/PM_RUN_INST_CMPL",
+        "MetricExpr": "ict_noslot_ic_miss_cpi - ict_noslot_ic_l3_cpi - ict_noslot_ic_l3miss_cpi",
         "MetricGroup": "cpi_breakdown",
         "MetricName": "ict_noslot_ic_l2_cpi"
     },
         "MetricName": "ict_noslot_ic_miss_cpi"
     },
     {
-        "MetricExpr": "(PM_NTC_ISSUE_HELD_DARQ_FULL + PM_NTC_ISSUE_HELD_ARB + PM_NTC_ISSUE_HELD_OTHER)/PM_RUN_INST_CMPL",
+        "MetricExpr": "ntc_issue_held_darq_full_cpi + ntc_issue_held_arb_cpi + ntc_issue_held_other_cpi",
         "MetricGroup": "cpi_breakdown",
         "MetricName": "issue_hold_cpi"
     },
         "MetricName": "lrq_other_stall_cpi"
     },
     {
-        "MetricExpr": "(PM_CMPLU_STALL_LMQ_FULL + PM_CMPLU_STALL_ST_FWD + PM_CMPLU_STALL_LHS + PM_CMPLU_STALL_LSU_MFSPR + PM_CMPLU_STALL_LARX + PM_CMPLU_STALL_LRQ_OTHER)/PM_RUN_INST_CMPL",
+        "MetricExpr": "lmq_full_stall_cpi + st_fwd_stall_cpi + lhs_stall_cpi + lsu_mfspr_stall_cpi + larx_stall_cpi + lrq_other_stall_cpi",
         "MetricGroup": "cpi_breakdown",
         "MetricName": "lrq_stall_cpi"
     },
         "MetricName": "lsaq_arb_stall_cpi"
     },
     {
-        "MetricExpr": "(PM_CMPLU_STALL_LRQ_FULL + PM_CMPLU_STALL_SRQ_FULL + PM_CMPLU_STALL_LSAQ_ARB)/PM_RUN_INST_CMPL",
+        "MetricExpr": "lrq_full_stall_cpi + srq_full_stall_cpi + lsaq_arb_stall_cpi",
         "MetricGroup": "cpi_breakdown",
         "MetricName": "lsaq_stall_cpi"
     },
     },
     {
         "BriefDescription": "Completion LSU stall for other reasons",
-        "MetricExpr": "(PM_CMPLU_STALL_LSU - PM_CMPLU_STALL_LSU_FIN - PM_CMPLU_STALL_STORE_FINISH - PM_CMPLU_STALL_STORE_DATA - PM_CMPLU_STALL_EIEIO - PM_CMPLU_STALL_STCX - PM_CMPLU_STALL_SLB - PM_CMPLU_STALL_TEND - PM_CMPLU_STALL_PASTE - PM_CMPLU_STALL_TLBIE - PM_CMPLU_STALL_STORE_PIPE_ARB - PM_CMPLU_STALL_STORE_FIN_ARB - PM_CMPLU_STALL_LOAD_FINISH + PM_CMPLU_STALL_DCACHE_MISS - PM_CMPLU_STALL_LMQ_FULL - PM_CMPLU_STALL_ST_FWD - PM_CMPLU_STALL_LHS - PM_CMPLU_STALL_LSU_MFSPR - PM_CMPLU_STALL_LARX - PM_CMPLU_STALL_LRQ_OTHER + PM_CMPLU_STALL_ERAT_MISS + PM_CMPLU_STALL_EMQ_FULL - PM_CMPLU_STALL_LRQ_FULL - PM_CMPLU_STALL_SRQ_FULL - PM_CMPLU_STALL_LSAQ_ARB) / PM_RUN_INST_CMPL",
+        "MetricExpr": "lsu_stall_cpi - lsu_fin_stall_cpi - store_finish_stall_cpi - srq_stall_cpi - load_finish_stall_cpi + lsu_stall_dcache_miss_cpi - lrq_stall_cpi + emq_stall_cpi - lsaq_stall_cpi",
         "MetricGroup": "cpi_breakdown",
         "MetricName": "lsu_other_stall_cpi"
     },
     },
     {
         "BriefDescription": "Cycles unaccounted for.",
-        "MetricExpr": "(PM_RUN_CYC - PM_1PLUS_PPC_CMPL - PM_CMPLU_STALL_THRD - PM_CMPLU_STALL - PM_ICT_NOSLOT_CYC)/PM_RUN_INST_CMPL",
+        "MetricExpr": "run_cpi - completion_cpi - thread_block_stall_cpi - stall_cpi - nothing_dispatched_cpi",
         "MetricGroup": "cpi_breakdown",
         "MetricName": "other_cpi"
     },
     {
         "BriefDescription": "Completion stall for other reasons",
-        "MetricExpr": "(PM_CMPLU_STALL - PM_CMPLU_STALL_NTC_DISP_FIN - PM_CMPLU_STALL_NTC_FLUSH - PM_CMPLU_STALL_LSU - PM_CMPLU_STALL_EXEC_UNIT - PM_CMPLU_STALL_BRU)/PM_RUN_INST_CMPL",
+        "MetricExpr": "stall_cpi - ntc_disp_fin_stall_cpi - ntc_flush_stall_cpi - lsu_stall_cpi - exec_unit_stall_cpi - bru_stall_cpi",
         "MetricGroup": "cpi_breakdown",
         "MetricName": "other_stall_cpi"
     },
         "MetricName": "run_cyc_cpi"
     },
     {
-        "MetricExpr": "(PM_CMPLU_STALL_FXU + PM_CMPLU_STALL_DP + PM_CMPLU_STALL_DFU + PM_CMPLU_STALL_PM + PM_CMPLU_STALL_CRYPTO)/PM_RUN_INST_CMPL",
+        "MetricExpr": "fxu_stall_cpi + dp_stall_cpi + dfu_stall_cpi + pm_stall_cpi + crypto_stall_cpi",
         "MetricGroup": "cpi_breakdown",
         "MetricName": "scalar_stall_cpi"
     },
         "MetricName": "srq_full_stall_cpi"
     },
     {
-        "MetricExpr": "(PM_CMPLU_STALL_STORE_DATA + PM_CMPLU_STALL_EIEIO + PM_CMPLU_STALL_STCX + PM_CMPLU_STALL_SLB + PM_CMPLU_STALL_TEND + PM_CMPLU_STALL_PASTE + PM_CMPLU_STALL_TLBIE + PM_CMPLU_STALL_STORE_PIPE_ARB + PM_CMPLU_STALL_STORE_FIN_ARB)/PM_RUN_INST_CMPL",
+        "MetricExpr": "store_data_stall_cpi + eieio_stall_cpi + stcx_stall_cpi + slb_stall_cpi + tend_stall_cpi + paste_stall_cpi + tlbie_stall_cpi + store_pipe_arb_stall_cpi + store_fin_arb_stall_cpi",
         "MetricGroup": "cpi_breakdown",
         "MetricName": "srq_stall_cpi"
     },
     },
     {
         "BriefDescription": "Vector stalls due to small latency double precision ops",
-        "MetricExpr": "(PM_CMPLU_STALL_VDP - PM_CMPLU_STALL_VDPLONG)/PM_RUN_INST_CMPL",
+        "MetricExpr": "vdp_stall_cpi - vdplong_stall_cpi",
         "MetricGroup": "cpi_breakdown",
         "MetricName": "vdp_other_stall_cpi"
     },
         "MetricName": "vdplong_stall_cpi"
     },
     {
-        "MetricExpr": "(PM_CMPLU_STALL_VFXU + PM_CMPLU_STALL_VDP)/PM_RUN_INST_CMPL",
+        "MetricExpr": "vfxu_stall_cpi + vdp_stall_cpi",
         "MetricGroup": "cpi_breakdown",
         "MetricName": "vector_stall_cpi"
     },
     },
     {
         "BriefDescription": "Vector stalls due to small latency integer ops",
-        "MetricExpr": "(PM_CMPLU_STALL_VFXU - PM_CMPLU_STALL_VFXLONG)/PM_RUN_INST_CMPL",
+        "MetricExpr": "vfxu_stall_cpi - vfxlong_stall_cpi",
         "MetricGroup": "cpi_breakdown",
         "MetricName": "vfxu_other_stall_cpi"
     },
     },
     {
         "BriefDescription": "% of DL1 reloads from Private L3, other core per Inst",
-        "MetricExpr": "(PM_DATA_FROM_L31_MOD + PM_DATA_FROM_L31_SHR) * 100 / PM_RUN_INST_CMPL",
+        "MetricExpr": "dl1_reload_from_l31_mod_rate_percent + dl1_reload_from_l31_shr_rate_percent",
         "MetricName": "dl1_reload_from_l31_rate_percent"
     },
     {
     },
     {
         "BriefDescription": "Completion stall because a different thread was using the completion pipe",
-        "MetricExpr": "(PM_CMPLU_STALL_THRD - PM_CMPLU_STALL_EXCEPTION - PM_CMPLU_STALL_ANY_SYNC - PM_CMPLU_STALL_SYNC_PMU_INT - PM_CMPLU_STALL_SPEC_FINISH - PM_CMPLU_STALL_FLUSH_ANY_THREAD - PM_CMPLU_STALL_LSU_FLUSH_NEXT - PM_CMPLU_STALL_NESTED_TBEGIN - PM_CMPLU_STALL_NESTED_TEND - PM_CMPLU_STALL_MTFPSCR)/PM_RUN_INST_CMPL",
+        "MetricExpr": "thread_block_stall_cpi - exception_stall_cpi - any_sync_stall_cpi - sync_pmu_int_stall_cpi - spec_finish_stall_cpi - flush_any_thread_stall_cpi - lsu_flush_next_stall_cpi - nested_tbegin_stall_cpi - nested_tend_stall_cpi - mtfpscr_stall_cpi",
         "MetricName": "other_thread_cmpl_stall"
     },
     {
index c121e52..8383a37 100644 (file)
        "MetricExpr": "(hv_24x7@PM_PB_CYC\\,chip\\=?@ )",
         "MetricName": "PowerBUS_Frequency",
         "ScaleUnit": "2.5e-7GHz"
+    },
+    {
+       "MetricExpr" : "nest_mcs01_imc@PM_MCS01_128B_RD_DISP_PORT01@ + nest_mcs01_imc@PM_MCS01_128B_RD_DISP_PORT23@",
+       "MetricName" : "mcs01-read",
+       "MetricGroup" : "memory_bw",
+       "ScaleUnit": "6.1e-5MB"
+    },
+    {
+       "MetricExpr" : "nest_mcs23_imc@PM_MCS23_128B_RD_DISP_PORT01@ + nest_mcs23_imc@PM_MCS23_128B_RD_DISP_PORT23@",
+       "MetricName" : "mcs23-read",
+       "MetricGroup" : "memory_bw",
+       "ScaleUnit": "6.1e-5MB"
+    },
+    {
+       "MetricExpr" : "nest_mcs01_imc@PM_MCS01_128B_WR_DISP_PORT01@ + nest_mcs01_imc@PM_MCS01_128B_WR_DISP_PORT23@",
+       "MetricName" : "mcs01-write",
+       "MetricGroup" : "memory_bw",
+       "ScaleUnit": "6.1e-5MB"
+    },
+    {
+       "MetricExpr" : "nest_mcs23_imc@PM_MCS23_128B_WR_DISP_PORT01@ + nest_mcs23_imc@PM_MCS23_128B_WR_DISP_PORT23@",
+       "MetricName" : "mcs23-write",
+       "MetricGroup" : "memory_bw",
+       "ScaleUnit": "6.1e-5MB"
+    },
+    {
+       "MetricExpr" : "nest_powerbus0_imc@PM_PB_CYC@",
+       "MetricName" : "powerbus_freq",
+       "ScaleUnit": "1e-9GHz"
+    },
+    {
+       "MetricExpr" : "(nest_mcs01_imc@PM_MCS01_128B_RD_DISP_PORT01@ + nest_mcs01_imc@PM_MCS01_128B_RD_DISP_PORT23@ + nest_mcs23_imc@PM_MCS23_128B_RD_DISP_PORT01@ + nest_mcs23_imc@PM_MCS23_128B_RD_DISP_PORT23@ + nest_mcs01_imc@PM_MCS01_128B_WR_DISP_PORT01@ + nest_mcs01_imc@PM_MCS01_128B_WR_DISP_PORT23@ + nest_mcs23_imc@PM_MCS23_128B_WR_DISP_PORT01@ + nest_mcs23_imc@PM_MCS23_128B_WR_DISP_PORT23@)",
+       "MetricName" : "Memory-bandwidth-MCS",
+       "MetricGroup" : "memory_bw",
+       "ScaleUnit": "6.1e-5MB"
     }
 ]
index cd00498..84352fc 100644 (file)
@@ -59,6 +59,7 @@ perf-y += genelf.o
 perf-y += api-io.o
 perf-y += demangle-java-test.o
 perf-y += pfm.o
+perf-y += parse-metric.o
 
 $(OUTPUT)tests/llvm-src-base.c: tests/bpf-script-example.c tests/Build
        $(call rule_mkdir)
index 430024f..6cd4081 100644 (file)
@@ -53,6 +53,7 @@ Following tests are defined (with perf commands):
   perf record -i kill                           (test-record-no-inherit)
   perf record -n kill                           (test-record-no-samples)
   perf record -c 100 -P kill                    (test-record-period)
+  perf record -c 1 --pfm-events=cycles:period=2 (test-record-pfm-period)
   perf record -R kill                           (test-record-raw)
   perf stat -e cycles kill                      (test-stat-basic)
   perf stat kill                                (test-stat-default)
diff --git a/tools/perf/tests/attr/test-record-pfm-period b/tools/perf/tests/attr/test-record-pfm-period
new file mode 100644 (file)
index 0000000..368f5b8
--- /dev/null
@@ -0,0 +1,9 @@
+[config]
+command = record
+args    = --no-bpf-event -c 10000 --pfm-events=cycles:period=77777 kill >/dev/null 2>&1
+ret     = 1
+
+[event:base-record]
+sample_period=77777
+sample_type=7
+freq=0
index da5b6cc..d328caa 100644 (file)
@@ -338,6 +338,10 @@ static struct test generic_tests[] = {
                .func = test__demangle_java,
        },
        {
+               .desc = "Parse and process metrics",
+               .func = test__parse_metric,
+       },
+       {
                .func = NULL,
        },
 };
index 6fe221d..035c912 100644 (file)
@@ -678,7 +678,7 @@ static int do_test_code_reading(bool try_kcore)
 
                        if (verbose > 0) {
                                char errbuf[512];
-                               perf_evlist__strerror_open(evlist, errno, errbuf, sizeof(errbuf));
+                               evlist__strerror_open(evlist, errno, errbuf, sizeof(errbuf));
                                pr_debug("perf_evlist__open() failed!\n%s\n", errbuf);
                        }
 
index 1cb02ca..4d01051 100644 (file)
@@ -18,14 +18,15 @@ static int test(struct expr_parse_ctx *ctx, const char *e, double val2)
 
 int test__expr(struct test *t __maybe_unused, int subtest __maybe_unused)
 {
+       struct expr_id_data *val_ptr;
        const char *p;
-       double val, *val_ptr;
+       double val;
        int ret;
        struct expr_parse_ctx ctx;
 
        expr__ctx_init(&ctx);
-       expr__add_id(&ctx, strdup("FOO"), 1);
-       expr__add_id(&ctx, strdup("BAR"), 2);
+       expr__add_id_val(&ctx, strdup("FOO"), 1);
+       expr__add_id_val(&ctx, strdup("BAR"), 2);
 
        ret = test(&ctx, "1+1", 2);
        ret |= test(&ctx, "FOO+BAR", 3);
@@ -39,6 +40,14 @@ int test__expr(struct test *t __maybe_unused, int subtest __maybe_unused)
        ret |= test(&ctx, "1+1 if 3*4 else 0", 2);
        ret |= test(&ctx, "1.1 + 2.1", 3.2);
        ret |= test(&ctx, ".1 + 2.", 2.1);
+       ret |= test(&ctx, "d_ratio(1, 2)", 0.5);
+       ret |= test(&ctx, "d_ratio(2.5, 0)", 0);
+       ret |= test(&ctx, "1.1 < 2.2", 1);
+       ret |= test(&ctx, "2.2 > 1.1", 1);
+       ret |= test(&ctx, "1.1 < 1.1", 0);
+       ret |= test(&ctx, "2.2 > 2.2", 0);
+       ret |= test(&ctx, "2.2 < 1.1", 0);
+       ret |= test(&ctx, "1.1 > 2.2", 0);
 
        if (ret)
                return ret;
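Going only by the expectations added above, d_ratio(a, b) behaves like a division that degrades to 0 when the denominator is 0, and the comparison operators evaluate to 1 or 0. A standalone illustration of that behaviour (not the expr parser's implementation):

#include <assert.h>

/* Illustrative stand-in for the d_ratio semantics the test asserts. */
static double d_ratio_like(double num, double den)
{
	return den == 0.0 ? 0.0 : num / den;
}

int main(void)
{
	assert(d_ratio_like(1, 2) == 0.5);		/* "d_ratio(1, 2)"   -> 0.5 */
	assert(d_ratio_like(2.5, 0) == 0);		/* "d_ratio(2.5, 0)" -> 0   */
	assert((1.1 < 2.2) == 1 && (2.2 > 2.2) == 0);	/* comparisons yield 1/0    */
	return 0;
}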
index c7c81c4..d9eca8e 100644 (file)
@@ -12,6 +12,7 @@ static void fdarray__init_revents(struct fdarray *fda, short revents)
 
        for (fd = 0; fd < fda->nr; ++fd) {
                fda->entries[fd].fd      = fda->nr - fd;
+               fda->entries[fd].events  = revents;
                fda->entries[fd].revents = revents;
        }
 }
@@ -29,7 +30,7 @@ static int fdarray__fprintf_prefix(struct fdarray *fda, const char *prefix, FILE
 
 int test__fdarray__filter(struct test *test __maybe_unused, int subtest __maybe_unused)
 {
-       int nr_fds, expected_fd[2], fd, err = TEST_FAIL;
+       int nr_fds, err = TEST_FAIL;
        struct fdarray *fda = fdarray__new(5, 5);
 
        if (fda == NULL) {
@@ -55,7 +56,6 @@ int test__fdarray__filter(struct test *test __maybe_unused, int subtest __maybe_
 
        fdarray__init_revents(fda, POLLHUP);
        fda->entries[2].revents = POLLIN;
-       expected_fd[0] = fda->entries[2].fd;
 
        pr_debug("\nfiltering all but fda->entries[2]:");
        fdarray__fprintf_prefix(fda, "before", stderr);
@@ -66,17 +66,9 @@ int test__fdarray__filter(struct test *test __maybe_unused, int subtest __maybe_
                goto out_delete;
        }
 
-       if (fda->entries[0].fd != expected_fd[0]) {
-               pr_debug("\nfda->entries[0].fd=%d != %d\n",
-                        fda->entries[0].fd, expected_fd[0]);
-               goto out_delete;
-       }
-
        fdarray__init_revents(fda, POLLHUP);
        fda->entries[0].revents = POLLIN;
-       expected_fd[0] = fda->entries[0].fd;
        fda->entries[3].revents = POLLIN;
-       expected_fd[1] = fda->entries[3].fd;
 
        pr_debug("\nfiltering all but (fda->entries[0], fda->entries[3]):");
        fdarray__fprintf_prefix(fda, "before", stderr);
@@ -88,14 +80,6 @@ int test__fdarray__filter(struct test *test __maybe_unused, int subtest __maybe_
                goto out_delete;
        }
 
-       for (fd = 0; fd < 2; ++fd) {
-               if (fda->entries[fd].fd != expected_fd[fd]) {
-                       pr_debug("\nfda->entries[%d].fd=%d != %d\n", fd,
-                                fda->entries[fd].fd, expected_fd[fd]);
-                       goto out_delete;
-               }
-       }
-
        pr_debug("\n");
 
        err = 0;
@@ -128,7 +112,7 @@ int test__fdarray__add(struct test *test __maybe_unused, int subtest __maybe_unu
        }
 
 #define FDA_ADD(_idx, _fd, _revents, _nr)                                 \
-       if (fdarray__add(fda, _fd, _revents) < 0) {                        \
+       if (fdarray__add(fda, _fd, _revents, fdarray_flag__default) < 0) { \
                pr_debug("\n%d: fdarray__add(fda, %d, %d) failed!",        \
                         __LINE__,_fd, _revents);                          \
                goto out_delete;                                           \
index 895188b..7f9f87a 100644 (file)
@@ -631,6 +631,34 @@ static int test__checkterms_simple(struct list_head *terms)
        TEST_ASSERT_VAL("wrong val", term->val.num == 1);
        TEST_ASSERT_VAL("wrong config", !strcmp(term->config, "umask"));
 
+       /*
+        * read
+        *
+        * The perf_pmu__test_parse_init injects a 'read' term into
+        * perf_pmu_events_list, so 'read' is evaluated as a read term
+        * and not as a raw event with the 'ead' hex value.
+        */
+       term = list_entry(term->list.next, struct parse_events_term, list);
+       TEST_ASSERT_VAL("wrong type term",
+                       term->type_term == PARSE_EVENTS__TERM_TYPE_USER);
+       TEST_ASSERT_VAL("wrong type val",
+                       term->type_val == PARSE_EVENTS__TERM_TYPE_NUM);
+       TEST_ASSERT_VAL("wrong val", term->val.num == 1);
+       TEST_ASSERT_VAL("wrong config", !strcmp(term->config, "read"));
+
+       /*
+        * r0xead
+        *
+        * To still be able to pass the 'ead' value with the 'r' syntax,
+        * we added support for parsing the 'r0xHEX' event.
+        */
+       term = list_entry(term->list.next, struct parse_events_term, list);
+       TEST_ASSERT_VAL("wrong type term",
+                       term->type_term == PARSE_EVENTS__TERM_TYPE_CONFIG);
+       TEST_ASSERT_VAL("wrong type val",
+                       term->type_val == PARSE_EVENTS__TERM_TYPE_NUM);
+       TEST_ASSERT_VAL("wrong val", term->val.num == 0xead);
+       TEST_ASSERT_VAL("wrong config", !term->config);
        return 0;
 }
 
@@ -1766,6 +1794,11 @@ static struct evlist_test test__events_pmu[] = {
                .check = test__checkevent_raw_pmu,
                .id    = 4,
        },
+       {
+               .name  = "software/r0x1a/",
+               .check = test__checkevent_raw_pmu,
+               .id    = 4,
+       },
 };
 
 struct terms_test {
@@ -1776,7 +1809,7 @@ struct terms_test {
 
 static struct terms_test test__terms[] = {
        [0] = {
-               .str   = "config=10,config1,config2=3,umask=1",
+               .str   = "config=10,config1,config2=3,umask=1,read,r0xead",
                .check = test__checkterms_simple,
        },
 };
@@ -1836,6 +1869,13 @@ static int test_term(struct terms_test *t)
 
        INIT_LIST_HEAD(&terms);
 
+       /*
+        * The perf_pmu__test_parse_init prepares perf_pmu_events_list
+        * which gets freed in parse_events_terms.
+        */
+       if (perf_pmu__test_parse_init())
+               return -1;
+
        ret = parse_events_terms(&terms, t->str);
        if (ret) {
                pr_debug("failed to parse terms '%s', err %d\n",
diff --git a/tools/perf/tests/parse-metric.c b/tools/perf/tests/parse-metric.c
new file mode 100644 (file)
index 0000000..fc0838a
--- /dev/null
@@ -0,0 +1,352 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/compiler.h>
+#include <string.h>
+#include <perf/cpumap.h>
+#include <perf/evlist.h>
+#include "metricgroup.h"
+#include "tests.h"
+#include "pmu-events/pmu-events.h"
+#include "evlist.h"
+#include "rblist.h"
+#include "debug.h"
+#include "expr.h"
+#include "stat.h"
+#include <perf/cpumap.h>
+#include <perf/evlist.h>
+
+static struct pmu_event pme_test[] = {
+{
+       .metric_expr    = "inst_retired.any / cpu_clk_unhalted.thread",
+       .metric_name    = "IPC",
+       .metric_group   = "group1",
+},
+{
+       .metric_expr    = "idq_uops_not_delivered.core / (4 * (( ( cpu_clk_unhalted.thread / 2 ) * "
+                         "( 1 + cpu_clk_unhalted.one_thread_active / cpu_clk_unhalted.ref_xclk ) )))",
+       .metric_name    = "Frontend_Bound_SMT",
+},
+{
+       .metric_expr    = "l1d\\-loads\\-misses / inst_retired.any",
+       .metric_name    = "dcache_miss_cpi",
+},
+{
+       .metric_expr    = "l1i\\-loads\\-misses / inst_retired.any",
+       .metric_name    = "icache_miss_cycles",
+},
+{
+       .metric_expr    = "(dcache_miss_cpi + icache_miss_cycles)",
+       .metric_name    = "cache_miss_cycles",
+       .metric_group   = "group1",
+},
+{
+       .metric_expr    = "l2_rqsts.demand_data_rd_hit + l2_rqsts.pf_hit + l2_rqsts.rfo_hit",
+       .metric_name    = "DCache_L2_All_Hits",
+},
+{
+       .metric_expr    = "max(l2_rqsts.all_demand_data_rd - l2_rqsts.demand_data_rd_hit, 0) + "
+                         "l2_rqsts.pf_miss + l2_rqsts.rfo_miss",
+       .metric_name    = "DCache_L2_All_Miss",
+},
+{
+       .metric_expr    = "dcache_l2_all_hits + dcache_l2_all_miss",
+       .metric_name    = "DCache_L2_All",
+},
+{
+       .metric_expr    = "d_ratio(dcache_l2_all_hits, dcache_l2_all)",
+       .metric_name    = "DCache_L2_Hits",
+},
+{
+       .metric_expr    = "d_ratio(dcache_l2_all_miss, dcache_l2_all)",
+       .metric_name    = "DCache_L2_Misses",
+},
+{
+       .metric_expr    = "ipc + m2",
+       .metric_name    = "M1",
+},
+{
+       .metric_expr    = "ipc + m1",
+       .metric_name    = "M2",
+},
+{
+       .metric_expr    = "1/m3",
+       .metric_name    = "M3",
+}
+};
+
+static struct pmu_events_map map = {
+       .cpuid          = "test",
+       .version        = "1",
+       .type           = "core",
+       .table          = pme_test,
+};
+
+struct value {
+       const char      *event;
+       u64              val;
+};
+
+static u64 find_value(const char *name, struct value *values)
+{
+       struct value *v = values;
+
+       while (v->event) {
+               if (!strcmp(name, v->event))
+                       return v->val;
+               v++;
+       }
+       return 0;
+}
+
+static void load_runtime_stat(struct runtime_stat *st, struct evlist *evlist,
+                             struct value *vals)
+{
+       struct evsel *evsel;
+       u64 count;
+
+       evlist__for_each_entry(evlist, evsel) {
+               count = find_value(evsel->name, vals);
+               perf_stat__update_shadow_stats(evsel, count, 0, st);
+       }
+}
+
+static double compute_single(struct rblist *metric_events, struct evlist *evlist,
+                            struct runtime_stat *st, const char *name)
+{
+       struct metric_expr *mexp;
+       struct metric_event *me;
+       struct evsel *evsel;
+
+       evlist__for_each_entry(evlist, evsel) {
+               me = metricgroup__lookup(metric_events, evsel, false);
+               if (me != NULL) {
+                       list_for_each_entry (mexp, &me->head, nd) {
+                               if (strcmp(mexp->metric_name, name))
+                                       continue;
+                               return test_generic_metric(mexp, 0, st);
+                       }
+               }
+       }
+       return 0.;
+}
+
+static int __compute_metric(const char *name, struct value *vals,
+                           const char *name1, double *ratio1,
+                           const char *name2, double *ratio2)
+{
+       struct rblist metric_events = {
+               .nr_entries = 0,
+       };
+       struct perf_cpu_map *cpus;
+       struct runtime_stat st;
+       struct evlist *evlist;
+       int err;
+
+       /*
+        * We need to prepare evlist for stat mode running on CPU 0
+        * because that's where all the stats are going to be created.
+        */
+       evlist = evlist__new();
+       if (!evlist)
+               return -ENOMEM;
+
+       cpus = perf_cpu_map__new("0");
+       if (!cpus)
+               return -ENOMEM;
+
+       perf_evlist__set_maps(&evlist->core, cpus, NULL);
+
+       /* Parse the metric into metric_events list. */
+       err = metricgroup__parse_groups_test(evlist, &map, name,
+                                            false, false,
+                                            &metric_events);
+       if (err)
+               return err;
+
+       if (perf_evlist__alloc_stats(evlist, false))
+               return -1;
+
+       /* Load the runtime stats with given numbers for events. */
+       runtime_stat__init(&st);
+       load_runtime_stat(&st, evlist, vals);
+
+       /* And execute the metric */
+       if (name1 && ratio1)
+               *ratio1 = compute_single(&metric_events, evlist, &st, name1);
+       if (name2 && ratio2)
+               *ratio2 = compute_single(&metric_events, evlist, &st, name2);
+
+       /* ... cleanup. */
+       metricgroup__rblist_exit(&metric_events);
+       runtime_stat__exit(&st);
+       perf_evlist__free_stats(evlist);
+       perf_cpu_map__put(cpus);
+       evlist__delete(evlist);
+       return 0;
+}
+
+static int compute_metric(const char *name, struct value *vals, double *ratio)
+{
+       return __compute_metric(name, vals, name, ratio, NULL, NULL);
+}
+
+static int compute_metric_group(const char *name, struct value *vals,
+                               const char *name1, double *ratio1,
+                               const char *name2, double *ratio2)
+{
+       return __compute_metric(name, vals, name1, ratio1, name2, ratio2);
+}
+
+static int test_ipc(void)
+{
+       double ratio;
+       struct value vals[] = {
+               { .event = "inst_retired.any",        .val = 300 },
+               { .event = "cpu_clk_unhalted.thread", .val = 200 },
+               { .event = NULL, },
+       };
+
+       TEST_ASSERT_VAL("failed to compute metric",
+                       compute_metric("IPC", vals, &ratio) == 0);
+
+       TEST_ASSERT_VAL("IPC failed, wrong ratio",
+                       ratio == 1.5);
+       return 0;
+}
+
+static int test_frontend(void)
+{
+       double ratio;
+       struct value vals[] = {
+               { .event = "idq_uops_not_delivered.core",        .val = 300 },
+               { .event = "cpu_clk_unhalted.thread",            .val = 200 },
+               { .event = "cpu_clk_unhalted.one_thread_active", .val = 400 },
+               { .event = "cpu_clk_unhalted.ref_xclk",          .val = 600 },
+               { .event = NULL, },
+       };
+
+       TEST_ASSERT_VAL("failed to compute metric",
+                       compute_metric("Frontend_Bound_SMT", vals, &ratio) == 0);
+
+       TEST_ASSERT_VAL("Frontend_Bound_SMT failed, wrong ratio",
+                       ratio == 0.45);
+       return 0;
+}
+
+static int test_cache_miss_cycles(void)
+{
+       double ratio;
+       struct value vals[] = {
+               { .event = "l1d-loads-misses",  .val = 300 },
+               { .event = "l1i-loads-misses",  .val = 200 },
+               { .event = "inst_retired.any",  .val = 400 },
+               { .event = NULL, },
+       };
+
+       TEST_ASSERT_VAL("failed to compute metric",
+                       compute_metric("cache_miss_cycles", vals, &ratio) == 0);
+
+       TEST_ASSERT_VAL("cache_miss_cycles failed, wrong ratio",
+                       ratio == 1.25);
+       return 0;
+}
+
+
+/*
+ * DCache_L2_All_Hits = l2_rqsts.demand_data_rd_hit + l2_rqsts.pf_hit + l2_rqsts.rfo_hit
+ * DCache_L2_All_Miss = max(l2_rqsts.all_demand_data_rd - l2_rqsts.demand_data_rd_hit, 0) +
+ *                      l2_rqsts.pf_miss + l2_rqsts.rfo_miss
+ * DCache_L2_All      = dcache_l2_all_hits + dcache_l2_all_miss
+ * DCache_L2_Hits     = d_ratio(dcache_l2_all_hits, dcache_l2_all)
+ * DCache_L2_Misses   = d_ratio(dcache_l2_all_miss, dcache_l2_all)
+ *
+ * l2_rqsts.demand_data_rd_hit = 100
+ * l2_rqsts.pf_hit             = 200
+ * l2_rqsts.rfo_hit            = 300
+ * l2_rqsts.all_demand_data_rd = 400
+ * l2_rqsts.pf_miss            = 500
+ * l2_rqsts.rfo_miss           = 600
+ *
+ * DCache_L2_All_Hits = 600
+ * DCache_L2_All_Miss = MAX(400 - 100, 0) + 500 + 600 = 1400
+ * DCache_L2_All      = 600 + 1400  = 2000
+ * DCache_L2_Hits     = 600 / 2000  = 0.3
+ * DCache_L2_Misses   = 1400 / 2000 = 0.7
+ */
+static int test_dcache_l2(void)
+{
+       double ratio;
+       struct value vals[] = {
+               { .event = "l2_rqsts.demand_data_rd_hit", .val = 100 },
+               { .event = "l2_rqsts.pf_hit",             .val = 200 },
+               { .event = "l2_rqsts.rfo_hit",            .val = 300 },
+               { .event = "l2_rqsts.all_demand_data_rd", .val = 400 },
+               { .event = "l2_rqsts.pf_miss",            .val = 500 },
+               { .event = "l2_rqsts.rfo_miss",           .val = 600 },
+               { .event = NULL, },
+       };
+
+       TEST_ASSERT_VAL("failed to compute metric",
+                       compute_metric("DCache_L2_Hits", vals, &ratio) == 0);
+
+       TEST_ASSERT_VAL("DCache_L2_Hits failed, wrong ratio",
+                       ratio == 0.3);
+
+       TEST_ASSERT_VAL("failed to compute metric",
+                       compute_metric("DCache_L2_Misses", vals, &ratio) == 0);
+
+       TEST_ASSERT_VAL("DCache_L2_Misses failed, wrong ratio",
+                       ratio == 0.7);
+       return 0;
+}
+
+static int test_recursion_fail(void)
+{
+       double ratio;
+       struct value vals[] = {
+               { .event = "inst_retired.any",        .val = 300 },
+               { .event = "cpu_clk_unhalted.thread", .val = 200 },
+               { .event = NULL, },
+       };
+
+       TEST_ASSERT_VAL("failed to find recursion",
+                       compute_metric("M1", vals, &ratio) == -1);
+
+       TEST_ASSERT_VAL("failed to find recursion",
+                       compute_metric("M3", vals, &ratio) == -1);
+       return 0;
+}
+
+static int test_metric_group(void)
+{
+       double ratio1, ratio2;
+       struct value vals[] = {
+               { .event = "cpu_clk_unhalted.thread", .val = 200 },
+               { .event = "l1d-loads-misses",        .val = 300 },
+               { .event = "l1i-loads-misses",        .val = 200 },
+               { .event = "inst_retired.any",        .val = 400 },
+               { .event = NULL, },
+       };
+
+       TEST_ASSERT_VAL("failed to compute metric group",
+                       compute_metric_group("group1", vals,
+                                            "IPC", &ratio1,
+                                            "cache_miss_cycles", &ratio2) == 0);
+
+       TEST_ASSERT_VAL("group IPC failed, wrong ratio",
+                       ratio1 == 2.0);
+
+       TEST_ASSERT_VAL("group cache_miss_cycles failed, wrong ratio",
+                       ratio2 == 1.25);
+       return 0;
+}
+
+int test__parse_metric(struct test *test __maybe_unused, int subtest __maybe_unused)
+{
+       TEST_ASSERT_VAL("IPC failed", test_ipc() == 0);
+       TEST_ASSERT_VAL("frontend failed", test_frontend() == 0);
+       TEST_ASSERT_VAL("cache_miss_cycles failed", test_cache_miss_cycles() == 0);
+       TEST_ASSERT_VAL("DCache_L2 failed", test_dcache_l2() == 0);
+       TEST_ASSERT_VAL("recursion fail failed", test_recursion_fail() == 0);
+       TEST_ASSERT_VAL("test metric group", test_metric_group() == 0);
+       return 0;
+}
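As a sanity check on the ratios asserted above, they can be recomputed by hand from the metric expressions in pme_test[] and the values each test feeds in; the throwaway program below (not part of the patch) just repeats that arithmetic:

#include <stdio.h>

int main(void)
{
	/* Inputs from test_ipc(), test_frontend() and test_cache_miss_cycles(). */
	double inst_retired = 300, clk_thread = 200;
	double idq_not_delivered = 300, one_thread_active = 400, ref_xclk = 600;
	double l1d_miss = 300, l1i_miss = 200, inst_any = 400;

	double ipc = inst_retired / clk_thread;			/* 300/200 = 1.5 */
	double frontend = idq_not_delivered /
		(4 * ((clk_thread / 2) * (1 + one_thread_active / ref_xclk)));
								/* 300/(2000/3) = 0.45 */
	double cache_miss = l1d_miss / inst_any + l1i_miss / inst_any;
								/* 0.75 + 0.5 = 1.25 */

	printf("IPC=%g Frontend_Bound_SMT=%g cache_miss_cycles=%g\n",
	       ipc, frontend, cache_miss);
	return 0;
}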
index 83adfd8..67d3f5a 100644 (file)
@@ -185,14 +185,14 @@ int test__PERF_RECORD(struct test *test __maybe_unused, int subtest __maybe_unus
                                err = perf_evlist__parse_sample(evlist, event, &sample);
                                if (err < 0) {
                                        if (verbose > 0)
-                                               perf_event__fprintf(event, stderr);
+                                               perf_event__fprintf(event, NULL, stderr);
                                        pr_debug("Couldn't parse sample\n");
                                        goto out_delete_evlist;
                                }
 
                                if (verbose > 0) {
                                        pr_info("%" PRIu64" %d ", sample.time, sample.cpu);
-                                       perf_event__fprintf(event, stderr);
+                                       perf_event__fprintf(event, NULL, stderr);
                                }
 
                                if (prev_time > sample.time) {
index ab64b4a..eb19f9a 100644 (file)
@@ -390,9 +390,9 @@ static bool is_number(const char *str)
        return errno == 0 && end_ptr != str;
 }
 
-static int check_parse_id(const char *id, bool same_cpu, struct pmu_event *pe)
+static int check_parse_id(const char *id, struct parse_events_error *error,
+                         struct perf_pmu *fake_pmu)
 {
-       struct parse_events_error error;
        struct evlist *evlist;
        int ret;
 
@@ -401,8 +401,18 @@ static int check_parse_id(const char *id, bool same_cpu, struct pmu_event *pe)
                return 0;
 
        evlist = evlist__new();
-       memset(&error, 0, sizeof(error));
-       ret = parse_events(evlist, id, &error);
+       if (!evlist)
+               return -ENOMEM;
+       ret = __parse_events(evlist, id, error, fake_pmu);
+       evlist__delete(evlist);
+       return ret;
+}
+
+static int check_parse_cpu(const char *id, bool same_cpu, struct pmu_event *pe)
+{
+       struct parse_events_error error = { .idx = 0, };
+
+       int ret = check_parse_id(id, &error, NULL);
        if (ret && same_cpu) {
                pr_warning("Parse event failed metric '%s' id '%s' expr '%s'\n",
                        pe->metric_name, id, pe->metric_expr);
@@ -413,7 +423,18 @@ static int check_parse_id(const char *id, bool same_cpu, struct pmu_event *pe)
                          id, pe->metric_name, pe->metric_expr);
                ret = 0;
        }
-       evlist__delete(evlist);
+       free(error.str);
+       free(error.help);
+       free(error.first_str);
+       free(error.first_help);
+       return ret;
+}
+
+static int check_parse_fake(const char *id)
+{
+       struct parse_events_error error = { .idx = 0, };
+       int ret = check_parse_id(id, &error, &perf_pmu__fake);
+
        free(error.str);
        free(error.help);
        free(error.first_str);
@@ -471,10 +492,10 @@ static int test_parsing(void)
                         */
                        k = 1;
                        hashmap__for_each_entry((&ctx.ids), cur, bkt)
-                               expr__add_id(&ctx, strdup(cur->key), k++);
+                               expr__add_id_val(&ctx, strdup(cur->key), k++);
 
                        hashmap__for_each_entry((&ctx.ids), cur, bkt) {
-                               if (check_parse_id(cur->key, map == cpus_map,
+                               if (check_parse_cpu(cur->key, map == cpus_map,
                                                   pe))
                                        ret++;
                        }
@@ -490,6 +511,100 @@ static int test_parsing(void)
        return ret == 0 ? TEST_OK : TEST_SKIP;
 }
 
+struct test_metric {
+       const char *str;
+};
+
+static struct test_metric metrics[] = {
+       { "(unc_p_power_state_occupancy.cores_c0 / unc_p_clockticks) * 100." },
+       { "imx8_ddr0@read\\-cycles@ * 4 * 4", },
+       { "imx8_ddr0@axid\\-read\\,axi_mask\\=0xffff\\,axi_id\\=0x0000@ * 4", },
+       { "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100", },
+       { "(imx8_ddr0@read\\-cycles@ + imx8_ddr0@write\\-cycles@)", },
+};
+
+static int metric_parse_fake(const char *str)
+{
+       struct expr_parse_ctx ctx;
+       struct hashmap_entry *cur;
+       double result;
+       int ret = -1;
+       size_t bkt;
+       int i;
+
+       pr_debug("parsing '%s'\n", str);
+
+       expr__ctx_init(&ctx);
+       if (expr__find_other(str, NULL, &ctx, 0) < 0) {
+               pr_err("expr__find_other failed\n");
+               return -1;
+       }
+
+       /*
+        * Add all ids with a made up value. The value may
+        * trigger a divide by zero when subtracted, so try to
+        * make them unique.
+        */
+       i = 1;
+       hashmap__for_each_entry((&ctx.ids), cur, bkt)
+               expr__add_id_val(&ctx, strdup(cur->key), i++);
+
+       hashmap__for_each_entry((&ctx.ids), cur, bkt) {
+               if (check_parse_fake(cur->key)) {
+                       pr_err("check_parse_fake failed\n");
+                       goto out;
+               }
+       }
+
+       if (expr__parse(&result, &ctx, str, 1))
+               pr_err("expr__parse failed\n");
+       else
+               ret = 0;
+
+out:
+       expr__ctx_clear(&ctx);
+       return ret;
+}
+
+/*
+ * Parse all the metrics for the current architecture,
+ * or all defined cpus via the 'fake_pmu'
+ * in parse_events.
+ */
+static int test_parsing_fake(void)
+{
+       struct pmu_events_map *map;
+       struct pmu_event *pe;
+       unsigned int i, j;
+       int err = 0;
+
+       for (i = 0; i < ARRAY_SIZE(metrics); i++) {
+               err = metric_parse_fake(metrics[i].str);
+               if (err)
+                       return err;
+       }
+
+       i = 0;
+       for (;;) {
+               map = &pmu_events_map[i++];
+               if (!map->table)
+                       break;
+               j = 0;
+               for (;;) {
+                       pe = &map->table[j++];
+                       if (!pe->name && !pe->metric_group && !pe->metric_name)
+                               break;
+                       if (!pe->metric_expr)
+                               continue;
+                       err = metric_parse_fake(pe->metric_expr);
+                       if (err)
+                               return err;
+               }
+       }
+
+       return 0;
+}
+
 static const struct {
        int (*func)(void);
        const char *desc;
@@ -506,6 +621,10 @@ static const struct {
                .func = test_parsing,
                .desc = "Parsing of PMU event table metrics",
        },
+       {
+               .func = test_parsing_fake,
+               .desc = "Parsing of PMU event table metrics with fake PMUs",
+       },
 };
 
 const char *test__pmu_events_subtest_get_desc(int subtest)
index 54030c1..bf9e729 100755 (executable)
@@ -20,13 +20,13 @@ file=$(mktemp /tmp/temporary_file.XXXXX)
 
 record_open_file() {
        echo "Recording open file:"
-       perf record -o ${perfdata} -e probe:vfs_getname touch $file
+       perf record -o ${perfdata} -e probe:vfs_getname\* touch $file
 }
 
 perf_script_filenames() {
        echo "Looking at perf.data file for vfs_getname records for the file we touched:"
        perf script -i ${perfdata} | \
-       egrep " +touch +[0-9]+ +\[[0-9]+\] +[0-9]+\.[0-9]+: +probe:vfs_getname: +\([[:xdigit:]]+\) +pathname=\"${file}\""
+       egrep " +touch +[0-9]+ +\[[0-9]+\] +[0-9]+\.[0-9]+: +probe:vfs_getname[_0-9]*: +\([[:xdigit:]]+\) +pathname=\"${file}\""
 }
 
 add_probe_vfs_getname || skip_if_no_debuginfo
index 76a4e35..4447a51 100644 (file)
@@ -121,6 +121,7 @@ int test__demangle_java(struct test *test, int subtest);
 int test__pfm(struct test *test, int subtest);
 const char *test__pfm_subtest_get_desc(int subtest);
 int test__pfm_subtest_get_nr(void);
+int test__parse_metric(struct test *test, int subtest);
 
 bool test__bp_signal_is_supported(void);
 bool test__bp_account_is_supported(void);
diff --git a/tools/perf/trace/beauty/include/linux/socket.h b/tools/perf/trace/beauty/include/linux/socket.h
new file mode 100644 (file)
index 0000000..e9cb30d
--- /dev/null
@@ -0,0 +1,442 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_SOCKET_H
+#define _LINUX_SOCKET_H
+
+
+#include <asm/socket.h>                        /* arch-dependent defines       */
+#include <linux/sockios.h>             /* the SIOCxxx I/O controls     */
+#include <linux/uio.h>                 /* iovec support                */
+#include <linux/types.h>               /* pid_t                        */
+#include <linux/compiler.h>            /* __user                       */
+#include <uapi/linux/socket.h>
+
+struct file;
+struct pid;
+struct cred;
+struct socket;
+
+#define __sockaddr_check_size(size)    \
+       BUILD_BUG_ON(((size) > sizeof(struct __kernel_sockaddr_storage)))
+
+#ifdef CONFIG_PROC_FS
+struct seq_file;
+extern void socket_seq_show(struct seq_file *seq);
+#endif
+
+typedef __kernel_sa_family_t   sa_family_t;
+
+/*
+ *     1003.1g requires sa_family_t and that sa_data is char.
+ */
+
+struct sockaddr {
+       sa_family_t     sa_family;      /* address family, AF_xxx       */
+       char            sa_data[14];    /* 14 bytes of protocol address */
+};
+
+struct linger {
+       int             l_onoff;        /* Linger active                */
+       int             l_linger;       /* How long to linger for       */
+};
+
+#define sockaddr_storage __kernel_sockaddr_storage
+
+/*
+ *     As we do 4.4BSD message passing we use a 4.4BSD message passing
+ *     system, not 4.3. Thus msg_accrights(len) are now missing. They
+ *     belong in an obscure libc emulation or the bin.
+ */
+
+struct msghdr {
+       void            *msg_name;      /* ptr to socket address structure */
+       int             msg_namelen;    /* size of socket address structure */
+       struct iov_iter msg_iter;       /* data */
+
+       /*
+        * Ancillary data. msg_control_user is the user buffer used for the
+        * recv* side when msg_control_is_user is set, msg_control is the kernel
+        * buffer used for all other cases.
+        */
+       union {
+               void            *msg_control;
+               void __user     *msg_control_user;
+       };
+       bool            msg_control_is_user : 1;
+       __kernel_size_t msg_controllen; /* ancillary data buffer length */
+       unsigned int    msg_flags;      /* flags on received message */
+       struct kiocb    *msg_iocb;      /* ptr to iocb for async requests */
+};
+
+struct user_msghdr {
+       void            __user *msg_name;       /* ptr to socket address structure */
+       int             msg_namelen;            /* size of socket address structure */
+       struct iovec    __user *msg_iov;        /* scatter/gather array */
+       __kernel_size_t msg_iovlen;             /* # elements in msg_iov */
+       void            __user *msg_control;    /* ancillary data */
+       __kernel_size_t msg_controllen;         /* ancillary data buffer length */
+       unsigned int    msg_flags;              /* flags on received message */
+};
+
+/* For recvmmsg/sendmmsg */
+struct mmsghdr {
+       struct user_msghdr  msg_hdr;
+       unsigned int        msg_len;
+};
+
+/*
+ *     POSIX 1003.1g - ancillary data object information
+ *     Ancillary data consists of a sequence of pairs of
+ *     (cmsghdr, cmsg_data[])
+ */
+
+struct cmsghdr {
+       __kernel_size_t cmsg_len;       /* data byte count, including hdr */
+        int            cmsg_level;     /* originating protocol */
+        int            cmsg_type;      /* protocol-specific type */
+};
+
+/*
+ *     Ancillary data object information MACROS
+ *     Table 5-14 of POSIX 1003.1g
+ */
+
+#define __CMSG_NXTHDR(ctl, len, cmsg) __cmsg_nxthdr((ctl),(len),(cmsg))
+#define CMSG_NXTHDR(mhdr, cmsg) cmsg_nxthdr((mhdr), (cmsg))
+
+#define CMSG_ALIGN(len) ( ((len)+sizeof(long)-1) & ~(sizeof(long)-1) )
+
+#define CMSG_DATA(cmsg) \
+       ((void *)(cmsg) + sizeof(struct cmsghdr))
+#define CMSG_USER_DATA(cmsg) \
+       ((void __user *)(cmsg) + sizeof(struct cmsghdr))
+#define CMSG_SPACE(len) (sizeof(struct cmsghdr) + CMSG_ALIGN(len))
+#define CMSG_LEN(len) (sizeof(struct cmsghdr) + (len))
+
+#define __CMSG_FIRSTHDR(ctl,len) ((len) >= sizeof(struct cmsghdr) ? \
+                                 (struct cmsghdr *)(ctl) : \
+                                 (struct cmsghdr *)NULL)
+#define CMSG_FIRSTHDR(msg)     __CMSG_FIRSTHDR((msg)->msg_control, (msg)->msg_controllen)
+#define CMSG_OK(mhdr, cmsg) ((cmsg)->cmsg_len >= sizeof(struct cmsghdr) && \
+                            (cmsg)->cmsg_len <= (unsigned long) \
+                            ((mhdr)->msg_controllen - \
+                             ((char *)(cmsg) - (char *)(mhdr)->msg_control)))
+#define for_each_cmsghdr(cmsg, msg) \
+       for (cmsg = CMSG_FIRSTHDR(msg); \
+            cmsg; \
+            cmsg = CMSG_NXTHDR(msg, cmsg))
+
+/*
+ *     Get the next cmsg header
+ *
+ *     PLEASE, do not touch this function. If you think that it is
+ *     incorrect, grep kernel sources and think about consequences
+ *     before trying to improve it.
+ *
+ *     Now it always returns valid, not truncated ancillary object
+ *     HEADER. But caller still MUST check, that cmsg->cmsg_len is
+ *     inside range, given by msg->msg_controllen before using
+ *     ancillary object DATA.                          --ANK (980731)
+ */
+
+static inline struct cmsghdr * __cmsg_nxthdr(void *__ctl, __kernel_size_t __size,
+                                              struct cmsghdr *__cmsg)
+{
+       struct cmsghdr * __ptr;
+
+       __ptr = (struct cmsghdr*)(((unsigned char *) __cmsg) +  CMSG_ALIGN(__cmsg->cmsg_len));
+       if ((unsigned long)((char*)(__ptr+1) - (char *) __ctl) > __size)
+               return (struct cmsghdr *)0;
+
+       return __ptr;
+}
+
+static inline struct cmsghdr * cmsg_nxthdr (struct msghdr *__msg, struct cmsghdr *__cmsg)
+{
+       return __cmsg_nxthdr(__msg->msg_control, __msg->msg_controllen, __cmsg);
+}
+
+static inline size_t msg_data_left(struct msghdr *msg)
+{
+       return iov_iter_count(&msg->msg_iter);
+}
+
+/* "Socket"-level control message types: */
+
+#define        SCM_RIGHTS      0x01            /* rw: access rights (array of int) */
+#define SCM_CREDENTIALS 0x02           /* rw: struct ucred             */
+#define SCM_SECURITY   0x03            /* rw: security label           */
+
+struct ucred {
+       __u32   pid;
+       __u32   uid;
+       __u32   gid;
+};
+
+/* Supported address families. */
+#define AF_UNSPEC      0
+#define AF_UNIX                1       /* Unix domain sockets          */
+#define AF_LOCAL       1       /* POSIX name for AF_UNIX       */
+#define AF_INET                2       /* Internet IP Protocol         */
+#define AF_AX25                3       /* Amateur Radio AX.25          */
+#define AF_IPX         4       /* Novell IPX                   */
+#define AF_APPLETALK   5       /* AppleTalk DDP                */
+#define AF_NETROM      6       /* Amateur Radio NET/ROM        */
+#define AF_BRIDGE      7       /* Multiprotocol bridge         */
+#define AF_ATMPVC      8       /* ATM PVCs                     */
+#define AF_X25         9       /* Reserved for X.25 project    */
+#define AF_INET6       10      /* IP version 6                 */
+#define AF_ROSE                11      /* Amateur Radio X.25 PLP       */
+#define AF_DECnet      12      /* Reserved for DECnet project  */
+#define AF_NETBEUI     13      /* Reserved for 802.2LLC project*/
+#define AF_SECURITY    14      /* Security callback pseudo AF */
+#define AF_KEY         15      /* PF_KEY key management API */
+#define AF_NETLINK     16
+#define AF_ROUTE       AF_NETLINK /* Alias to emulate 4.4BSD */
+#define AF_PACKET      17      /* Packet family                */
+#define AF_ASH         18      /* Ash                          */
+#define AF_ECONET      19      /* Acorn Econet                 */
+#define AF_ATMSVC      20      /* ATM SVCs                     */
+#define AF_RDS         21      /* RDS sockets                  */
+#define AF_SNA         22      /* Linux SNA Project (nutters!) */
+#define AF_IRDA                23      /* IRDA sockets                 */
+#define AF_PPPOX       24      /* PPPoX sockets                */
+#define AF_WANPIPE     25      /* Wanpipe API Sockets */
+#define AF_LLC         26      /* Linux LLC                    */
+#define AF_IB          27      /* Native InfiniBand address    */
+#define AF_MPLS                28      /* MPLS */
+#define AF_CAN         29      /* Controller Area Network      */
+#define AF_TIPC                30      /* TIPC sockets                 */
+#define AF_BLUETOOTH   31      /* Bluetooth sockets            */
+#define AF_IUCV                32      /* IUCV sockets                 */
+#define AF_RXRPC       33      /* RxRPC sockets                */
+#define AF_ISDN                34      /* mISDN sockets                */
+#define AF_PHONET      35      /* Phonet sockets               */
+#define AF_IEEE802154  36      /* IEEE802154 sockets           */
+#define AF_CAIF                37      /* CAIF sockets                 */
+#define AF_ALG         38      /* Algorithm sockets            */
+#define AF_NFC         39      /* NFC sockets                  */
+#define AF_VSOCK       40      /* vSockets                     */
+#define AF_KCM         41      /* Kernel Connection Multiplexor*/
+#define AF_QIPCRTR     42      /* Qualcomm IPC Router          */
+#define AF_SMC         43      /* smc sockets: reserve number for
+                                * PF_SMC protocol family that
+                                * reuses AF_INET address family
+                                */
+#define AF_XDP         44      /* XDP sockets                  */
+
+#define AF_MAX         45      /* For now.. */
+
+/* Protocol families, same as address families. */
+#define PF_UNSPEC      AF_UNSPEC
+#define PF_UNIX                AF_UNIX
+#define PF_LOCAL       AF_LOCAL
+#define PF_INET                AF_INET
+#define PF_AX25                AF_AX25
+#define PF_IPX         AF_IPX
+#define PF_APPLETALK   AF_APPLETALK
+#define        PF_NETROM       AF_NETROM
+#define PF_BRIDGE      AF_BRIDGE
+#define PF_ATMPVC      AF_ATMPVC
+#define PF_X25         AF_X25
+#define PF_INET6       AF_INET6
+#define PF_ROSE                AF_ROSE
+#define PF_DECnet      AF_DECnet
+#define PF_NETBEUI     AF_NETBEUI
+#define PF_SECURITY    AF_SECURITY
+#define PF_KEY         AF_KEY
+#define PF_NETLINK     AF_NETLINK
+#define PF_ROUTE       AF_ROUTE
+#define PF_PACKET      AF_PACKET
+#define PF_ASH         AF_ASH
+#define PF_ECONET      AF_ECONET
+#define PF_ATMSVC      AF_ATMSVC
+#define PF_RDS         AF_RDS
+#define PF_SNA         AF_SNA
+#define PF_IRDA                AF_IRDA
+#define PF_PPPOX       AF_PPPOX
+#define PF_WANPIPE     AF_WANPIPE
+#define PF_LLC         AF_LLC
+#define PF_IB          AF_IB
+#define PF_MPLS                AF_MPLS
+#define PF_CAN         AF_CAN
+#define PF_TIPC                AF_TIPC
+#define PF_BLUETOOTH   AF_BLUETOOTH
+#define PF_IUCV                AF_IUCV
+#define PF_RXRPC       AF_RXRPC
+#define PF_ISDN                AF_ISDN
+#define PF_PHONET      AF_PHONET
+#define PF_IEEE802154  AF_IEEE802154
+#define PF_CAIF                AF_CAIF
+#define PF_ALG         AF_ALG
+#define PF_NFC         AF_NFC
+#define PF_VSOCK       AF_VSOCK
+#define PF_KCM         AF_KCM
+#define PF_QIPCRTR     AF_QIPCRTR
+#define PF_SMC         AF_SMC
+#define PF_XDP         AF_XDP
+#define PF_MAX         AF_MAX
+
+/* Maximum queue length specifiable by listen.  */
+#define SOMAXCONN      4096
+
+/* Flags we can use with send and recv.
+   Added those for 1003.1g; not all are supported yet
+ */
+
+#define MSG_OOB                1
+#define MSG_PEEK       2
+#define MSG_DONTROUTE  4
+#define MSG_TRYHARD     4       /* Synonym for MSG_DONTROUTE for DECnet */
+#define MSG_CTRUNC     8
+#define MSG_PROBE      0x10    /* Do not send. Only probe path f.e. for MTU */
+#define MSG_TRUNC      0x20
+#define MSG_DONTWAIT   0x40    /* Nonblocking io                */
+#define MSG_EOR         0x80   /* End of record */
+#define MSG_WAITALL    0x100   /* Wait for a full request */
+#define MSG_FIN         0x200
+#define MSG_SYN                0x400
+#define MSG_CONFIRM    0x800   /* Confirm path validity */
+#define MSG_RST                0x1000
+#define MSG_ERRQUEUE   0x2000  /* Fetch message from error queue */
+#define MSG_NOSIGNAL   0x4000  /* Do not generate SIGPIPE */
+#define MSG_MORE       0x8000  /* Sender will send more */
+#define MSG_WAITFORONE 0x10000 /* recvmmsg(): block until 1+ packets avail */
+#define MSG_SENDPAGE_NOPOLICY 0x10000 /* sendpage() internal : do not apply policy */
+#define MSG_SENDPAGE_NOTLAST 0x20000 /* sendpage() internal : not the last page */
+#define MSG_BATCH      0x40000 /* sendmmsg(): more messages coming */
+#define MSG_EOF         MSG_FIN
+#define MSG_NO_SHARED_FRAGS 0x80000 /* sendpage() internal : page frags are not shared */
+#define MSG_SENDPAGE_DECRYPTED 0x100000 /* sendpage() internal : page may carry
+                                         * plain text and require encryption
+                                         */
+
+#define MSG_ZEROCOPY   0x4000000       /* Use user data in kernel path */
+#define MSG_FASTOPEN   0x20000000      /* Send data in TCP SYN */
+#define MSG_CMSG_CLOEXEC 0x40000000    /* Set close_on_exec for file
+                                          descriptor received through
+                                          SCM_RIGHTS */
+#if defined(CONFIG_COMPAT)
+#define MSG_CMSG_COMPAT        0x80000000      /* This message needs 32 bit fixups */
+#else
+#define MSG_CMSG_COMPAT        0               /* We never have 32 bit fixups */
+#endif
+
+
+/* Setsockoptions(2) level. Thanks to BSD these must match IPPROTO_xxx */
+#define SOL_IP         0
+/* #define SOL_ICMP    1       No-no-no! Due to Linux :-) we cannot use SOL_ICMP=1 */
+#define SOL_TCP                6
+#define SOL_UDP                17
+#define SOL_IPV6       41
+#define SOL_ICMPV6     58
+#define SOL_SCTP       132
+#define SOL_UDPLITE    136     /* UDP-Lite (RFC 3828) */
+#define SOL_RAW                255
+#define SOL_IPX                256
+#define SOL_AX25       257
+#define SOL_ATALK      258
+#define SOL_NETROM     259
+#define SOL_ROSE       260
+#define SOL_DECNET     261
+#define        SOL_X25         262
+#define SOL_PACKET     263
+#define SOL_ATM                264     /* ATM layer (cell level) */
+#define SOL_AAL                265     /* ATM Adaptation Layer (packet level) */
+#define SOL_IRDA        266
+#define SOL_NETBEUI    267
+#define SOL_LLC                268
+#define SOL_DCCP       269
+#define SOL_NETLINK    270
+#define SOL_TIPC       271
+#define SOL_RXRPC      272
+#define SOL_PPPOL2TP   273
+#define SOL_BLUETOOTH  274
+#define SOL_PNPIPE     275
+#define SOL_RDS                276
+#define SOL_IUCV       277
+#define SOL_CAIF       278
+#define SOL_ALG                279
+#define SOL_NFC                280
+#define SOL_KCM                281
+#define SOL_TLS                282
+#define SOL_XDP                283
+
+/* IPX options */
+#define IPX_TYPE       1
+
+extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr);
+extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data);
+
+struct timespec64;
+struct __kernel_timespec;
+struct old_timespec32;
+
+struct scm_timestamping_internal {
+       struct timespec64 ts[3];
+};
+
+extern void put_cmsg_scm_timestamping64(struct msghdr *msg, struct scm_timestamping_internal *tss);
+extern void put_cmsg_scm_timestamping(struct msghdr *msg, struct scm_timestamping_internal *tss);
+
+/* The __sys_...msg variants allow MSG_CMSG_COMPAT iff
+ * forbid_cmsg_compat==false
+ */
+extern long __sys_recvmsg(int fd, struct user_msghdr __user *msg,
+                         unsigned int flags, bool forbid_cmsg_compat);
+extern long __sys_sendmsg(int fd, struct user_msghdr __user *msg,
+                         unsigned int flags, bool forbid_cmsg_compat);
+extern int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg,
+                         unsigned int vlen, unsigned int flags,
+                         struct __kernel_timespec __user *timeout,
+                         struct old_timespec32 __user *timeout32);
+extern int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg,
+                         unsigned int vlen, unsigned int flags,
+                         bool forbid_cmsg_compat);
+extern long __sys_sendmsg_sock(struct socket *sock, struct msghdr *msg,
+                              unsigned int flags);
+extern long __sys_recvmsg_sock(struct socket *sock, struct msghdr *msg,
+                              struct user_msghdr __user *umsg,
+                              struct sockaddr __user *uaddr,
+                              unsigned int flags);
+extern int sendmsg_copy_msghdr(struct msghdr *msg,
+                              struct user_msghdr __user *umsg, unsigned flags,
+                              struct iovec **iov);
+extern int recvmsg_copy_msghdr(struct msghdr *msg,
+                              struct user_msghdr __user *umsg, unsigned flags,
+                              struct sockaddr __user **uaddr,
+                              struct iovec **iov);
+extern int __copy_msghdr_from_user(struct msghdr *kmsg,
+                                  struct user_msghdr __user *umsg,
+                                  struct sockaddr __user **save_addr,
+                                  struct iovec __user **uiov, size_t *nsegs);
+
+/* helpers which do the actual work for syscalls */
+extern int __sys_recvfrom(int fd, void __user *ubuf, size_t size,
+                         unsigned int flags, struct sockaddr __user *addr,
+                         int __user *addr_len);
+extern int __sys_sendto(int fd, void __user *buff, size_t len,
+                       unsigned int flags, struct sockaddr __user *addr,
+                       int addr_len);
+extern int __sys_accept4_file(struct file *file, unsigned file_flags,
+                       struct sockaddr __user *upeer_sockaddr,
+                        int __user *upeer_addrlen, int flags,
+                        unsigned long nofile);
+extern int __sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr,
+                        int __user *upeer_addrlen, int flags);
+extern int __sys_socket(int family, int type, int protocol);
+extern int __sys_bind(int fd, struct sockaddr __user *umyaddr, int addrlen);
+extern int __sys_connect_file(struct file *file, struct sockaddr_storage *addr,
+                             int addrlen, int file_flags);
+extern int __sys_connect(int fd, struct sockaddr __user *uservaddr,
+                        int addrlen);
+extern int __sys_listen(int fd, int backlog);
+extern int __sys_getsockname(int fd, struct sockaddr __user *usockaddr,
+                            int __user *usockaddr_len);
+extern int __sys_getpeername(int fd, struct sockaddr __user *usockaddr,
+                            int __user *usockaddr_len);
+extern int __sys_socketpair(int family, int type, int protocol,
+                           int __user *usockvec);
+extern int __sys_shutdown(int fd, int how);
+
+extern struct ns_common *get_net_ns(struct ns_common *ns);
+#endif /* _LINUX_SOCKET_H */
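The control-message walk defined in this header (CMSG_FIRSTHDR/CMSG_NXTHDR/CMSG_DATA and the for_each_cmsghdr loop) follows the usual ancillary-data pattern. As an illustration only, here is a small userspace-style sketch of the same iteration using the POSIX <sys/socket.h> macros rather than the kernel-internal helpers; collect_rights_fds() and its parameters are made-up names for the example.

#include <string.h>
#include <sys/socket.h>

/* Walk the ancillary data of a received message and collect SCM_RIGHTS
 * descriptors.  Returns how many were copied into fds (at most max_fds). */
int collect_rights_fds(struct msghdr *msg, int *fds, int max_fds)
{
        struct cmsghdr *cmsg;
        int n = 0;

        for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
                if (cmsg->cmsg_level != SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS)
                        continue;
                /* payload is an array of ints; cmsg_len includes the header */
                int count = (cmsg->cmsg_len - CMSG_LEN(0)) / sizeof(int);
                for (int i = 0; i < count && n < max_fds; i++, n++)
                        memcpy(&fds[n], (int *)CMSG_DATA(cmsg) + i, sizeof(int));
        }
        return n;
}
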
index e0c13e6..cd11063 100644 (file)
@@ -7,14 +7,7 @@
 #include <sys/un.h>
 #include <arpa/inet.h>
 
-static const char *socket_families[] = {
-       "UNSPEC", "LOCAL", "INET", "AX25", "IPX", "APPLETALK", "NETROM",
-       "BRIDGE", "ATMPVC", "X25", "INET6", "ROSE", "DECnet", "NETBEUI",
-       "SECURITY", "KEY", "NETLINK", "PACKET", "ASH", "ECONET", "ATMSVC",
-       "RDS", "SNA", "IRDA", "PPPOX", "WANPIPE", "LLC", "IB", "CAN", "TIPC",
-       "BLUETOOTH", "IUCV", "RXRPC", "ISDN", "PHONET", "IEEE802154", "CAIF",
-       "ALG", "NFC", "VSOCK",
-};
+#include "trace/beauty/generated/socket_arrays.c"
 DEFINE_STRARRAY(socket_families, "PF_");
 
 static size_t af_inet__scnprintf(struct sockaddr *sa, char *bf, size_t size)
diff --git a/tools/perf/trace/beauty/socket.sh b/tools/perf/trace/beauty/socket.sh
new file mode 100755 (executable)
index 0000000..3820e5c
--- /dev/null
@@ -0,0 +1,24 @@
+#!/bin/sh
+# SPDX-License-Identifier: LGPL-2.1
+
+# This one uses a copy of the kernel source headers that is kept in a
+# place used just for these tools/perf/beauty/ scripts; we shouldn't
+# put it in tools/include/linux, otherwise it would be used in the
+# normal compiler build process and would drag needless stuff in from
+# the kernel.
+
+# When what these scripts need is already in tools/include/ then use it,
+# otherwise grab and check the copy from the kernel sources just for these
+# string table building scripts.
+
+[ $# -eq 1 ] && header_dir=$1 || header_dir=tools/perf/trace/beauty/include/linux/
+
+printf "static const char *socket_families[] = {\n"
+# #define AF_LOCAL     1       /* POSIX name for AF_UNIX       */
+regex='^#define[[:space:]]+AF_(\w+)[[:space:]]+([[:digit:]]+).*'
+
+egrep $regex ${header_dir}/socket.h | \
+       sed -r "s/$regex/\2 \1/g"       | \
+       xargs printf "\t[%s] = \"%s\",\n" | \
+       egrep -v "\"(UNIX|MAX)\""
+printf "};\n"
index 9023267..bd77825 100644 (file)
@@ -209,7 +209,7 @@ static void annotate_browser__draw_current_jump(struct ui_browser *browser)
                ui_browser__mark_fused(browser,
                                       pcnt_width + 3 + notes->widths.addr + width,
                                       from - 1,
-                                      to > from ? true : false);
+                                      to > from);
        }
 }
 
index 8d18380..cd5e419 100644 (file)
@@ -117,6 +117,7 @@ endif
 perf-y += parse-branch-options.o
 perf-y += dump-insn.o
 perf-y += parse-regs-options.o
+perf-y += parse-sublevel-options.o
 perf-y += term.o
 perf-y += help-unknown-cmd.o
 perf-y += mem-events.o
@@ -128,6 +129,7 @@ perf-y += expr-bison.o
 perf-y += expr.o
 perf-y += branch.o
 perf-y += mem2node.o
+perf-y += clockid.o
 
 perf-$(CONFIG_LIBBPF) += bpf-loader.o
 perf-$(CONFIG_LIBBPF) += bpf_map.o
@@ -191,36 +193,60 @@ CFLAGS_llvm-utils.o += -DPERF_INCLUDE_DIR="BUILD_STR($(perf_include_dir_SQ))"
 # avoid compiler warnings in 32-bit mode
 CFLAGS_genelf_debug.o  += -Wno-packed
 
-$(OUTPUT)util/parse-events-flex.c: util/parse-events.l $(OUTPUT)util/parse-events-bison.c
+$(OUTPUT)util/parse-events-flex.c $(OUTPUT)util/parse-events-flex.h: util/parse-events.l $(OUTPUT)util/parse-events-bison.c
        $(call rule_mkdir)
-       $(Q)$(call echo-cmd,flex)$(FLEX) -o $@ --header-file=$(OUTPUT)util/parse-events-flex.h $(PARSER_DEBUG_FLEX) util/parse-events.l
+       $(Q)$(call echo-cmd,flex)$(FLEX) -o $(OUTPUT)util/parse-events-flex.c \
+               --header-file=$(OUTPUT)util/parse-events-flex.h $(PARSER_DEBUG_FLEX) $<
 
-$(OUTPUT)util/parse-events-bison.c: util/parse-events.y
+$(OUTPUT)util/parse-events-bison.c $(OUTPUT)util/parse-events-bison.h: util/parse-events.y
        $(call rule_mkdir)
-       $(Q)$(call echo-cmd,bison)$(BISON) -v util/parse-events.y -d $(PARSER_DEBUG_BISON) -o $@ -p parse_events_
+       $(Q)$(call echo-cmd,bison)$(BISON) -v $< -d $(PARSER_DEBUG_BISON) \
+               -o $(OUTPUT)util/parse-events-bison.c -p parse_events_
 
-$(OUTPUT)util/expr-flex.c: util/expr.l $(OUTPUT)util/expr-bison.c
+$(OUTPUT)util/expr-flex.c $(OUTPUT)util/expr-flex.h: util/expr.l $(OUTPUT)util/expr-bison.c
        $(call rule_mkdir)
-       $(Q)$(call echo-cmd,flex)$(FLEX) -o $@ --header-file=$(OUTPUT)util/expr-flex.h $(PARSER_DEBUG_FLEX) util/expr.l
+       $(Q)$(call echo-cmd,flex)$(FLEX) -o $(OUTPUT)util/expr-flex.c \
+               --header-file=$(OUTPUT)util/expr-flex.h $(PARSER_DEBUG_FLEX) $<
 
-$(OUTPUT)util/expr-bison.c: util/expr.y
+$(OUTPUT)util/expr-bison.c $(OUTPUT)util/expr-bison.h: util/expr.y
        $(call rule_mkdir)
-       $(Q)$(call echo-cmd,bison)$(BISON) -v util/expr.y -d $(PARSER_DEBUG_BISON) -o $@ -p expr_
+       $(Q)$(call echo-cmd,bison)$(BISON) -v $< -d $(PARSER_DEBUG_BISON) \
+               -o $(OUTPUT)util/expr-bison.c -p expr_
 
-$(OUTPUT)util/pmu-flex.c: util/pmu.l $(OUTPUT)util/pmu-bison.c
+$(OUTPUT)util/pmu-flex.c $(OUTPUT)util/pmu-flex.h: util/pmu.l $(OUTPUT)util/pmu-bison.c
        $(call rule_mkdir)
-       $(Q)$(call echo-cmd,flex)$(FLEX) -o $@ --header-file=$(OUTPUT)util/pmu-flex.h util/pmu.l
+       $(Q)$(call echo-cmd,flex)$(FLEX) -o $(OUTPUT)util/pmu-flex.c \
+               --header-file=$(OUTPUT)util/pmu-flex.h $(PARSER_DEBUG_FLEX) $<
 
-$(OUTPUT)util/pmu-bison.c: util/pmu.y
+$(OUTPUT)util/pmu-bison.c $(OUTPUT)util/pmu-bison.h: util/pmu.y
        $(call rule_mkdir)
-       $(Q)$(call echo-cmd,bison)$(BISON) -v util/pmu.y -d -o $@ -p perf_pmu_
-
-CFLAGS_parse-events-flex.o  += -w
-CFLAGS_pmu-flex.o           += -w
-CFLAGS_expr-flex.o          += -w
-CFLAGS_parse-events-bison.o += -DYYENABLE_NLS=0 -w
-CFLAGS_pmu-bison.o          += -DYYENABLE_NLS=0 -DYYLTYPE_IS_TRIVIAL=0 -w
-CFLAGS_expr-bison.o         += -DYYENABLE_NLS=0 -DYYLTYPE_IS_TRIVIAL=0 -w
+       $(Q)$(call echo-cmd,bison)$(BISON) -v $< -d $(PARSER_DEBUG_BISON) \
+               -o $(OUTPUT)util/pmu-bison.c -p perf_pmu_
+
+FLEX_GE_26 := $(shell expr $(shell $(FLEX) --version | sed -e  's/flex \([0-9]\+\).\([0-9]\+\)/\1\2/g') \>\= 26)
+ifeq ($(FLEX_GE_26),1)
+  flex_flags := -Wno-switch-enum -Wno-switch-default -Wno-unused-function -Wno-redundant-decls -Wno-sign-compare -Wno-unused-parameter -Wno-missing-prototypes -Wno-missing-declarations
+  CC_HASNT_MISLEADING_INDENTATION := $(shell echo "int main(void) { return 0 }" | $(CC) -Werror -Wno-misleading-indentation -o /dev/null -xc - 2>&1 | grep -q -- -Wno-misleading-indentation ; echo $$?)
+  ifeq ($(CC_HASNT_MISLEADING_INDENTATION), 1)
+    flex_flags += -Wno-misleading-indentation
+  endif
+else
+  flex_flags := -w
+endif
+CFLAGS_parse-events-flex.o  += $(flex_flags)
+CFLAGS_pmu-flex.o           += $(flex_flags)
+CFLAGS_expr-flex.o          += $(flex_flags)
+
+bison_flags := -DYYENABLE_NLS=0
+BISON_GE_35 := $(shell expr $(shell $(BISON) --version | grep bison | sed -e 's/.\+ \([0-9]\+\).\([0-9]\+\)/\1\2/g') \>\= 35)
+ifeq ($(BISON_GE_35),1)
+  bison_flags += -Wno-unused-parameter -Wno-nested-externs -Wno-implicit-function-declaration -Wno-switch-enum
+else
+  bison_flags += -w
+endif
+CFLAGS_parse-events-bison.o += $(bison_flags)
+CFLAGS_pmu-bison.o          += -DYYLTYPE_IS_TRIVIAL=0 $(bison_flags)
+CFLAGS_expr-bison.o         += -DYYLTYPE_IS_TRIVIAL=0 $(bison_flags)
 
 $(OUTPUT)util/parse-events.o: $(OUTPUT)util/parse-events-flex.c $(OUTPUT)util/parse-events-bison.c
 $(OUTPUT)util/pmu.o: $(OUTPUT)util/pmu-flex.c $(OUTPUT)util/pmu-bison.c
index 76bfb4a..0a1fcf7 100644 (file)
@@ -1621,6 +1621,7 @@ static int dso__disassemble_filename(struct dso *dso, char *filename, size_t fil
        char *build_id_filename;
        char *build_id_path = NULL;
        char *pos;
+       int len;
 
        if (dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS &&
            !dso__is_kcore(dso))
@@ -1649,10 +1650,16 @@ static int dso__disassemble_filename(struct dso *dso, char *filename, size_t fil
        if (pos && strlen(pos) < SBUILD_ID_SIZE - 2)
                dirname(build_id_path);
 
-       if (dso__is_kcore(dso) ||
-           readlink(build_id_path, linkname, sizeof(linkname)) < 0 ||
-           strstr(linkname, DSO__NAME_KALLSYMS) ||
-           access(filename, R_OK)) {
+       if (dso__is_kcore(dso))
+               goto fallback;
+
+       len = readlink(build_id_path, linkname, sizeof(linkname) - 1);
+       if (len < 0)
+               goto fallback;
+
+       linkname[len] = '\0';
+       if (strstr(linkname, DSO__NAME_KALLSYMS) ||
+               access(filename, R_OK)) {
 fallback:
                /*
                 * If we don't have build-ids or the build-id file isn't in the
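The hunk above fixes a classic readlink(2) pitfall: the result is neither NUL-terminated nor bounded unless the caller reserves room and terminates it before handing the buffer to strstr(). A generic sketch of that pattern, as a hypothetical helper rather than perf code:

#include <unistd.h>

/* readlink() into buf and guarantee NUL termination.
 * Returns 0 on success, -1 on error or possible truncation. */
int readlink_z(const char *path, char *buf, size_t bufsz)
{
        ssize_t len = readlink(path, buf, bufsz - 1);

        if (len < 0)
                return -1;
        buf[len] = '\0';
        /* a result of exactly bufsz - 1 may have been truncated */
        return (size_t)len == bufsz - 1 ? -1 : 0;
}
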
index 25c639a..42a85c8 100644 (file)
@@ -1349,6 +1349,47 @@ void itrace_synth_opts__set_default(struct itrace_synth_opts *synth_opts,
        synth_opts->initial_skip = 0;
 }
 
+static int get_flag(const char **ptr, unsigned int *flags)
+{
+       while (1) {
+               char c = **ptr;
+
+               if (c >= 'a' && c <= 'z') {
+                       *flags |= 1 << (c - 'a');
+                       ++*ptr;
+                       return 0;
+               } else if (c == ' ') {
+                       ++*ptr;
+                       continue;
+               } else {
+                       return -1;
+               }
+       }
+}
+
+static int get_flags(const char **ptr, unsigned int *plus_flags, unsigned int *minus_flags)
+{
+       while (1) {
+               switch (**ptr) {
+               case '+':
+                       ++*ptr;
+                       if (get_flag(ptr, plus_flags))
+                               return -1;
+                       break;
+               case '-':
+                       ++*ptr;
+                       if (get_flag(ptr, minus_flags))
+                               return -1;
+                       break;
+               case ' ':
+                       ++*ptr;
+                       break;
+               default:
+                       return 0;
+               }
+       }
+}
+
 /*
  * Please check tools/perf/Documentation/perf-script.txt for information
  * about the options parsed here, which is introduced after this cset,
@@ -1436,9 +1477,15 @@ int itrace_parse_synth_opts(const struct option *opt, const char *str,
                        break;
                case 'e':
                        synth_opts->errors = true;
+                       if (get_flags(&p, &synth_opts->error_plus_flags,
+                                     &synth_opts->error_minus_flags))
+                               goto out_err;
                        break;
                case 'd':
                        synth_opts->log = true;
+                       if (get_flags(&p, &synth_opts->log_plus_flags,
+                                     &synth_opts->log_minus_flags))
+                               goto out_err;
                        break;
                case 'c':
                        synth_opts->branches = true;
@@ -1507,6 +1554,9 @@ int itrace_parse_synth_opts(const struct option *opt, const char *str,
                case 'a':
                        synth_opts->remote_access = true;
                        break;
+               case 'q':
+                       synth_opts->quick += 1;
+                       break;
                case ' ':
                case ',':
                        break;
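The new get_flag()/get_flags() helpers accept an optional suffix after the 'e' and 'd' itrace options, where each '+' or '-' must be followed by a single lower-case letter mapping to bit (letter - 'a') of a plus or minus mask. A self-contained sketch of that grammar for experimentation; parse_pm_flags() is my name, not perf's:

#include <stdio.h>

/* Parse a "+x"/"-y" suffix into plus/minus bit masks (bit = letter - 'a'),
 * mirroring the static helpers above: each sign takes exactly one letter,
 * spaces are skipped, and parsing stops at the first unrelated character. */
static int parse_pm_flags(const char *p, unsigned int *plus, unsigned int *minus)
{
        while (*p == '+' || *p == '-' || *p == ' ') {
                unsigned int *mask;

                if (*p == ' ') {
                        p++;
                        continue;
                }
                mask = (*p++ == '+') ? plus : minus;
                while (*p == ' ')
                        p++;
                if (*p < 'a' || *p > 'z')
                        return -1;
                *mask |= 1u << (*p++ - 'a');
        }
        return 0;
}

int main(void)
{
        unsigned int plus = 0, minus = 0;

        /* e.g. the flag part of --itrace=e+o-l */
        if (!parse_pm_flags("+o-l", &plus, &minus))
                printf("plus=%#x minus=%#x\n", plus, minus); /* plus=0x4000 minus=0x800 */
        return 0;
}
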
index 142ccf7..951d2d1 100644 (file)
@@ -55,6 +55,11 @@ enum itrace_period_type {
        PERF_ITRACE_PERIOD_NANOSECS,
 };
 
+#define AUXTRACE_ERR_FLG_OVERFLOW      (1 << ('o' - 'a'))
+#define AUXTRACE_ERR_FLG_DATA_LOST     (1 << ('l' - 'a'))
+
+#define AUXTRACE_LOG_FLG_ALL_PERF_EVTS (1 << ('a' - 'a'))
+
 /**
  * struct itrace_synth_opts - AUX area tracing synthesis options.
  * @set: indicates whether or not options have been set
@@ -91,6 +96,11 @@ enum itrace_period_type {
  * @cpu_bitmap: CPUs for which to synthesize events, or NULL for all
  * @ptime_range: time intervals to trace or NULL
  * @range_num: number of time intervals to trace
+ * @error_plus_flags: flags to affect what errors are reported
+ * @error_minus_flags: flags to affect what errors are reported
+ * @log_plus_flags: flags to affect what is logged
+ * @log_minus_flags: flags to affect what is logged
+ * @quick: quicker (less detailed) decoding
  */
 struct itrace_synth_opts {
        bool                    set;
@@ -124,6 +134,11 @@ struct itrace_synth_opts {
        unsigned long           *cpu_bitmap;
        struct perf_time_interval *ptime_range;
        int                     range_num;
+       unsigned int            error_plus_flags;
+       unsigned int            error_minus_flags;
+       unsigned int            log_plus_flags;
+       unsigned int            log_minus_flags;
+       unsigned int            quick;
 };
 
 /**
@@ -604,22 +619,32 @@ bool auxtrace__evsel_is_auxtrace(struct perf_session *session,
                                 struct evsel *evsel);
 
 #define ITRACE_HELP \
-"                              i:                      synthesize instructions events\n"               \
+"                              i[period]:              synthesize instructions events\n" \
 "                              b:                      synthesize branches events (branch misses for Arm SPE)\n" \
 "                              c:                      synthesize branches events (calls only)\n"      \
 "                              r:                      synthesize branches events (returns only)\n" \
 "                              x:                      synthesize transactions events\n"               \
 "                              w:                      synthesize ptwrite events\n"            \
 "                              p:                      synthesize power events\n"                      \
-"                              e:                      synthesize error events\n"                      \
-"                              d:                      create a debug log\n"                   \
+"                              o:                      synthesize other events recorded due to the use\n" \
+"                                                      of aux-output (refer to perf record)\n" \
+"                              e[flags]:               synthesize error events\n" \
+"                                                      each flag must be preceded by + or -\n" \
+"                                                      error flags are: o (overflow)\n" \
+"                                                                       l (data lost)\n" \
+"                              d[flags]:               create a debug log\n" \
+"                                                      each flag must be preceded by + or -\n" \
+"                                                      log flags are: a (all perf events)\n" \
 "                              f:                      synthesize first level cache events\n" \
 "                              m:                      synthesize last level cache events\n" \
 "                              t:                      synthesize TLB events\n" \
 "                              a:                      synthesize remote access events\n" \
 "                              g[len]:                 synthesize a call chain (use with i or x)\n" \
+"                              G[len]:                 synthesize a call chain on existing event records\n" \
 "                              l[len]:                 synthesize last branch entries (use with i or x)\n" \
+"                              L[len]:                 synthesize last branch entries on existing event records\n" \
 "                              sNUMBER:                skip initial number of events\n"                \
+"                              q:                      quicker (less detailed) decoding\n" \
 "                              PERIOD[ns|us|ms|i|t]:   specify period to sample stream\n" \
 "                              concatenate multiple options. Default is ibxwpe or cewp\n"
 
index c076fc7..31207b6 100644 (file)
 #include "probe-file.h"
 #include "strlist.h"
 
+#ifdef HAVE_DEBUGINFOD_SUPPORT
+#include <elfutils/debuginfod.h>
+#endif
+
 #include <linux/ctype.h>
 #include <linux/zalloc.h>
 
@@ -636,6 +640,21 @@ static char *build_id_cache__find_debug(const char *sbuild_id,
        if (realname && access(realname, R_OK))
                zfree(&realname);
        nsinfo__mountns_exit(&nsc);
+
+#ifdef HAVE_DEBUGINFOD_SUPPORT
+        if (realname == NULL) {
+                debuginfod_client* c = debuginfod_begin();
+                if (c != NULL) {
+                        int fd = debuginfod_find_debuginfo(c,
+                                                           (const unsigned char*)sbuild_id, 0,
+                                                           &realname);
+                        if (fd >= 0)
+                                close(fd); /* retaining reference by realname */
+                        debuginfod_end(c);
+                }
+        }
+#endif
+
 out:
        free(debugfile);
        return realname;
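When perf is built with HAVE_DEBUGINFOD_SUPPORT, a missing local debuginfo file is fetched through the elfutils debuginfod client, which consults the servers listed in $DEBUGINFOD_URLS. A minimal standalone sketch of the same client calls (build-id passed as a hex string, as in the hunk above); error handling is kept to the bare minimum:

/* Build with: cc fetch.c -ldebuginfod   (DEBUGINFOD_URLS must be set) */
#include <elfutils/debuginfod.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(int argc, char **argv)
{
        debuginfod_client *c;
        char *path = NULL;
        int fd = -1;

        if (argc != 2) {
                fprintf(stderr, "usage: %s <hex-build-id>\n", argv[0]);
                return 1;
        }

        c = debuginfod_begin();
        if (!c)
                return 1;

        /* build_id_len == 0 means the id is given as a hex string */
        fd = debuginfod_find_debuginfo(c, (const unsigned char *)argv[1], 0, &path);
        if (fd >= 0) {
                printf("debuginfo cached at %s\n", path);
                free(path);
                close(fd);
        }
        debuginfod_end(c);
        return fd >= 0 ? 0 : 1;
}
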
diff --git a/tools/perf/util/clockid.c b/tools/perf/util/clockid.c
new file mode 100644 (file)
index 0000000..74365a5
--- /dev/null
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <subcmd/parse-options.h>
+#include <stdio.h>
+#include <time.h>
+#include <strings.h>
+#include <linux/time64.h>
+#include "debug.h"
+#include "clockid.h"
+#include "record.h"
+
+struct clockid_map {
+       const char *name;
+       int clockid;
+};
+
+#define CLOCKID_MAP(n, c)      \
+       { .name = n, .clockid = (c), }
+
+#define CLOCKID_END    { .name = NULL, }
+
+
+/*
+ * Add the missing ones; we need to build on many distros...
+ */
+#ifndef CLOCK_MONOTONIC_RAW
+#define CLOCK_MONOTONIC_RAW 4
+#endif
+#ifndef CLOCK_BOOTTIME
+#define CLOCK_BOOTTIME 7
+#endif
+#ifndef CLOCK_TAI
+#define CLOCK_TAI 11
+#endif
+
+static const struct clockid_map clockids[] = {
+       /* available for all events, NMI safe */
+       CLOCKID_MAP("monotonic", CLOCK_MONOTONIC),
+       CLOCKID_MAP("monotonic_raw", CLOCK_MONOTONIC_RAW),
+
+       /* available for some events */
+       CLOCKID_MAP("realtime", CLOCK_REALTIME),
+       CLOCKID_MAP("boottime", CLOCK_BOOTTIME),
+       CLOCKID_MAP("tai", CLOCK_TAI),
+
+       /* available for the lazy */
+       CLOCKID_MAP("mono", CLOCK_MONOTONIC),
+       CLOCKID_MAP("raw", CLOCK_MONOTONIC_RAW),
+       CLOCKID_MAP("real", CLOCK_REALTIME),
+       CLOCKID_MAP("boot", CLOCK_BOOTTIME),
+
+       CLOCKID_END,
+};
+
+static int get_clockid_res(clockid_t clk_id, u64 *res_ns)
+{
+       struct timespec res;
+
+       *res_ns = 0;
+       if (!clock_getres(clk_id, &res))
+               *res_ns = res.tv_nsec + res.tv_sec * NSEC_PER_SEC;
+       else
+               pr_warning("WARNING: Failed to determine specified clock resolution.\n");
+
+       return 0;
+}
+
+int parse_clockid(const struct option *opt, const char *str, int unset)
+{
+       struct record_opts *opts = (struct record_opts *)opt->value;
+       const struct clockid_map *cm;
+       const char *ostr = str;
+
+       if (unset) {
+               opts->use_clockid = 0;
+               return 0;
+       }
+
+       /* no arg passed */
+       if (!str)
+               return 0;
+
+       /* no setting it twice */
+       if (opts->use_clockid)
+               return -1;
+
+       opts->use_clockid = true;
+
+       /* if it's a number, we're done */
+       if (sscanf(str, "%d", &opts->clockid) == 1)
+               return get_clockid_res(opts->clockid, &opts->clockid_res_ns);
+
+       /* allow a "CLOCK_" prefix to the name */
+       if (!strncasecmp(str, "CLOCK_", 6))
+               str += 6;
+
+       for (cm = clockids; cm->name; cm++) {
+               if (!strcasecmp(str, cm->name)) {
+                       opts->clockid = cm->clockid;
+                       return get_clockid_res(opts->clockid,
+                                              &opts->clockid_res_ns);
+               }
+       }
+
+       opts->use_clockid = false;
+       ui__warning("unknown clockid %s, check man page\n", ostr);
+       return -1;
+}
+
+const char *clockid_name(clockid_t clk_id)
+{
+       const struct clockid_map *cm;
+
+       for (cm = clockids; cm->name; cm++) {
+               if (cm->clockid == clk_id)
+                       return cm->name;
+       }
+       return "(not found)";
+}
diff --git a/tools/perf/util/clockid.h b/tools/perf/util/clockid.h
new file mode 100644 (file)
index 0000000..9b49b47
--- /dev/null
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __PERF_CLOCKID_H
+#define __PERF_CLOCKID_H
+
+struct option;
+int parse_clockid(const struct option *opt, const char *str, int unset);
+
+const char *clockid_name(clockid_t clk_id);
+
+#endif
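parse_clockid() maps names such as "monotonic_raw" (optionally with a CLOCK_ prefix, or given numerically) to a clockid and records its resolution via clock_getres(); this is what perf record's -k/--clockid option feeds. A tiny standalone check of the same resolution query, assuming CLOCK_MONOTONIC_RAW is available on the build host:

#include <stdio.h>
#include <time.h>

int main(void)
{
        struct timespec res;

        if (clock_getres(CLOCK_MONOTONIC_RAW, &res) == 0)
                printf("monotonic_raw resolution: %lld ns\n",
                       (long long)(res.tv_sec * 1000000000LL + res.tv_nsec));
        else
                perror("clock_getres");
        return 0;
}
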
index 5f36fc6..27c5fef 100644 (file)
@@ -31,6 +31,9 @@
 #include "config.h"
 #include <linux/ctype.h>
 #include <linux/err.h>
+#include <linux/time64.h>
+#include "util.h"
+#include "clockid.h"
 
 #define pr_N(n, fmt, ...) \
        eprintf(n, debug_data_convert, fmt, ##__VA_ARGS__)
@@ -1381,11 +1384,26 @@ do {                                                                    \
        return 0;
 }
 
-static int ctf_writer__setup_clock(struct ctf_writer *cw)
+static int ctf_writer__setup_clock(struct ctf_writer *cw,
+                                  struct perf_session *session,
+                                  bool tod)
 {
        struct bt_ctf_clock *clock = cw->clock;
+       const char *desc = "perf clock";
+       int64_t offset = 0;
 
-       bt_ctf_clock_set_description(clock, "perf clock");
+       if (tod) {
+               struct perf_env *env = &session->header.env;
+
+               if (!env->clock.enabled) {
+                       pr_err("Can't provide --tod time, missing clock data. "
+                              "Please record with -k/--clockid option.\n");
+                       return -1;
+               }
+
+               desc   = clockid_name(env->clock.clockid);
+               offset = env->clock.tod_ns - env->clock.clockid_ns;
+       }
 
 #define SET(__n, __v)                          \
 do {                                           \
@@ -1394,8 +1412,8 @@ do {                                              \
 } while (0)
 
        SET(frequency,   1000000000);
-       SET(offset_s,    0);
-       SET(offset,      0);
+       SET(offset,      offset);
+       SET(description, desc);
        SET(precision,   10);
        SET(is_absolute, 0);
 
@@ -1481,7 +1499,8 @@ static void ctf_writer__cleanup(struct ctf_writer *cw)
        memset(cw, 0, sizeof(*cw));
 }
 
-static int ctf_writer__init(struct ctf_writer *cw, const char *path)
+static int ctf_writer__init(struct ctf_writer *cw, const char *path,
+                           struct perf_session *session, bool tod)
 {
        struct bt_ctf_writer            *writer;
        struct bt_ctf_stream_class      *stream_class;
@@ -1505,7 +1524,7 @@ static int ctf_writer__init(struct ctf_writer *cw, const char *path)
 
        cw->clock = clock;
 
-       if (ctf_writer__setup_clock(cw)) {
+       if (ctf_writer__setup_clock(cw, session, tod)) {
                pr("Failed to setup CTF clock.\n");
                goto err_cleanup;
        }
@@ -1613,17 +1632,15 @@ int bt_convert__perf2ctf(const char *input, const char *path,
        if (err)
                return err;
 
-       /* CTF writer */
-       if (ctf_writer__init(cw, path))
-               return -1;
-
        err = -1;
        /* perf.data session */
        session = perf_session__new(&data, 0, &c.tool);
-       if (IS_ERR(session)) {
-               err = PTR_ERR(session);
-               goto free_writer;
-       }
+       if (IS_ERR(session))
+               return PTR_ERR(session);
+
+       /* CTF writer */
+       if (ctf_writer__init(cw, path, session, opts->tod))
+               goto free_session;
 
        if (c.queue_size) {
                ordered_events__set_alloc_size(&session->ordered_events,
@@ -1632,17 +1649,17 @@ int bt_convert__perf2ctf(const char *input, const char *path,
 
        /* CTF writer env/clock setup  */
        if (ctf_writer__setup_env(cw, session))
-               goto free_session;
+               goto free_writer;
 
        /* CTF events setup */
        if (setup_events(cw, session))
-               goto free_session;
+               goto free_writer;
 
        if (opts->all && setup_non_sample_events(cw, session))
-               goto free_session;
+               goto free_writer;
 
        if (setup_streams(cw, session))
-               goto free_session;
+               goto free_writer;
 
        err = perf_session__process_events(session);
        if (!err)
@@ -1670,10 +1687,10 @@ int bt_convert__perf2ctf(const char *input, const char *path,
 
        return err;
 
-free_session:
-       perf_session__delete(session);
 free_writer:
        ctf_writer__cleanup(cw);
+free_session:
+       perf_session__delete(session);
        pr_err("Error during conversion setup.\n");
        return err;
 }
index af90b60..feab5f1 100644 (file)
@@ -5,6 +5,7 @@
 struct perf_data_convert_opts {
        bool force;
        bool all;
+       bool tod;
 };
 
 #endif /* __DATA_CONVERT_H */
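As I read the data-convert-bt.c change, --tod describes the CTF clock using the session's clockid and sets its offset to tod_ns - clockid_ns, the wall-clock/perf-clock difference captured at record time, so perf timestamps come out as time of day. A small sketch of that arithmetic with made-up values:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
        /* Reference pair captured once at record time (illustrative values). */
        uint64_t tod_ns     = 1598000000ULL * 1000000000ULL;  /* gettimeofday() */
        uint64_t clockid_ns = 123456789012345ULL;             /* clock_gettime(clockid) */
        uint64_t sample_ns  = 123456790000000ULL;             /* a perf sample timestamp */

        /* Offset given to the CTF clock; adding it converts perf time to TOD. */
        uint64_t offset = tod_ns - clockid_ns;

        printf("sample as time of day: %" PRIu64 " ns since the epoch\n",
               sample_ns + offset);
        return 0;
}
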
index adb6567..5cda556 100644 (file)
@@ -20,6 +20,7 @@
 #include "target.h"
 #include "ui/helpline.h"
 #include "ui/ui.h"
+#include "util/parse-sublevel-options.h"
 
 #include <linux/ctype.h>
 
@@ -173,65 +174,37 @@ void trace_event(union perf_event *event)
                     trace_event_printer, event);
 }
 
-static struct debug_variable {
-       const char *name;
-       int *ptr;
-} debug_variables[] = {
-       { .name = "verbose",            .ptr = &verbose },
-       { .name = "ordered-events",     .ptr = &debug_ordered_events},
-       { .name = "stderr",             .ptr = &redirect_to_stderr},
-       { .name = "data-convert",       .ptr = &debug_data_convert },
-       { .name = "perf-event-open",    .ptr = &debug_peo_args },
+static struct sublevel_option debug_opts[] = {
+       { .name = "verbose",            .value_ptr = &verbose },
+       { .name = "ordered-events",     .value_ptr = &debug_ordered_events},
+       { .name = "stderr",             .value_ptr = &redirect_to_stderr},
+       { .name = "data-convert",       .value_ptr = &debug_data_convert },
+       { .name = "perf-event-open",    .value_ptr = &debug_peo_args },
        { .name = NULL, }
 };
 
 int perf_debug_option(const char *str)
 {
-       struct debug_variable *var = &debug_variables[0];
-       char *vstr, *s = strdup(str);
-       int v = 1;
-
-       vstr = strchr(s, '=');
-       if (vstr)
-               *vstr++ = 0;
-
-       while (var->name) {
-               if (!strcmp(s, var->name))
-                       break;
-               var++;
-       }
-
-       if (!var->name) {
-               pr_err("Unknown debug variable name '%s'\n", s);
-               free(s);
-               return -1;
-       }
+       int ret;
 
-       if (vstr) {
-               v = atoi(vstr);
-               /*
-                * Allow only values in range (0, 10),
-                * otherwise set 0.
-                */
-               v = (v < 0) || (v > 10) ? 0 : v;
-       }
+       ret = perf_parse_sublevel_options(str, debug_opts);
+       if (ret)
+               return ret;
 
-       if (quiet)
-               v = -1;
+       /* Allow only verbose value in range (0, 10), otherwise set 0. */
+       verbose = (verbose < 0) || (verbose > 10) ? 0 : verbose;
 
-       *var->ptr = v;
-       free(s);
        return 0;
 }
 
 int perf_quiet_option(void)
 {
-       struct debug_variable *var = &debug_variables[0];
+       struct sublevel_option *opt = &debug_opts[0];
 
        /* disable all debug messages */
-       while (var->name) {
-               *var->ptr = -1;
-               var++;
+       while (opt->name) {
+               *opt->value_ptr = -1;
+               opt++;
        }
 
        return 0;
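perf_debug_option() now defers to perf_parse_sublevel_options() (the parse-sublevel-options object added in the Build change above), driven by the name/value_ptr table. The helper itself is not shown in this diff, so the following is only a sketch of the idea, under the assumption that it accepts "name" or "name=N" tokens, comma-separated, in the spirit of "perf --debug verbose=2"; parse_sublevel_str() is a hypothetical stand-in, not the real function:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct sublevel_option {
        const char *name;
        int *value_ptr;
};

/* Accept a comma-separated "name" or "name=N" list and store N
 * (default 1) through the matching value_ptr; unknown names fail. */
int parse_sublevel_str(const char *str, struct sublevel_option *opts)
{
        char *s = strdup(str), *tok, *saveptr = NULL;
        int ret = 0;

        if (!s)
                return -1;

        for (tok = strtok_r(s, ",", &saveptr); tok; tok = strtok_r(NULL, ",", &saveptr)) {
                char *eq = strchr(tok, '=');
                int val = 1;
                struct sublevel_option *opt;

                if (eq) {
                        *eq = '\0';
                        val = atoi(eq + 1);
                }
                for (opt = opts; opt->name; opt++) {
                        if (!strcmp(tok, opt->name)) {
                                *opt->value_ptr = val;
                                break;
                        }
                }
                if (!opt->name) {
                        fprintf(stderr, "unknown sub-option '%s'\n", tok);
                        ret = -1;
                        break;
                }
        }
        free(s);
        return ret;
}
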
index 99f0a39..5a3b475 100644 (file)
@@ -208,6 +208,7 @@ int dso__read_binary_type_filename(const struct dso *dso,
        case DSO_BINARY_TYPE__JAVA_JIT:
        case DSO_BINARY_TYPE__BPF_PROG_INFO:
        case DSO_BINARY_TYPE__BPF_IMAGE:
+       case DSO_BINARY_TYPE__OOL:
        case DSO_BINARY_TYPE__NOT_FOUND:
                ret = -1;
                break;
@@ -898,6 +899,8 @@ static struct dso_cache *dso_cache__populate(struct dso *dso,
 
        if (dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO)
                *ret = bpf_read(dso, cache_offset, cache->data);
+       else if (dso->binary_type == DSO_BINARY_TYPE__OOL)
+               *ret = DSO__DATA_CACHE_SIZE;
        else
                *ret = file_read(dso, machine, cache_offset, cache->data);
 
@@ -1262,7 +1265,7 @@ struct dso *dso__new_id(const char *name, struct dso_id *id)
                dso->has_build_id = 0;
                dso->has_srcline = 1;
                dso->a2l_fails = 1;
-               dso->kernel = DSO_TYPE_USER;
+               dso->kernel = DSO_SPACE__USER;
                dso->needs_swap = DSO_SWAP__UNSET;
                dso->comp = COMP_ID__NONE;
                RB_CLEAR_NODE(&dso->rb_node);
index d3d0327..8ad17f3 100644 (file)
@@ -42,13 +42,14 @@ enum dso_binary_type {
        DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO,
        DSO_BINARY_TYPE__BPF_PROG_INFO,
        DSO_BINARY_TYPE__BPF_IMAGE,
+       DSO_BINARY_TYPE__OOL,
        DSO_BINARY_TYPE__NOT_FOUND,
 };
 
-enum dso_kernel_type {
-       DSO_TYPE_USER = 0,
-       DSO_TYPE_KERNEL,
-       DSO_TYPE_GUEST_KERNEL
+enum dso_space_type {
+       DSO_SPACE__USER = 0,
+       DSO_SPACE__KERNEL,
+       DSO_SPACE__KERNEL_GUEST
 };
 
 enum dso_swap_type {
@@ -159,7 +160,7 @@ struct dso {
        void             *a2l;
        char             *symsrc_filename;
        unsigned int     a2l_fails;
-       enum dso_kernel_type    kernel;
+       enum dso_space_type     kernel;
        enum dso_swap_type      needs_swap;
        enum dso_binary_type    symtab_type;
        enum dso_binary_type    binary_type;
index 1ab2682..a129726 100644 (file)
@@ -77,7 +77,6 @@ struct perf_env {
        struct numa_node        *numa_nodes;
        struct memory_node      *memory_nodes;
        unsigned long long       memory_bsize;
-       u64                     clockid_res_ns;
 
        /*
         * bpf_info_lock protects bpf rbtrees. This is needed because the
@@ -100,6 +99,19 @@ struct perf_env {
        /* For fast cpu to numa node lookup via perf_env__numa_node */
        int                     *numa_map;
        int                      nr_numa_map;
+
+       /* For real clock time reference. */
+       struct {
+               u64     tod_ns;
+               u64     clockid_ns;
+               u64     clockid_res_ns;
+               int     clockid;
+               /*
+                * enabled is valid in report mode; it is true if the
+                * values above are set (done in process_clock_data).
+                */
+               bool    enabled;
+       } clock;
 };
 
 enum perf_compress_type {
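The new perf_env clock block stores a reference pair taken at record time: wall-clock time (tod_ns) and the session clock's reading at the same instant (clockid_ns), plus the clock id and resolution, so report-side code (process_clock_data, referenced in the comment above) can translate perf timestamps to time of day. Roughly how such a pair would be captured, as a sketch under those assumptions:

#include <stdint.h>
#include <sys/time.h>
#include <time.h>

struct clock_ref {
        uint64_t tod_ns;        /* wall clock at the reference point */
        uint64_t clockid_ns;    /* session clock at the same point */
        int      clockid;
};

/* Capture a TOD/clockid reference pair as close together as possible. */
int clock_ref__capture(struct clock_ref *ref, clockid_t clk)
{
        struct timeval tod;
        struct timespec ts;

        if (gettimeofday(&tod, NULL) || clock_gettime(clk, &ts))
                return -1;

        ref->tod_ns     = tod.tv_sec * 1000000000ULL + tod.tv_usec * 1000ULL;
        ref->clockid_ns = ts.tv_sec * 1000000000ULL + ts.tv_nsec;
        ref->clockid    = clk;
        return 0;
}
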
index f581550..317a265 100644 (file)
@@ -31,6 +31,7 @@
 #include "stat.h"
 #include "session.h"
 #include "bpf-event.h"
+#include "print_binary.h"
 #include "tool.h"
 #include "../perf.h"
 
@@ -55,6 +56,7 @@ static const char *perf_event__names[] = {
        [PERF_RECORD_KSYMBOL]                   = "KSYMBOL",
        [PERF_RECORD_BPF_EVENT]                 = "BPF_EVENT",
        [PERF_RECORD_CGROUP]                    = "CGROUP",
+       [PERF_RECORD_TEXT_POKE]                 = "TEXT_POKE",
        [PERF_RECORD_HEADER_ATTR]               = "ATTR",
        [PERF_RECORD_HEADER_EVENT_TYPE]         = "EVENT_TYPE",
        [PERF_RECORD_HEADER_TRACING_DATA]       = "TRACING_DATA",
@@ -267,6 +269,14 @@ int perf_event__process_bpf(struct perf_tool *tool __maybe_unused,
        return machine__process_bpf(machine, event, sample);
 }
 
+int perf_event__process_text_poke(struct perf_tool *tool __maybe_unused,
+                                 union perf_event *event,
+                                 struct perf_sample *sample,
+                                 struct machine *machine)
+{
+       return machine__process_text_poke(machine, event, sample);
+}
+
 size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp)
 {
        return fprintf(fp, " %d/%d: [%#" PRI_lx64 "(%#" PRI_lx64 ") @ %#" PRI_lx64 "]: %c %s\n",
@@ -413,7 +423,52 @@ size_t perf_event__fprintf_bpf(union perf_event *event, FILE *fp)
                       event->bpf.type, event->bpf.flags, event->bpf.id);
 }
 
-size_t perf_event__fprintf(union perf_event *event, FILE *fp)
+static int text_poke_printer(enum binary_printer_ops op, unsigned int val,
+                            void *extra, FILE *fp)
+{
+       bool old = *(bool *)extra;
+
+       switch ((int)op) {
+       case BINARY_PRINT_LINE_BEGIN:
+               return fprintf(fp, "            %s bytes:", old ? "Old" : "New");
+       case BINARY_PRINT_NUM_DATA:
+               return fprintf(fp, " %02x", val);
+       case BINARY_PRINT_LINE_END:
+               return fprintf(fp, "\n");
+       default:
+               return 0;
+       }
+}
+
+size_t perf_event__fprintf_text_poke(union perf_event *event, struct machine *machine, FILE *fp)
+{
+       struct perf_record_text_poke_event *tp = &event->text_poke;
+       size_t ret;
+       bool old;
+
+       ret = fprintf(fp, " %" PRI_lx64 " ", tp->addr);
+       if (machine) {
+               struct addr_location al;
+
+               al.map = maps__find(&machine->kmaps, tp->addr);
+               if (al.map && map__load(al.map) >= 0) {
+                       al.addr = al.map->map_ip(al.map, tp->addr);
+                       al.sym = map__find_symbol(al.map, al.addr);
+                       if (al.sym)
+                               ret += symbol__fprintf_symname_offs(al.sym, &al, fp);
+               }
+       }
+       ret += fprintf(fp, " old len %u new len %u\n", tp->old_len, tp->new_len);
+       old = true;
+       ret += binary__fprintf(tp->bytes, tp->old_len, 16, text_poke_printer,
+                              &old, fp);
+       old = false;
+       ret += binary__fprintf(tp->bytes + tp->old_len, tp->new_len, 16,
+                              text_poke_printer, &old, fp);
+       return ret;
+}
+
+size_t perf_event__fprintf(union perf_event *event, struct machine *machine, FILE *fp)
 {
        size_t ret = fprintf(fp, "PERF_RECORD_%s",
                             perf_event__name(event->header.type));
@@ -457,6 +512,9 @@ size_t perf_event__fprintf(union perf_event *event, FILE *fp)
        case PERF_RECORD_BPF_EVENT:
                ret += perf_event__fprintf_bpf(event, fp);
                break;
+       case PERF_RECORD_TEXT_POKE:
+               ret += perf_event__fprintf_text_poke(event, machine, fp);
+               break;
        default:
                ret += fprintf(fp, "\n");
        }
index 6ae01c3..b828b99 100644 (file)
@@ -351,6 +351,10 @@ int perf_event__process_bpf(struct perf_tool *tool,
                            union perf_event *event,
                            struct perf_sample *sample,
                            struct machine *machine);
+int perf_event__process_text_poke(struct perf_tool *tool,
+                                 union perf_event *event,
+                                 struct perf_sample *sample,
+                                 struct machine *machine);
 int perf_event__process(struct perf_tool *tool,
                        union perf_event *event,
                        struct perf_sample *sample,
@@ -385,7 +389,8 @@ size_t perf_event__fprintf_namespaces(union perf_event *event, FILE *fp);
 size_t perf_event__fprintf_cgroup(union perf_event *event, FILE *fp);
 size_t perf_event__fprintf_ksymbol(union perf_event *event, FILE *fp);
 size_t perf_event__fprintf_bpf(union perf_event *event, FILE *fp);
-size_t perf_event__fprintf(union perf_event *event, FILE *fp);
+size_t perf_event__fprintf_text_poke(union perf_event *event, struct machine *machine, FILE *fp);
+size_t perf_event__fprintf(union perf_event *event, struct machine *machine, FILE *fp);
 
 int kallsyms__get_function_start(const char *kallsyms_filename,
                                 const char *symbol_name, u64 *addr);
index ab48be4..e3fa3bf 100644 (file)
@@ -63,6 +63,9 @@ void evlist__init(struct evlist *evlist, struct perf_cpu_map *cpus,
        perf_evlist__set_maps(&evlist->core, cpus, threads);
        evlist->workload.pid = -1;
        evlist->bkw_mmap_state = BKW_MMAP_NOTREADY;
+       evlist->ctl_fd.fd = -1;
+       evlist->ctl_fd.ack = -1;
+       evlist->ctl_fd.pos = -1;
 }
 
 struct evlist *evlist__new(void)
@@ -79,7 +82,7 @@ struct evlist *perf_evlist__new_default(void)
 {
        struct evlist *evlist = evlist__new();
 
-       if (evlist && perf_evlist__add_default(evlist)) {
+       if (evlist && evlist__add_default(evlist)) {
                evlist__delete(evlist);
                evlist = NULL;
        }
@@ -91,7 +94,7 @@ struct evlist *perf_evlist__new_dummy(void)
 {
        struct evlist *evlist = evlist__new();
 
-       if (evlist && perf_evlist__add_dummy(evlist)) {
+       if (evlist && evlist__add_dummy(evlist)) {
                evlist__delete(evlist);
                evlist = NULL;
        }
@@ -231,7 +234,7 @@ void perf_evlist__set_leader(struct evlist *evlist)
        }
 }
 
-int __perf_evlist__add_default(struct evlist *evlist, bool precise)
+int __evlist__add_default(struct evlist *evlist, bool precise)
 {
        struct evsel *evsel = evsel__new_cycles(precise);
 
@@ -242,7 +245,7 @@ int __perf_evlist__add_default(struct evlist *evlist, bool precise)
        return 0;
 }
 
-int perf_evlist__add_dummy(struct evlist *evlist)
+int evlist__add_dummy(struct evlist *evlist)
 {
        struct perf_event_attr attr = {
                .type   = PERF_TYPE_SOFTWARE,
@@ -258,8 +261,7 @@ int perf_evlist__add_dummy(struct evlist *evlist)
        return 0;
 }
 
-static int evlist__add_attrs(struct evlist *evlist,
-                                 struct perf_event_attr *attrs, size_t nr_attrs)
+static int evlist__add_attrs(struct evlist *evlist, struct perf_event_attr *attrs, size_t nr_attrs)
 {
        struct evsel *evsel, *n;
        LIST_HEAD(head);
@@ -282,8 +284,7 @@ out_delete_partial_list:
        return -1;
 }
 
-int __perf_evlist__add_default_attrs(struct evlist *evlist,
-                                    struct perf_event_attr *attrs, size_t nr_attrs)
+int __evlist__add_default_attrs(struct evlist *evlist, struct perf_event_attr *attrs, size_t nr_attrs)
 {
        size_t i;
 
@@ -322,8 +323,7 @@ perf_evlist__find_tracepoint_by_name(struct evlist *evlist,
        return NULL;
 }
 
-int perf_evlist__add_newtp(struct evlist *evlist,
-                          const char *sys, const char *name, void *handler)
+int evlist__add_newtp(struct evlist *evlist, const char *sys, const char *name, void *handler)
 {
        struct evsel *evsel = evsel__newtp(sys, name);
 
@@ -500,7 +500,7 @@ int perf_evlist__enable_event_idx(struct evlist *evlist,
 
 int evlist__add_pollfd(struct evlist *evlist, int fd)
 {
-       return perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN);
+       return perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN, fdarray_flag__default);
 }
 
 int evlist__filter_pollfd(struct evlist *evlist, short revents_and_mask)
@@ -540,7 +540,7 @@ struct evsel *perf_evlist__id2evsel(struct evlist *evlist, u64 id)
        if (sid)
                return container_of(sid->evsel, struct evsel, core);
 
-       if (!perf_evlist__sample_id_all(evlist))
+       if (!evlist__sample_id_all(evlist))
                return evlist__first(evlist);
 
        return NULL;
@@ -1088,7 +1088,7 @@ int perf_evlist__append_tp_filter_pid(struct evlist *evlist, pid_t pid)
        return perf_evlist__append_tp_filter_pids(evlist, 1, &pid);
 }
 
-bool perf_evlist__valid_sample_type(struct evlist *evlist)
+bool evlist__valid_sample_type(struct evlist *evlist)
 {
        struct evsel *pos;
 
@@ -1107,7 +1107,7 @@ bool perf_evlist__valid_sample_type(struct evlist *evlist)
        return true;
 }
 
-u64 __perf_evlist__combined_sample_type(struct evlist *evlist)
+u64 __evlist__combined_sample_type(struct evlist *evlist)
 {
        struct evsel *evsel;
 
@@ -1120,13 +1120,13 @@ u64 __perf_evlist__combined_sample_type(struct evlist *evlist)
        return evlist->combined_sample_type;
 }
 
-u64 perf_evlist__combined_sample_type(struct evlist *evlist)
+u64 evlist__combined_sample_type(struct evlist *evlist)
 {
        evlist->combined_sample_type = 0;
-       return __perf_evlist__combined_sample_type(evlist);
+       return __evlist__combined_sample_type(evlist);
 }
 
-u64 perf_evlist__combined_branch_type(struct evlist *evlist)
+u64 evlist__combined_branch_type(struct evlist *evlist)
 {
        struct evsel *evsel;
        u64 branch_type = 0;
@@ -1191,7 +1191,7 @@ out:
        return size;
 }
 
-bool perf_evlist__valid_sample_id_all(struct evlist *evlist)
+bool evlist__valid_sample_id_all(struct evlist *evlist)
 {
        struct evsel *first = evlist__first(evlist), *pos = first;
 
@@ -1203,7 +1203,7 @@ bool perf_evlist__valid_sample_id_all(struct evlist *evlist)
        return true;
 }
 
-bool perf_evlist__sample_id_all(struct evlist *evlist)
+bool evlist__sample_id_all(struct evlist *evlist)
 {
        struct evsel *first = evlist__first(evlist);
        return first->core.attr.sample_id_all;
@@ -1464,8 +1464,7 @@ int perf_evlist__parse_sample_timestamp(struct evlist *evlist,
        return evsel__parse_sample_timestamp(evsel, event, timestamp);
 }
 
-int perf_evlist__strerror_open(struct evlist *evlist,
-                              int err, char *buf, size_t size)
+int evlist__strerror_open(struct evlist *evlist, int err, char *buf, size_t size)
 {
        int printed, value;
        char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));
@@ -1518,7 +1517,7 @@ out_default:
        return 0;
 }
 
-int perf_evlist__strerror_mmap(struct evlist *evlist, int err, char *buf, size_t size)
+int evlist__strerror_mmap(struct evlist *evlist, int err, char *buf, size_t size)
 {
        char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));
        int pages_attempted = evlist->core.mmap_len / 1024, pages_max_per_user, printed = 0;
@@ -1727,3 +1726,143 @@ struct evsel *perf_evlist__reset_weak_group(struct evlist *evsel_list,
        }
        return leader;
 }
+
+int evlist__initialize_ctlfd(struct evlist *evlist, int fd, int ack)
+{
+       if (fd == -1) {
+               pr_debug("Control descriptor is not initialized\n");
+               return 0;
+       }
+
+       evlist->ctl_fd.pos = perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN,
+                                                    fdarray_flag__nonfilterable);
+       if (evlist->ctl_fd.pos < 0) {
+               evlist->ctl_fd.pos = -1;
+               pr_err("Failed to add ctl fd entry: %m\n");
+               return -1;
+       }
+
+       evlist->ctl_fd.fd = fd;
+       evlist->ctl_fd.ack = ack;
+
+       return 0;
+}
+
+bool evlist__ctlfd_initialized(struct evlist *evlist)
+{
+       return evlist->ctl_fd.pos >= 0;
+}
+
+int evlist__finalize_ctlfd(struct evlist *evlist)
+{
+       struct pollfd *entries = evlist->core.pollfd.entries;
+
+       if (!evlist__ctlfd_initialized(evlist))
+               return 0;
+
+       entries[evlist->ctl_fd.pos].fd = -1;
+       entries[evlist->ctl_fd.pos].events = 0;
+       entries[evlist->ctl_fd.pos].revents = 0;
+
+       evlist->ctl_fd.pos = -1;
+       evlist->ctl_fd.ack = -1;
+       evlist->ctl_fd.fd = -1;
+
+       return 0;
+}
+
+static int evlist__ctlfd_recv(struct evlist *evlist, enum evlist_ctl_cmd *cmd,
+                             char *cmd_data, size_t data_size)
+{
+       int err;
+       char c;
+       size_t bytes_read = 0;
+
+       memset(cmd_data, 0, data_size);
+       data_size--;
+
+       do {
+               err = read(evlist->ctl_fd.fd, &c, 1);
+               if (err > 0) {
+                       if (c == '\n' || c == '\0')
+                               break;
+                       cmd_data[bytes_read++] = c;
+                       if (bytes_read == data_size)
+                               break;
+               } else {
+                       if (err == -1)
+                               pr_err("Failed to read from ctlfd %d: %m\n", evlist->ctl_fd.fd);
+                       break;
+               }
+       } while (1);
+
+       pr_debug("Message from ctl_fd: \"%s%s\"\n", cmd_data,
+                bytes_read == data_size ? "" : c == '\n' ? "\\n" : "\\0");
+
+       if (err > 0) {
+               if (!strncmp(cmd_data, EVLIST_CTL_CMD_ENABLE_TAG,
+                            (sizeof(EVLIST_CTL_CMD_ENABLE_TAG)-1))) {
+                       *cmd = EVLIST_CTL_CMD_ENABLE;
+               } else if (!strncmp(cmd_data, EVLIST_CTL_CMD_DISABLE_TAG,
+                                   (sizeof(EVLIST_CTL_CMD_DISABLE_TAG)-1))) {
+                       *cmd = EVLIST_CTL_CMD_DISABLE;
+               }
+       }
+
+       return err;
+}
+
+static int evlist__ctlfd_ack(struct evlist *evlist)
+{
+       int err;
+
+       if (evlist->ctl_fd.ack == -1)
+               return 0;
+
+       err = write(evlist->ctl_fd.ack, EVLIST_CTL_CMD_ACK_TAG,
+                   sizeof(EVLIST_CTL_CMD_ACK_TAG));
+       if (err == -1)
+               pr_err("failed to write to ctl_ack_fd %d: %m\n", evlist->ctl_fd.ack);
+
+       return err;
+}
+
+int evlist__ctlfd_process(struct evlist *evlist, enum evlist_ctl_cmd *cmd)
+{
+       int err = 0;
+       char cmd_data[EVLIST_CTL_CMD_MAX_LEN];
+       int ctlfd_pos = evlist->ctl_fd.pos;
+       struct pollfd *entries = evlist->core.pollfd.entries;
+
+       if (!evlist__ctlfd_initialized(evlist) || !entries[ctlfd_pos].revents)
+               return 0;
+
+       if (entries[ctlfd_pos].revents & POLLIN) {
+               err = evlist__ctlfd_recv(evlist, cmd, cmd_data,
+                                        EVLIST_CTL_CMD_MAX_LEN);
+               if (err > 0) {
+                       switch (*cmd) {
+                       case EVLIST_CTL_CMD_ENABLE:
+                               evlist__enable(evlist);
+                               break;
+                       case EVLIST_CTL_CMD_DISABLE:
+                               evlist__disable(evlist);
+                               break;
+                       case EVLIST_CTL_CMD_ACK:
+                       case EVLIST_CTL_CMD_UNSUPPORTED:
+                       default:
+                               pr_debug("ctlfd: unsupported %d\n", *cmd);
+                               break;
+                       }
+                       if (!(*cmd == EVLIST_CTL_CMD_ACK || *cmd == EVLIST_CTL_CMD_UNSUPPORTED))
+                               evlist__ctlfd_ack(evlist);
+               }
+       }
+
+       if (entries[ctlfd_pos].revents & (POLLHUP | POLLERR))
+               evlist__finalize_ctlfd(evlist);
+       else
+               entries[ctlfd_pos].revents = 0;
+
+       return err;
+}
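
The hunks above add a control-file-descriptor interface to the evlist: an external process can ask a running perf tool to enable or disable its events. Below is a minimal tool-side sketch of driving that interface; it is an illustration only, and the polling helper (evlist__poll()) and the way the two descriptors are obtained are assumptions, not part of this diff.

/*
 * Hypothetical sketch: service "enable"/"disable" commands arriving on ctl_fd;
 * acknowledgements ("ack\n") go to ack_fd, or pass -1 to skip them.
 */
static int serve_ctl_commands(struct evlist *evlist, int ctl_fd, int ack_fd,
			      volatile int *done)
{
	enum evlist_ctl_cmd cmd = EVLIST_CTL_CMD_UNSUPPORTED;
	int err;

	/* Registers ctl_fd as a non-filterable entry in evlist->core.pollfd */
	err = evlist__initialize_ctlfd(evlist, ctl_fd, ack_fd);
	if (err)
		return err;

	while (!*done) {
		evlist__poll(evlist, 100);	/* assumed polling helper */

		/*
		 * Reads one command, applies it via evlist__enable() or
		 * evlist__disable(), then writes the ack; returns 0 when
		 * nothing is pending on the control descriptor.
		 */
		err = evlist__ctlfd_process(evlist, &cmd);
		if (err < 0)
			break;
	}

	return evlist__finalize_ctlfd(evlist);
}
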
index a8081df..c73f7f7 100644 (file)
@@ -74,6 +74,11 @@ struct evlist {
                pthread_t               th;
                volatile int            done;
        } thread;
+       struct {
+               int     fd;     /* control file descriptor */
+               int     ack;    /* ack file descriptor for control commands */
+               int     pos;    /* index at evlist core object to check signals */
+       } ctl_fd;
 };
 
 struct evsel_str_handler {
@@ -92,20 +97,20 @@ void evlist__delete(struct evlist *evlist);
 void evlist__add(struct evlist *evlist, struct evsel *entry);
 void evlist__remove(struct evlist *evlist, struct evsel *evsel);
 
-int __perf_evlist__add_default(struct evlist *evlist, bool precise);
+int __evlist__add_default(struct evlist *evlist, bool precise);
 
-static inline int perf_evlist__add_default(struct evlist *evlist)
+static inline int evlist__add_default(struct evlist *evlist)
 {
-       return __perf_evlist__add_default(evlist, true);
+       return __evlist__add_default(evlist, true);
 }
 
-int __perf_evlist__add_default_attrs(struct evlist *evlist,
+int __evlist__add_default_attrs(struct evlist *evlist,
                                     struct perf_event_attr *attrs, size_t nr_attrs);
 
-#define perf_evlist__add_default_attrs(evlist, array) \
-       __perf_evlist__add_default_attrs(evlist, array, ARRAY_SIZE(array))
+#define evlist__add_default_attrs(evlist, array) \
+       __evlist__add_default_attrs(evlist, array, ARRAY_SIZE(array))
 
-int perf_evlist__add_dummy(struct evlist *evlist);
+int evlist__add_dummy(struct evlist *evlist);
 
 int perf_evlist__add_sb_event(struct evlist *evlist,
                              struct perf_event_attr *attr,
@@ -116,8 +121,7 @@ int perf_evlist__start_sb_thread(struct evlist *evlist,
                                 struct target *target);
 void perf_evlist__stop_sb_thread(struct evlist *evlist);
 
-int perf_evlist__add_newtp(struct evlist *evlist,
-                          const char *sys, const char *name, void *handler);
+int evlist__add_newtp(struct evlist *evlist, const char *sys, const char *name, void *handler);
 
 int __evlist__set_tracepoints_handlers(struct evlist *evlist,
                                       const struct evsel_str_handler *assocs,
@@ -219,10 +223,10 @@ int perf_evlist__apply_filters(struct evlist *evlist, struct evsel **err_evsel);
 void __perf_evlist__set_leader(struct list_head *list);
 void perf_evlist__set_leader(struct evlist *evlist);
 
-u64 __perf_evlist__combined_sample_type(struct evlist *evlist);
-u64 perf_evlist__combined_sample_type(struct evlist *evlist);
-u64 perf_evlist__combined_branch_type(struct evlist *evlist);
-bool perf_evlist__sample_id_all(struct evlist *evlist);
+u64 __evlist__combined_sample_type(struct evlist *evlist);
+u64 evlist__combined_sample_type(struct evlist *evlist);
+u64 evlist__combined_branch_type(struct evlist *evlist);
+bool evlist__sample_id_all(struct evlist *evlist);
 u16 perf_evlist__id_hdr_size(struct evlist *evlist);
 
 int perf_evlist__parse_sample(struct evlist *evlist, union perf_event *event,
@@ -232,8 +236,8 @@ int perf_evlist__parse_sample_timestamp(struct evlist *evlist,
                                        union perf_event *event,
                                        u64 *timestamp);
 
-bool perf_evlist__valid_sample_type(struct evlist *evlist);
-bool perf_evlist__valid_sample_id_all(struct evlist *evlist);
+bool evlist__valid_sample_type(struct evlist *evlist);
+bool evlist__valid_sample_id_all(struct evlist *evlist);
 bool perf_evlist__valid_read_format(struct evlist *evlist);
 
 void perf_evlist__splice_list_tail(struct evlist *evlist,
@@ -258,8 +262,8 @@ static inline struct evsel *evlist__last(struct evlist *evlist)
        return container_of(evsel, struct evsel, core);
 }
 
-int perf_evlist__strerror_open(struct evlist *evlist, int err, char *buf, size_t size);
-int perf_evlist__strerror_mmap(struct evlist *evlist, int err, char *buf, size_t size);
+int evlist__strerror_open(struct evlist *evlist, int err, char *buf, size_t size);
+int evlist__strerror_mmap(struct evlist *evlist, int err, char *buf, size_t size);
 
 bool perf_evlist__can_select_event(struct evlist *evlist, const char *str);
 void perf_evlist__to_front(struct evlist *evlist,
@@ -356,4 +360,25 @@ void perf_evlist__force_leader(struct evlist *evlist);
 struct evsel *perf_evlist__reset_weak_group(struct evlist *evlist,
                                                 struct evsel *evsel,
                                                bool close);
+#define EVLIST_CTL_CMD_ENABLE_TAG  "enable"
+#define EVLIST_CTL_CMD_DISABLE_TAG "disable"
+#define EVLIST_CTL_CMD_ACK_TAG     "ack\n"
+
+#define EVLIST_CTL_CMD_MAX_LEN 64
+
+enum evlist_ctl_cmd {
+       EVLIST_CTL_CMD_UNSUPPORTED = 0,
+       EVLIST_CTL_CMD_ENABLE,
+       EVLIST_CTL_CMD_DISABLE,
+       EVLIST_CTL_CMD_ACK
+};
+
+int evlist__initialize_ctlfd(struct evlist *evlist, int ctl_fd, int ctl_fd_ack);
+int evlist__finalize_ctlfd(struct evlist *evlist);
+bool evlist__ctlfd_initialized(struct evlist *evlist);
+int evlist__ctlfd_process(struct evlist *evlist, enum evlist_ctl_cmd *cmd);
+
+#define EVLIST_ENABLED_MSG "Events enabled\n"
+#define EVLIST_DISABLED_MSG "Events disabled\n"
+
 #endif /* __PERF_EVLIST_H */
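
The tags above define a tiny line-oriented protocol between an external controller and the tool: the controller writes an "enable" or "disable" command terminated by '\n' (or '\0') to the control descriptor and, if an ack descriptor was configured, reads back EVLIST_CTL_CMD_ACK_TAG ("ack\n"). A hedged sketch of the controller side follows, using only the definitions above plus <unistd.h>/<string.h>; how the two descriptors are wired up to the tool is outside this header and assumed here.

#include <string.h>
#include <unistd.h>

/* Hypothetical controller: send one command and wait for the acknowledgement. */
static int send_evlist_cmd(int ctl_fd, int ack_fd, const char *cmd_tag)
{
	char buf[EVLIST_CTL_CMD_MAX_LEN];
	ssize_t n;

	/* Commands are matched by prefix; the receiver stops at '\n' or '\0'. */
	if (write(ctl_fd, cmd_tag, strlen(cmd_tag)) < 0 ||
	    write(ctl_fd, "\n", 1) < 0)
		return -1;

	if (ack_fd < 0)
		return 0;		/* no acknowledgement requested */

	n = read(ack_fd, buf, sizeof(buf));
	if (n <= 0)
		return -1;

	return strncmp(buf, EVLIST_CTL_CMD_ACK_TAG,
		       sizeof(EVLIST_CTL_CMD_ACK_TAG) - 1) ? -1 : 0;
}

/* e.g. send_evlist_cmd(ctl_fd, ack_fd, EVLIST_CTL_CMD_DISABLE_TAG); */
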
index ef802f6..fd86500 100644 (file)
@@ -1014,12 +1014,14 @@ void evsel__config(struct evsel *evsel, struct record_opts *opts,
        if (callchain && callchain->enabled && !evsel->no_aux_samples)
                evsel__config_callchain(evsel, opts, callchain);
 
-       if (opts->sample_intr_regs && !evsel->no_aux_samples) {
+       if (opts->sample_intr_regs && !evsel->no_aux_samples &&
+           !evsel__is_dummy_event(evsel)) {
                attr->sample_regs_intr = opts->sample_intr_regs;
                evsel__set_sample_bit(evsel, REGS_INTR);
        }
 
-       if (opts->sample_user_regs && !evsel->no_aux_samples) {
+       if (opts->sample_user_regs && !evsel->no_aux_samples &&
+           !evsel__is_dummy_event(evsel)) {
                attr->sample_regs_user |= opts->sample_user_regs;
                evsel__set_sample_bit(evsel, REGS_USER);
        }
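
The hunk above stops requesting interrupt/user register sampling for dummy events. The evsel__is_dummy_event() helper used for the check is not part of this diff; assuming it simply matches the software dummy event, it would look roughly like this sketch:

/* Assumed helper (defined elsewhere): true only for the software dummy event */
static inline bool evsel__is_dummy_event(struct evsel *evsel)
{
	return (evsel->core.attr.type == PERF_TYPE_SOFTWARE) &&
	       (evsel->core.attr.config == PERF_COUNT_SW_DUMMY);
}
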
@@ -1064,7 +1066,12 @@ void evsel__config(struct evsel *evsel, struct record_opts *opts,
        attr->mmap  = track;
        attr->mmap2 = track && !perf_missing_features.mmap2;
        attr->comm  = track;
-       attr->ksymbol = track && !perf_missing_features.ksymbol;
+       /*
+        * ksymbol is tracked separately with text poke because it needs to be
+        * system wide and enabled immediately.
+        */
+       if (!opts->text_poke)
+               attr->ksymbol = track && !perf_missing_features.ksymbol;
        attr->bpf_event = track && !opts->no_bpf_event && !perf_missing_features.bpf;
 
        if (opts->record_namespaces)
@@ -2495,8 +2502,10 @@ int evsel__open_strerror(struct evsel *evsel, struct target *target,
 
                return scnprintf(msg + printed, size - printed,
                 "Consider adjusting /proc/sys/kernel/perf_event_paranoid setting to open\n"
-                "access to performance monitoring and observability operations for users\n"
-                "without CAP_PERFMON or CAP_SYS_ADMIN Linux capability.\n"
+                "access to performance monitoring and observability operations for processes\n"
+                "without CAP_PERFMON, CAP_SYS_PTRACE or CAP_SYS_ADMIN Linux capability.\n"
+                "More information can be found at 'Perf events and tool security' document:\n"
+                "https://www.kernel.org/doc/html/latest/admin-guide/perf-security.html\n"
                 "perf_event_paranoid setting is %d:\n"
                 "  -1: Allow use of (almost) all events by all users\n"
                 "      Ignore mlock limit after perf_event_mlock_kb without CAP_IPC_LOCK\n"
@@ -2528,6 +2537,10 @@ int evsel__open_strerror(struct evsel *evsel, struct target *target,
         "No such device - did you specify an out-of-range profile CPU?");
                break;
        case EOPNOTSUPP:
+               if (evsel->core.attr.aux_output)
+                       return scnprintf(msg, size,
+       "%s: PMU Hardware doesn't support 'aux_output' feature",
+                                        evsel__name(evsel));
                if (evsel->core.attr.sample_period != 0)
                        return scnprintf(msg, size,
        "%s: PMU Hardware doesn't support sampling/overflow-interrupts. Try 'perf stat'",
index f64ab91..53482ef 100644 (file)
@@ -1,10 +1,17 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <stdbool.h>
 #include <assert.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#include "metricgroup.h"
+#include "debug.h"
 #include "expr.h"
 #include "expr-bison.h"
 #include "expr-flex.h"
 #include <linux/kernel.h>
+#include <linux/zalloc.h>
+#include <ctype.h>
 
 #ifdef PARSER_DEBUG
 extern int expr_debug;
@@ -30,35 +37,144 @@ static bool key_equal(const void *key1, const void *key2,
 }
 
 /* Caller must make sure id is allocated */
-int expr__add_id(struct expr_parse_ctx *ctx, const char *name, double val)
+int expr__add_id(struct expr_parse_ctx *ctx, const char *id)
 {
-       double *val_ptr = NULL, *old_val = NULL;
+       struct expr_id_data *data_ptr = NULL, *old_data = NULL;
        char *old_key = NULL;
        int ret;
 
-       if (val != 0.0) {
-               val_ptr = malloc(sizeof(double));
-               if (!val_ptr)
-                       return -ENOMEM;
-               *val_ptr = val;
+       data_ptr = malloc(sizeof(*data_ptr));
+       if (!data_ptr)
+               return -ENOMEM;
+
+       data_ptr->parent = ctx->parent;
+
+       ret = hashmap__set(&ctx->ids, id, data_ptr,
+                          (const void **)&old_key, (void **)&old_data);
+       if (ret)
+               free(data_ptr);
+       free(old_key);
+       free(old_data);
+       return ret;
+}
+
+/* Caller must make sure id is allocated */
+int expr__add_id_val(struct expr_parse_ctx *ctx, const char *id, double val)
+{
+       struct expr_id_data *data_ptr = NULL, *old_data = NULL;
+       char *old_key = NULL;
+       int ret;
+
+       data_ptr = malloc(sizeof(*data_ptr));
+       if (!data_ptr)
+               return -ENOMEM;
+       data_ptr->val = val;
+       data_ptr->is_ref = false;
+
+       ret = hashmap__set(&ctx->ids, id, data_ptr,
+                          (const void **)&old_key, (void **)&old_data);
+       if (ret)
+               free(data_ptr);
+       free(old_key);
+       free(old_data);
+       return ret;
+}
+
+int expr__add_ref(struct expr_parse_ctx *ctx, struct metric_ref *ref)
+{
+       struct expr_id_data *data_ptr = NULL, *old_data = NULL;
+       char *old_key = NULL;
+       char *name, *p;
+       int ret;
+
+       data_ptr = zalloc(sizeof(*data_ptr));
+       if (!data_ptr)
+               return -ENOMEM;
+
+       name = strdup(ref->metric_name);
+       if (!name) {
+               free(data_ptr);
+               return -ENOMEM;
        }
-       ret = hashmap__set(&ctx->ids, name, val_ptr,
-                          (const void **)&old_key, (void **)&old_val);
+
+       /*
+        * The jevents tool converts all metric expressions
+        * to lowercase, including metric references, hence
+        * we need to add the lowercase name for the metric, so it's
+        * properly found.
+        */
+       for (p = name; *p; p++)
+               *p = tolower(*p);
+
+       /*
+        * Intentionally passing just const char pointers,
+        * originally from 'struct pmu_event' object.
+        * We don't need to change them, so there's no
+        * need to create our own copy.
+        */
+       data_ptr->ref.metric_name = ref->metric_name;
+       data_ptr->ref.metric_expr = ref->metric_expr;
+       data_ptr->ref.counted = false;
+       data_ptr->is_ref = true;
+
+       ret = hashmap__set(&ctx->ids, name, data_ptr,
+                          (const void **)&old_key, (void **)&old_data);
+       if (ret)
+               free(data_ptr);
+
+       pr_debug2("adding ref metric %s: %s\n",
+                 ref->metric_name, ref->metric_expr);
+
        free(old_key);
-       free(old_val);
+       free(old_data);
        return ret;
 }
 
-int expr__get_id(struct expr_parse_ctx *ctx, const char *id, double *val_ptr)
+int expr__get_id(struct expr_parse_ctx *ctx, const char *id,
+                struct expr_id_data **data)
+{
+       return hashmap__find(&ctx->ids, id, (void **)data) ? 0 : -1;
+}
+
+int expr__resolve_id(struct expr_parse_ctx *ctx, const char *id,
+                    struct expr_id_data **datap)
 {
-       double *data;
+       struct expr_id_data *data;
 
-       if (!hashmap__find(&ctx->ids, id, (void **)&data))
+       if (expr__get_id(ctx, id, datap) || !*datap) {
+               pr_debug("%s not found\n", id);
                return -1;
-       *val_ptr = (data == NULL) ?  0.0 : *data;
+       }
+
+       data = *datap;
+
+       pr_debug2("lookup: is_ref %d, counted %d, val %f: %s\n",
+                 data->is_ref, data->ref.counted, data->val, id);
+
+       if (data->is_ref && !data->ref.counted) {
+               data->ref.counted = true;
+               pr_debug("processing metric: %s ENTRY\n", id);
+               if (expr__parse(&data->val, ctx, data->ref.metric_expr, 1)) {
+                       pr_debug("%s failed to count\n", id);
+                       return -1;
+               }
+               pr_debug("processing metric: %s EXIT: %f\n", id, data->val);
+       }
+
        return 0;
 }
 
+void expr__del_id(struct expr_parse_ctx *ctx, const char *id)
+{
+       struct expr_id_data *old_val = NULL;
+       char *old_key = NULL;
+
+       hashmap__delete(&ctx->ids, id,
+                       (const void **)&old_key, (void **)&old_val);
+       free(old_key);
+       free(old_val);
+}
+
 void expr__ctx_init(struct expr_parse_ctx *ctx)
 {
        hashmap__init(&ctx->ids, key_hash, key_equal, NULL);
@@ -88,6 +204,8 @@ __expr__parse(double *val, struct expr_parse_ctx *ctx, const char *expr,
        void *scanner;
        int ret;
 
+       pr_debug2("parsing metric: %s\n", expr);
+
        ret = expr_lex_init_extra(&scanner_ctx, &scanner);
        if (ret)
                return ret;
@@ -116,16 +234,10 @@ int expr__parse(double *final_val, struct expr_parse_ctx *ctx,
 int expr__find_other(const char *expr, const char *one,
                     struct expr_parse_ctx *ctx, int runtime)
 {
-       double *old_val = NULL;
-       char *old_key = NULL;
        int ret = __expr__parse(NULL, ctx, expr, EXPR_OTHER, runtime);
 
-       if (one) {
-               hashmap__delete(&ctx->ids, one,
-                               (const void **)&old_key, (void **)&old_val);
-               free(old_key);
-               free(old_val);
-       }
+       if (one)
+               expr__del_id(ctx, one);
 
        return ret;
 }
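
Taken together, the hunks above let one metric reference another by name: expr__add_ref() stores the referenced metric's expression under its lowercased name, and expr__resolve_id() lazily parses that expression the first time the name is looked up, caching the value (ref.counted ensures this happens only once). A minimal sketch of the flow, assuming struct metric_ref (from metricgroup.h) only needs metric_name and metric_expr filled in; the names and counts are made up.

#include <string.h>
#include "expr.h"
#include "metricgroup.h"

/* Hypothetical: evaluate a metric "ipc" defined in terms of two event counts. */
static int eval_ipc(double *result)
{
	struct expr_parse_ctx ctx;
	struct metric_ref ref = {
		.metric_name = "ipc",
		.metric_expr = "instructions / cycles",
	};
	int err = -1;

	expr__ctx_init(&ctx);

	/* ids become hashmap keys owned by the context, hence strdup() */
	if (expr__add_id_val(&ctx, strdup("instructions"), 300.0) ||
	    expr__add_id_val(&ctx, strdup("cycles"), 600.0) ||
	    expr__add_ref(&ctx, &ref))
		goto out;

	/*
	 * Parsing "ipc" goes through expr__resolve_id(), which parses the
	 * referenced expression once and stores its value (0.5 here).
	 */
	err = expr__parse(result, &ctx, "ipc", 1);
out:
	expr__ctx_clear(&ctx);
	return err;
}
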
index 8a2c107..fc2b5e8 100644 (file)
 #include "util/hashmap.h"
 //#endif
 
+struct metric_ref;
+
+struct expr_id {
+       char            *id;
+       struct expr_id  *parent;
+};
+
 struct expr_parse_ctx {
-       struct hashmap ids;
+       struct hashmap   ids;
+       struct expr_id  *parent;
+};
+
+struct expr_id_data {
+       union {
+               double val;
+               struct {
+                       const char *metric_name;
+                       const char *metric_expr;
+                       bool counted;
+               } ref;
+               struct expr_id  *parent;
+       };
+
+       bool is_ref;
 };
 
 struct expr_scanner_ctx {
@@ -22,8 +44,14 @@ struct expr_scanner_ctx {
 
 void expr__ctx_init(struct expr_parse_ctx *ctx);
 void expr__ctx_clear(struct expr_parse_ctx *ctx);
-int expr__add_id(struct expr_parse_ctx *ctx, const char *id, double val);
-int expr__get_id(struct expr_parse_ctx *ctx, const char *id, double *val_ptr);
+void expr__del_id(struct expr_parse_ctx *ctx, const char *id);
+int expr__add_id(struct expr_parse_ctx *ctx, const char *id);
+int expr__add_id_val(struct expr_parse_ctx *ctx, const char *id, double val);
+int expr__add_ref(struct expr_parse_ctx *ctx, struct metric_ref *ref);
+int expr__get_id(struct expr_parse_ctx *ctx, const char *id,
+                struct expr_id_data **data);
+int expr__resolve_id(struct expr_parse_ctx *ctx, const char *id,
+                    struct expr_id_data **datap);
 int expr__parse(double *final_val, struct expr_parse_ctx *ctx,
                const char *expr, int runtime);
 int expr__find_other(const char *expr, const char *one,
index f397bf8..13e5e3c 100644 (file)
@@ -100,6 +100,7 @@ symbol              ({spec}|{sym})+
                }
        }
 
+d_ratio                { return D_RATIO; }
 max            { return MAX; }
 min            { return MIN; }
 if             { return IF; }
@@ -110,6 +111,8 @@ else                { return ELSE; }
 "|"            { return '|'; }
 "^"            { return '^'; }
 "&"            { return '&'; }
+"<"            { return '<'; }
+">"            { return '>'; }
 "-"            { return '-'; }
 "+"            { return '+'; }
 "*"            { return '*'; }
index bf3e898..d34b370 100644 (file)
 #include "smt.h"
 #include <string.h>
 
+static double d_ratio(double val0, double val1)
+{
+       if (val1 == 0) {
+               return 0;
+       }
+       return  val0 / val1;
+}
+
 %}
 
 %define api.pure full
 %token <num> NUMBER
 %token <str> ID
 %destructor { free ($$); } <str>
-%token MIN MAX IF ELSE SMT_ON
+%token MIN MAX IF ELSE SMT_ON D_RATIO
 %left MIN MAX IF
 %left '|'
 %left '^'
 %left '&'
+%left '<' '>'
 %left '-' '+'
 %left '*' '/' '%'
 %left NEG NOT
@@ -60,11 +69,12 @@ all_other: all_other other
 
 other: ID
 {
-       expr__add_id(ctx, $1, 0.0);
+       expr__add_id(ctx, $1);
 }
 |
 MIN | MAX | IF | ELSE | SMT_ON | NUMBER | '|' | '^' | '&' | '-' | '+' | '*' | '/' | '%' | '(' | ')' | ','
-
+|
+'<' | '>' | D_RATIO
 
 all_expr: if_expr                      { *final_val = $1; }
        ;
@@ -75,16 +85,22 @@ if_expr:
        ;
 
 expr:    NUMBER
-       | ID                    { if (expr__get_id(ctx, $1, &$$)) {
-                                       pr_debug("%s not found\n", $1);
+       | ID                    {
+                                       struct expr_id_data *data;
+
+                                       if (expr__resolve_id(ctx, $1, &data)) {
+                                               free($1);
+                                               YYABORT;
+                                       }
+
+                                       $$ = data->val;
                                        free($1);
-                                       YYABORT;
-                                 }
-                                 free($1);
                                }
        | expr '|' expr         { $$ = (long)$1 | (long)$3; }
        | expr '&' expr         { $$ = (long)$1 & (long)$3; }
        | expr '^' expr         { $$ = (long)$1 ^ (long)$3; }
+       | expr '<' expr         { $$ = $1 < $3; }
+       | expr '>' expr         { $$ = $1 > $3; }
        | expr '+' expr         { $$ = $1 + $3; }
        | expr '-' expr         { $$ = $1 - $3; }
        | expr '*' expr         { $$ = $1 * $3; }
@@ -105,6 +121,7 @@ expr:         NUMBER
        | MIN '(' expr ',' expr ')' { $$ = $3 < $5 ? $3 : $5; }
        | MAX '(' expr ',' expr ')' { $$ = $3 > $5 ? $3 : $5; }
        | SMT_ON                 { $$ = smt_on() > 0; }
+       | D_RATIO '(' expr ',' expr ')' { $$ = d_ratio($3,$5); }
        ;
 
 %%
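
The lexer and grammar additions above introduce '<' and '>' as ordinary comparison operators (evaluating to 0 or 1) and a d_ratio(x, y) helper that returns x / y but yields 0 instead of dividing by zero. A short, hedged example of exercising them through expr__parse(); the identifier names and threshold are made up.

#include <string.h>
#include "expr.h"

/* Hypothetical: 1.0 if the miss ratio exceeds 2%, 0.0 otherwise (or if accesses == 0). */
static int cache_miss_flag(double misses, double accesses, double *flag)
{
	struct expr_parse_ctx ctx;
	int err = -1;

	expr__ctx_init(&ctx);

	if (expr__add_id_val(&ctx, strdup("misses"), misses) ||
	    expr__add_id_val(&ctx, strdup("accesses"), accesses))
		goto out;

	/* d_ratio() guards the division; '>' yields 0 or 1 */
	err = expr__parse(flag, &ctx, "d_ratio(misses, accesses) > 0.02", 1);
out:
	expr__ctx_clear(&ctx);
	return err;
}
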
index 7a67d01..9cf4efd 100644 (file)
@@ -46,6 +46,7 @@
 #include "util/util.h" // perf_exe()
 #include "cputopo.h"
 #include "bpf-event.h"
+#include "clockid.h"
 
 #include <linux/ctype.h>
 #include <internal/lib.h>
@@ -891,8 +892,42 @@ static int write_auxtrace(struct feat_fd *ff,
 static int write_clockid(struct feat_fd *ff,
                         struct evlist *evlist __maybe_unused)
 {
-       return do_write(ff, &ff->ph->env.clockid_res_ns,
-                       sizeof(ff->ph->env.clockid_res_ns));
+       return do_write(ff, &ff->ph->env.clock.clockid_res_ns,
+                       sizeof(ff->ph->env.clock.clockid_res_ns));
+}
+
+static int write_clock_data(struct feat_fd *ff,
+                           struct evlist *evlist __maybe_unused)
+{
+       u64 *data64;
+       u32 data32;
+       int ret;
+
+       /* version */
+       data32 = 1;
+
+       ret = do_write(ff, &data32, sizeof(data32));
+       if (ret < 0)
+               return ret;
+
+       /* clockid */
+       data32 = ff->ph->env.clock.clockid;
+
+       ret = do_write(ff, &data32, sizeof(data32));
+       if (ret < 0)
+               return ret;
+
+       /* TOD ref time */
+       data64 = &ff->ph->env.clock.tod_ns;
+
+       ret = do_write(ff, data64, sizeof(*data64));
+       if (ret < 0)
+               return ret;
+
+       /* clockid ref time */
+       data64 = &ff->ph->env.clock.clockid_ns;
+
+       return do_write(ff, data64, sizeof(*data64));
 }
 
 static int write_dir_format(struct feat_fd *ff,
@@ -1546,7 +1581,50 @@ static void print_cpu_topology(struct feat_fd *ff, FILE *fp)
 static void print_clockid(struct feat_fd *ff, FILE *fp)
 {
        fprintf(fp, "# clockid frequency: %"PRIu64" MHz\n",
-               ff->ph->env.clockid_res_ns * 1000);
+               ff->ph->env.clock.clockid_res_ns * 1000);
+}
+
+static void print_clock_data(struct feat_fd *ff, FILE *fp)
+{
+       struct timespec clockid_ns;
+       char tstr[64], date[64];
+       struct timeval tod_ns;
+       clockid_t clockid;
+       struct tm ltime;
+       u64 ref;
+
+       if (!ff->ph->env.clock.enabled) {
+               fprintf(fp, "# reference time disabled\n");
+               return;
+       }
+
+       /* Compute TOD time. */
+       ref = ff->ph->env.clock.tod_ns;
+       tod_ns.tv_sec = ref / NSEC_PER_SEC;
+       ref -= tod_ns.tv_sec * NSEC_PER_SEC;
+       tod_ns.tv_usec = ref / NSEC_PER_USEC;
+
+       /* Compute clockid time. */
+       ref = ff->ph->env.clock.clockid_ns;
+       clockid_ns.tv_sec = ref / NSEC_PER_SEC;
+       ref -= clockid_ns.tv_sec * NSEC_PER_SEC;
+       clockid_ns.tv_nsec = ref;
+
+       clockid = ff->ph->env.clock.clockid;
+
+       if (localtime_r(&tod_ns.tv_sec, &ltime) == NULL)
+               snprintf(tstr, sizeof(tstr), "<error>");
+       else {
+               strftime(date, sizeof(date), "%F %T", &ltime);
+               scnprintf(tstr, sizeof(tstr), "%s.%06d",
+                         date, (int) tod_ns.tv_usec);
+       }
+
+       fprintf(fp, "# clockid: %s (%u)\n", clockid_name(clockid), clockid);
+       fprintf(fp, "# reference time: %s = %ld.%06d (TOD) = %ld.%09ld (%s)\n",
+                   tstr, tod_ns.tv_sec, (int) tod_ns.tv_usec,
+                   clockid_ns.tv_sec, clockid_ns.tv_nsec,
+                   clockid_name(clockid));
 }
 
 static void print_dir_format(struct feat_fd *ff, FILE *fp)
@@ -1978,7 +2056,7 @@ static int __event_process_build_id(struct perf_record_header_build_id *bev,
        struct machine *machine;
        u16 cpumode;
        struct dso *dso;
-       enum dso_kernel_type dso_type;
+       enum dso_space_type dso_space;
 
        machine = perf_session__findnew_machine(session, bev->pid);
        if (!machine)
@@ -1988,14 +2066,14 @@ static int __event_process_build_id(struct perf_record_header_build_id *bev,
 
        switch (cpumode) {
        case PERF_RECORD_MISC_KERNEL:
-               dso_type = DSO_TYPE_KERNEL;
+               dso_space = DSO_SPACE__KERNEL;
                break;
        case PERF_RECORD_MISC_GUEST_KERNEL:
-               dso_type = DSO_TYPE_GUEST_KERNEL;
+               dso_space = DSO_SPACE__KERNEL_GUEST;
                break;
        case PERF_RECORD_MISC_USER:
        case PERF_RECORD_MISC_GUEST_USER:
-               dso_type = DSO_TYPE_USER;
+               dso_space = DSO_SPACE__USER;
                break;
        default:
                goto out;
@@ -2007,14 +2085,13 @@ static int __event_process_build_id(struct perf_record_header_build_id *bev,
 
                dso__set_build_id(dso, &bev->build_id);
 
-               if (dso_type != DSO_TYPE_USER) {
+               if (dso_space != DSO_SPACE__USER) {
                        struct kmod_path m = { .name = NULL, };
 
                        if (!kmod_path__parse_name(&m, filename) && m.kmod)
                                dso__set_module_info(dso, &m, machine);
-                       else
-                               dso->kernel = dso_type;
 
+                       dso->kernel = dso_space;
                        free(m.name);
                }
 
@@ -2732,9 +2809,43 @@ out:
 static int process_clockid(struct feat_fd *ff,
                           void *data __maybe_unused)
 {
-       if (do_read_u64(ff, &ff->ph->env.clockid_res_ns))
+       if (do_read_u64(ff, &ff->ph->env.clock.clockid_res_ns))
+               return -1;
+
+       return 0;
+}
+
+static int process_clock_data(struct feat_fd *ff,
+                             void *_data __maybe_unused)
+{
+       u32 data32;
+       u64 data64;
+
+       /* version */
+       if (do_read_u32(ff, &data32))
+               return -1;
+
+       if (data32 != 1)
+               return -1;
+
+       /* clockid */
+       if (do_read_u32(ff, &data32))
+               return -1;
+
+       ff->ph->env.clock.clockid = data32;
+
+       /* TOD ref time */
+       if (do_read_u64(ff, &data64))
+               return -1;
+
+       ff->ph->env.clock.tod_ns = data64;
+
+       /* clockid ref time */
+       if (do_read_u64(ff, &data64))
                return -1;
 
+       ff->ph->env.clock.clockid_ns = data64;
+       ff->ph->env.clock.enabled = true;
        return 0;
 }
 
@@ -3008,6 +3119,7 @@ const struct perf_header_feature_ops feat_ops[HEADER_LAST_FEATURE] = {
        FEAT_OPR(BPF_BTF,       bpf_btf,        false),
        FEAT_OPR(COMPRESSED,    compressed,     false),
        FEAT_OPR(CPU_PMU_CAPS,  cpu_pmu_caps,   false),
+       FEAT_OPR(CLOCK_DATA,    clock_data,     false),
 };
 
 struct header_print_data {
index 650bd1c..2aca717 100644 (file)
@@ -44,6 +44,7 @@ enum {
        HEADER_BPF_BTF,
        HEADER_COMPRESSED,
        HEADER_CPU_PMU_CAPS,
+       HEADER_CLOCK_DATA,
        HEADER_LAST_FEATURE,
        HEADER_FEAT_BITS        = 256,
 };
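
The new HEADER_CLOCK_DATA feature stores, alongside the session's clockid, two reference times captured together at record time: a wall-clock (TOD) reading and a reading of the recording clock. print_clock_data() above only reports them; the intended consumer-side use, sketched here as an assumption rather than code from this patch, is to translate sample timestamps taken on the recording clock into wall-clock time.

#include <stdint.h>

/*
 * Hypothetical helper: convert a sample timestamp (taken on the clock named by
 * env.clock.clockid) into wall-clock nanoseconds, using the CLOCK_DATA pair.
 * Both references were captured at (nearly) the same instant, so their
 * difference is a constant offset between the two clocks.
 */
static uint64_t sample_time_to_tod_ns(uint64_t sample_time_ns,
				      uint64_t tod_ref_ns,      /* env.clock.tod_ns */
				      uint64_t clockid_ref_ns)  /* env.clock.clockid_ns */
{
	return tod_ref_ns + (sample_time_ns - clockid_ref_ns);
}
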
index f8ccfd6..697513f 100644 (file)
@@ -55,6 +55,7 @@ enum intel_pt_pkt_state {
        INTEL_PT_STATE_TIP_PGD,
        INTEL_PT_STATE_FUP,
        INTEL_PT_STATE_FUP_NO_TIP,
+       INTEL_PT_STATE_RESAMPLE,
 };
 
 static inline bool intel_pt_sample_time(enum intel_pt_pkt_state pkt_state)
@@ -65,6 +66,7 @@ static inline bool intel_pt_sample_time(enum intel_pt_pkt_state pkt_state)
        case INTEL_PT_STATE_ERR_RESYNC:
        case INTEL_PT_STATE_IN_SYNC:
        case INTEL_PT_STATE_TNT_CONT:
+       case INTEL_PT_STATE_RESAMPLE:
                return true;
        case INTEL_PT_STATE_TNT:
        case INTEL_PT_STATE_TIP:
@@ -109,6 +111,9 @@ struct intel_pt_decoder {
        bool fixup_last_mtc;
        bool have_last_ip;
        bool in_psb;
+       bool hop;
+       bool hop_psb_fup;
+       bool leap;
        enum intel_pt_param_flags flags;
        uint64_t pos;
        uint64_t last_ip;
@@ -235,6 +240,8 @@ struct intel_pt_decoder *intel_pt_decoder_new(struct intel_pt_params *params)
        decoder->data               = params->data;
        decoder->return_compression = params->return_compression;
        decoder->branch_enable      = params->branch_enable;
+       decoder->hop                = params->quick >= 1;
+       decoder->leap               = params->quick >= 2;
 
        decoder->flags              = params->flags;
 
@@ -275,6 +282,9 @@ struct intel_pt_decoder *intel_pt_decoder_new(struct intel_pt_params *params)
        intel_pt_log("timestamp: tsc_ctc_mult %u\n", decoder->tsc_ctc_mult);
        intel_pt_log("timestamp: tsc_slip %#x\n", decoder->tsc_slip);
 
+       if (decoder->hop)
+               intel_pt_log("Hop mode: decoding FUP and TIPs, but not TNT\n");
+
        return decoder;
 }
 
@@ -1164,6 +1174,7 @@ static int intel_pt_walk_fup(struct intel_pt_decoder *decoder)
                        return 0;
                if (err == -EAGAIN ||
                    intel_pt_fup_with_nlip(decoder, &intel_pt_insn, ip, err)) {
+                       decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
                        if (intel_pt_fup_event(decoder))
                                return 0;
                        return -EAGAIN;
@@ -1729,8 +1740,14 @@ static int intel_pt_walk_psbend(struct intel_pt_decoder *decoder)
 
                case INTEL_PT_FUP:
                        decoder->pge = true;
-                       if (decoder->packet.count)
+                       if (decoder->packet.count) {
                                intel_pt_set_last_ip(decoder);
+                               if (decoder->hop) {
+                                       /* Act on FUP at PSBEND */
+                                       decoder->ip = decoder->last_ip;
+                                       decoder->hop_psb_fup = true;
+                               }
+                       }
                        break;
 
                case INTEL_PT_MODE_TSX:
@@ -1874,6 +1891,127 @@ static int intel_pt_walk_fup_tip(struct intel_pt_decoder *decoder)
        }
 }
 
+static int intel_pt_resample(struct intel_pt_decoder *decoder)
+{
+       decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
+       decoder->state.type = INTEL_PT_INSTRUCTION;
+       decoder->state.from_ip = decoder->ip;
+       decoder->state.to_ip = 0;
+       return 0;
+}
+
+#define HOP_PROCESS    0
+#define HOP_IGNORE     1
+#define HOP_RETURN     2
+#define HOP_AGAIN      3
+
+static int intel_pt_scan_for_psb(struct intel_pt_decoder *decoder);
+
+/* Hop mode: Ignore TNT, do not walk code, but get ip from FUPs and TIPs */
+static int intel_pt_hop_trace(struct intel_pt_decoder *decoder, bool *no_tip, int *err)
+{
+       /* Leap from PSB to PSB, getting ip from FUP within PSB+ */
+       if (decoder->leap && !decoder->in_psb && decoder->packet.type != INTEL_PT_PSB) {
+               *err = intel_pt_scan_for_psb(decoder);
+               if (*err)
+                       return HOP_RETURN;
+       }
+
+       switch (decoder->packet.type) {
+       case INTEL_PT_TNT:
+               return HOP_IGNORE;
+
+       case INTEL_PT_TIP_PGD:
+               if (!decoder->packet.count)
+                       return HOP_IGNORE;
+               intel_pt_set_ip(decoder);
+               decoder->state.type |= INTEL_PT_TRACE_END;
+               decoder->state.from_ip = 0;
+               decoder->state.to_ip = decoder->ip;
+               return HOP_RETURN;
+
+       case INTEL_PT_TIP:
+               if (!decoder->packet.count)
+                       return HOP_IGNORE;
+               intel_pt_set_ip(decoder);
+               decoder->state.type = INTEL_PT_INSTRUCTION;
+               decoder->state.from_ip = decoder->ip;
+               decoder->state.to_ip = 0;
+               return HOP_RETURN;
+
+       case INTEL_PT_FUP:
+               if (!decoder->packet.count)
+                       return HOP_IGNORE;
+               intel_pt_set_ip(decoder);
+               if (intel_pt_fup_event(decoder))
+                       return HOP_RETURN;
+               if (!decoder->branch_enable)
+                       *no_tip = true;
+               if (*no_tip) {
+                       decoder->state.type = INTEL_PT_INSTRUCTION;
+                       decoder->state.from_ip = decoder->ip;
+                       decoder->state.to_ip = 0;
+                       return HOP_RETURN;
+               }
+               *err = intel_pt_walk_fup_tip(decoder);
+               if (!*err)
+                       decoder->pkt_state = INTEL_PT_STATE_RESAMPLE;
+               return HOP_RETURN;
+
+       case INTEL_PT_PSB:
+               decoder->last_ip = 0;
+               decoder->have_last_ip = true;
+               decoder->hop_psb_fup = false;
+               *err = intel_pt_walk_psbend(decoder);
+               if (*err == -EAGAIN)
+                       return HOP_AGAIN;
+               if (*err)
+                       return HOP_RETURN;
+               if (decoder->hop_psb_fup) {
+                       decoder->hop_psb_fup = false;
+                       decoder->state.type = INTEL_PT_INSTRUCTION;
+                       decoder->state.from_ip = decoder->ip;
+                       decoder->state.to_ip = 0;
+                       return HOP_RETURN;
+               }
+               if (decoder->cbr != decoder->cbr_seen) {
+                       decoder->state.type = 0;
+                       return HOP_RETURN;
+               }
+               return HOP_IGNORE;
+
+       case INTEL_PT_BAD:
+       case INTEL_PT_PAD:
+       case INTEL_PT_TIP_PGE:
+       case INTEL_PT_TSC:
+       case INTEL_PT_TMA:
+       case INTEL_PT_MODE_EXEC:
+       case INTEL_PT_MODE_TSX:
+       case INTEL_PT_MTC:
+       case INTEL_PT_CYC:
+       case INTEL_PT_VMCS:
+       case INTEL_PT_PSBEND:
+       case INTEL_PT_CBR:
+       case INTEL_PT_TRACESTOP:
+       case INTEL_PT_PIP:
+       case INTEL_PT_OVF:
+       case INTEL_PT_MNT:
+       case INTEL_PT_PTWRITE:
+       case INTEL_PT_PTWRITE_IP:
+       case INTEL_PT_EXSTOP:
+       case INTEL_PT_EXSTOP_IP:
+       case INTEL_PT_MWAIT:
+       case INTEL_PT_PWRE:
+       case INTEL_PT_PWRX:
+       case INTEL_PT_BBP:
+       case INTEL_PT_BIP:
+       case INTEL_PT_BEP:
+       case INTEL_PT_BEP_IP:
+       default:
+               return HOP_PROCESS;
+       }
+}
+
 static int intel_pt_walk_trace(struct intel_pt_decoder *decoder)
 {
        bool no_tip = false;
@@ -1884,6 +2022,19 @@ static int intel_pt_walk_trace(struct intel_pt_decoder *decoder)
                if (err)
                        return err;
 next:
+               if (decoder->hop) {
+                       switch (intel_pt_hop_trace(decoder, &no_tip, &err)) {
+                       case HOP_IGNORE:
+                               continue;
+                       case HOP_RETURN:
+                               return err;
+                       case HOP_AGAIN:
+                               goto next;
+                       default:
+                               break;
+                       }
+               }
+
                switch (decoder->packet.type) {
                case INTEL_PT_TNT:
                        if (!decoder->packet.count)
@@ -1913,6 +2064,12 @@ next:
                        decoder->state.from_ip = 0;
                        decoder->state.to_ip = decoder->ip;
                        decoder->state.type |= INTEL_PT_TRACE_BEGIN;
+                       /*
+                        * In hop mode, resample to get the to_ip as an
+                        * "instruction" sample.
+                        */
+                       if (decoder->hop)
+                               decoder->pkt_state = INTEL_PT_STATE_RESAMPLE;
                        return 0;
                }
 
@@ -1942,17 +2099,13 @@ next:
                        }
                        if (decoder->set_fup_mwait)
                                no_tip = true;
+                       if (no_tip)
+                               decoder->pkt_state = INTEL_PT_STATE_FUP_NO_TIP;
+                       else
+                               decoder->pkt_state = INTEL_PT_STATE_FUP;
                        err = intel_pt_walk_fup(decoder);
-                       if (err != -EAGAIN) {
-                               if (err)
-                                       return err;
-                               if (no_tip)
-                                       decoder->pkt_state =
-                                               INTEL_PT_STATE_FUP_NO_TIP;
-                               else
-                                       decoder->pkt_state = INTEL_PT_STATE_FUP;
-                               return 0;
-                       }
+                       if (err != -EAGAIN)
+                               return err;
                        if (no_tip) {
                                no_tip = false;
                                break;
@@ -1980,8 +2133,10 @@ next:
                         * possibility of another CBR change that gets caught up
                         * in the PSB+.
                         */
-                       if (decoder->cbr != decoder->cbr_seen)
+                       if (decoder->cbr != decoder->cbr_seen) {
+                               decoder->state.type = 0;
                                return 0;
+                       }
                        break;
 
                case INTEL_PT_PIP:
@@ -2022,8 +2177,10 @@ next:
 
                case INTEL_PT_CBR:
                        intel_pt_calc_cbr(decoder);
-                       if (decoder->cbr != decoder->cbr_seen)
+                       if (decoder->cbr != decoder->cbr_seen) {
+                               decoder->state.type = 0;
                                return 0;
+                       }
                        break;
 
                case INTEL_PT_MODE_EXEC:
@@ -2032,7 +2189,7 @@ next:
 
                case INTEL_PT_MODE_TSX:
                        /* MODE_TSX need not be followed by FUP */
-                       if (!decoder->pge) {
+                       if (!decoder->pge || decoder->in_psb) {
                                intel_pt_update_in_tx(decoder);
                                break;
                        }
@@ -2423,7 +2580,11 @@ static int intel_pt_sync_ip(struct intel_pt_decoder *decoder)
        if (err)
                return err;
 
-       decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
+       /* In hop mode, resample to get the to_ip as an "instruction" sample */
+       if (decoder->hop)
+               decoder->pkt_state = INTEL_PT_STATE_RESAMPLE;
+       else
+               decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
        decoder->overflow = false;
 
        decoder->state.from_ip = 0;
@@ -2531,6 +2692,7 @@ static int intel_pt_sync(struct intel_pt_decoder *decoder)
        decoder->ip = 0;
        intel_pt_clear_stack(&decoder->stack);
 
+leap:
        err = intel_pt_scan_for_psb(decoder);
        if (err)
                return err;
@@ -2544,7 +2706,20 @@ static int intel_pt_sync(struct intel_pt_decoder *decoder)
 
        if (decoder->ip) {
                decoder->state.type = 0; /* Do not have a sample */
-               decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
+               /*
+                * In hop mode, resample to get the PSB FUP ip as an
+                * "instruction" sample.
+                */
+               if (decoder->hop)
+                       decoder->pkt_state = INTEL_PT_STATE_RESAMPLE;
+               else
+                       decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
+       } else if (decoder->leap) {
+               /*
+                * In leap mode, only PSB+ is decoded, so keep leaping to the
+                * next PSB until there is an ip.
+                */
+               goto leap;
        } else {
                return intel_pt_sync_ip(decoder);
        }
@@ -2599,19 +2774,18 @@ const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder)
                        err = intel_pt_walk_tip(decoder);
                        break;
                case INTEL_PT_STATE_FUP:
-                       decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
                        err = intel_pt_walk_fup(decoder);
                        if (err == -EAGAIN)
                                err = intel_pt_walk_fup_tip(decoder);
-                       else if (!err)
-                               decoder->pkt_state = INTEL_PT_STATE_FUP;
                        break;
                case INTEL_PT_STATE_FUP_NO_TIP:
-                       decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
                        err = intel_pt_walk_fup(decoder);
                        if (err == -EAGAIN)
                                err = intel_pt_walk_trace(decoder);
                        break;
+               case INTEL_PT_STATE_RESAMPLE:
+                       err = intel_pt_resample(decoder);
+                       break;
                default:
                        err = intel_pt_bug(decoder);
                        break;
index e289e46..8645fc2 100644 (file)
@@ -250,6 +250,7 @@ struct intel_pt_params {
        uint32_t tsc_ctc_ratio_n;
        uint32_t tsc_ctc_ratio_d;
        enum intel_pt_param_flags flags;
+       unsigned int quick;
 };
 
 struct intel_pt_decoder;
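
The new 'quick' parameter is consumed in intel_pt_decoder_new() earlier in this diff: a value of 1 enables hop mode (ips are taken from FUP and TIP packets, TNT packets are ignored, so the code is not walked) and 2 additionally enables leap mode (only PSB+ is decoded, leaping from PSB to PSB). A trivial, hedged configuration sketch; the helper name is hypothetical.

/*
 * Hypothetical: request progressively faster but less detailed decoding.
 *   quick == 0  full decoding (default)
 *   quick >= 1  hop mode  (decode FUP and TIPs, but not TNT)
 *   quick >= 2  leap mode (PSB+ only, leap from PSB to PSB)
 */
static void intel_pt_params_set_quick(struct intel_pt_params *params,
				      unsigned int level)
{
	params->quick = level;
}
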
index cb3c1e5..2a8d245 100644 (file)
@@ -236,7 +236,7 @@ static void intel_pt_log_event(union perf_event *event)
        if (!intel_pt_enable_logging || !f)
                return;
 
-       perf_event__fprintf(event, f);
+       perf_event__fprintf(event, NULL, f);
 }
 
 static void intel_pt_dump_sample(struct perf_session *session,
@@ -249,6 +249,24 @@ static void intel_pt_dump_sample(struct perf_session *session,
        intel_pt_dump(pt, sample->aux_sample.data, sample->aux_sample.size);
 }
 
+static bool intel_pt_log_events(struct intel_pt *pt, u64 tm)
+{
+       struct perf_time_interval *range = pt->synth_opts.ptime_range;
+       int n = pt->synth_opts.range_num;
+
+       if (pt->synth_opts.log_plus_flags & AUXTRACE_LOG_FLG_ALL_PERF_EVTS)
+               return true;
+
+       if (pt->synth_opts.log_minus_flags & AUXTRACE_LOG_FLG_ALL_PERF_EVTS)
+               return false;
+
+       /* perf_time__ranges_skip_sample does not work if time is zero */
+       if (!tm)
+               tm = 1;
+
+       return !n || !perf_time__ranges_skip_sample(range, n, tm);
+}
+
 static int intel_pt_do_fix_overlap(struct intel_pt *pt, struct auxtrace_buffer *a,
                                   struct auxtrace_buffer *b)
 {
@@ -520,6 +538,17 @@ intel_pt_cache_lookup(struct dso *dso, struct machine *machine, u64 offset)
        return auxtrace_cache__lookup(dso->auxtrace_cache, offset);
 }
 
+static void intel_pt_cache_invalidate(struct dso *dso, struct machine *machine,
+                                     u64 offset)
+{
+       struct auxtrace_cache *c = intel_pt_cache(dso, machine);
+
+       if (!c)
+               return;
+
+       auxtrace_cache__remove(dso->auxtrace_cache, offset);
+}
+
 static inline u8 intel_pt_cpumode(struct intel_pt *pt, uint64_t ip)
 {
        return ip >= pt->kernel_start ?
@@ -1001,6 +1030,7 @@ static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt,
        params.mtc_period = intel_pt_mtc_period(pt);
        params.tsc_ctc_ratio_n = pt->tsc_ctc_ratio_n;
        params.tsc_ctc_ratio_d = pt->tsc_ctc_ratio_d;
+       params.quick = pt->synth_opts.quick;
 
        if (pt->filts.cnt > 0)
                params.pgd_ip = intel_pt_pgd_ip;
@@ -1394,7 +1424,10 @@ static int intel_pt_synth_instruction_sample(struct intel_pt_queue *ptq)
 
        sample.id = ptq->pt->instructions_id;
        sample.stream_id = ptq->pt->instructions_id;
-       sample.period = ptq->state->tot_insn_cnt - ptq->last_insn_cnt;
+       if (pt->synth_opts.quick)
+               sample.period = 1;
+       else
+               sample.period = ptq->state->tot_insn_cnt - ptq->last_insn_cnt;
 
        sample.cyc_cnt = ptq->ipc_cyc_cnt - ptq->last_in_cyc_cnt;
        if (sample.cyc_cnt) {
@@ -1852,6 +1885,15 @@ static int intel_pt_synth_error(struct intel_pt *pt, int code, int cpu,
        char msg[MAX_AUXTRACE_ERROR_MSG];
        int err;
 
+       if (pt->synth_opts.error_minus_flags) {
+               if (code == INTEL_PT_ERR_OVR &&
+                   pt->synth_opts.error_minus_flags & AUXTRACE_ERR_FLG_OVERFLOW)
+                       return 0;
+               if (code == INTEL_PT_ERR_LOST &&
+                   pt->synth_opts.error_minus_flags & AUXTRACE_ERR_FLG_DATA_LOST)
+                       return 0;
+       }
+
        intel_pt__strerror(code, msg, MAX_AUXTRACE_ERROR_MSG);
 
        auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE,
@@ -2566,10 +2608,6 @@ static int intel_pt_context_switch(struct intel_pt *pt, union perf_event *event,
                return -EINVAL;
        }
 
-       intel_pt_log("context_switch: cpu %d pid %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
-                    cpu, pid, tid, sample->time, perf_time_to_tsc(sample->time,
-                    &pt->tc));
-
        ret = intel_pt_sync_switch(pt, cpu, tid, sample->time);
        if (ret <= 0)
                return ret;
@@ -2594,6 +2632,67 @@ static int intel_pt_process_itrace_start(struct intel_pt *pt,
                                        event->itrace_start.tid);
 }
 
+static int intel_pt_find_map(struct thread *thread, u8 cpumode, u64 addr,
+                            struct addr_location *al)
+{
+       if (!al->map || addr < al->map->start || addr >= al->map->end) {
+               if (!thread__find_map(thread, cpumode, addr, al))
+                       return -1;
+       }
+
+       return 0;
+}
+
+/* Invalidate all instruction cache entries that overlap the text poke */
+static int intel_pt_text_poke(struct intel_pt *pt, union perf_event *event)
+{
+       u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
+       u64 addr = event->text_poke.addr + event->text_poke.new_len - 1;
+       /* Assume text poke begins in a basic block no more than 4096 bytes */
+       int cnt = 4096 + event->text_poke.new_len;
+       struct thread *thread = pt->unknown_thread;
+       struct addr_location al = { .map = NULL };
+       struct machine *machine = pt->machine;
+       struct intel_pt_cache_entry *e;
+       u64 offset;
+
+       if (!event->text_poke.new_len)
+               return 0;
+
+       for (; cnt; cnt--, addr--) {
+               if (intel_pt_find_map(thread, cpumode, addr, &al)) {
+                       if (addr < event->text_poke.addr)
+                               return 0;
+                       continue;
+               }
+
+               if (!al.map->dso || !al.map->dso->auxtrace_cache)
+                       continue;
+
+               offset = al.map->map_ip(al.map, addr);
+
+               e = intel_pt_cache_lookup(al.map->dso, machine, offset);
+               if (!e)
+                       continue;
+
+               if (addr + e->byte_cnt + e->length <= event->text_poke.addr) {
+                       /*
+                        * No overlap. Working backwards there cannot be another
+                        * basic block that overlaps the text poke if there is a
+                        * branch instruction before the text poke address.
+                        */
+                       if (e->branch != INTEL_PT_BR_NO_BRANCH)
+                               return 0;
+               } else {
+                       intel_pt_cache_invalidate(al.map->dso, machine, offset);
+                       intel_pt_log("Invalidated instruction cache for %s at %#"PRIx64"\n",
+                                    al.map->dso->long_name, addr);
+               }
+       }
+
+       return 0;
+}
+
 static int intel_pt_process_event(struct perf_session *session,
                                  union perf_event *event,
                                  struct perf_sample *sample,
@@ -2662,9 +2761,14 @@ static int intel_pt_process_event(struct perf_session *session,
                 event->header.type == PERF_RECORD_SWITCH_CPU_WIDE)
                err = intel_pt_context_switch(pt, event, sample);
 
-       intel_pt_log("event %u: cpu %d time %"PRIu64" tsc %#"PRIx64" ",
-                    event->header.type, sample->cpu, sample->time, timestamp);
-       intel_pt_log_event(event);
+       if (!err && event->header.type == PERF_RECORD_TEXT_POKE)
+               err = intel_pt_text_poke(pt, event);
+
+       if (intel_pt_enable_logging && intel_pt_log_events(pt, sample->time)) {
+               intel_pt_log("event %u: cpu %d time %"PRIu64" tsc %#"PRIx64" ",
+                            event->header.type, sample->cpu, sample->time, timestamp);
+               intel_pt_log_event(event);
+       }
 
        return err;
 }
index 32bb05e..0804308 100644 (file)
@@ -26,6 +26,7 @@
 #include "jit.h"
 #include "jitdump.h"
 #include "genelf.h"
+#include "thread.h"
 
 #include <linux/ctype.h>
 #include <linux/zalloc.h>
@@ -749,6 +750,28 @@ jit_detect(char *mmap_name, pid_t pid)
        return 0;
 }
 
+static void jit_add_pid(struct machine *machine, pid_t pid)
+{
+       struct thread *thread = machine__findnew_thread(machine, pid, pid);
+
+       if (!thread) {
+               pr_err("%s: thread %d not found or created\n", __func__, pid);
+               return;
+       }
+
+       thread->priv = (void *)1;
+}
+
+static bool jit_has_pid(struct machine *machine, pid_t pid)
+{
+       struct thread *thread = machine__find_thread(machine, pid, pid);
+
+       if (!thread)
+               return 0;
+
+       return (bool)thread->priv;
+}
+
 int
 jit_process(struct perf_session *session,
            struct perf_data *output,
@@ -764,8 +787,13 @@ jit_process(struct perf_session *session,
        /*
         * first, detect marker mmap (i.e., the jitdump mmap)
         */
-       if (jit_detect(filename, pid))
+       if (jit_detect(filename, pid)) {
+               // Strip //anon* mmaps if we processed a jitdump for this pid
+               if (jit_has_pid(machine, pid) && (strncmp(filename, "//anon", 6) == 0))
+                       return 1;
+
                return 0;
+       }
 
        memset(&jd, 0, sizeof(jd));
 
@@ -784,6 +812,7 @@ jit_process(struct perf_session *session,
 
        ret = jit_inject(&jd, filename);
        if (!ret) {
+               jit_add_pid(machine, pid);
                *nbytes = jd.bytes_written;
                ret = 1;
        }
index d538480..208b813 100644 (file)
@@ -703,7 +703,7 @@ static struct dso *machine__findnew_module_dso(struct machine *machine,
 
                dso__set_module_info(dso, m, machine);
                dso__set_long_name(dso, strdup(filename), true);
-               dso->kernel = DSO_TYPE_KERNEL;
+               dso->kernel = DSO_SPACE__KERNEL;
        }
 
        dso__get(dso);
@@ -753,7 +753,7 @@ static int machine__process_ksymbol_register(struct machine *machine,
                struct dso *dso = dso__new(event->ksymbol.name);
 
                if (dso) {
-                       dso->kernel = DSO_TYPE_KERNEL;
+                       dso->kernel = DSO_SPACE__KERNEL;
                        map = map__new2(0, dso);
                }
 
@@ -762,6 +762,12 @@ static int machine__process_ksymbol_register(struct machine *machine,
                        return -ENOMEM;
                }
 
+               if (event->ksymbol.ksym_type == PERF_RECORD_KSYMBOL_TYPE_OOL) {
+                       map->dso->binary_type = DSO_BINARY_TYPE__OOL;
+                       map->dso->data.file_size = event->ksymbol.len;
+                       dso__set_loaded(map->dso);
+               }
+
                map->start = event->ksymbol.addr;
                map->end = map->start + event->ksymbol.len;
                maps__insert(&machine->kmaps, map);
@@ -808,6 +814,47 @@ int machine__process_ksymbol(struct machine *machine __maybe_unused,
        return machine__process_ksymbol_register(machine, event, sample);
 }
 
+int machine__process_text_poke(struct machine *machine, union perf_event *event,
+                              struct perf_sample *sample __maybe_unused)
+{
+       struct map *map = maps__find(&machine->kmaps, event->text_poke.addr);
+       u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
+
+       if (dump_trace)
+               perf_event__fprintf_text_poke(event, machine, stdout);
+
+       if (!event->text_poke.new_len)
+               return 0;
+
+       if (cpumode != PERF_RECORD_MISC_KERNEL) {
+               pr_debug("%s: unsupported cpumode - ignoring\n", __func__);
+               return 0;
+       }
+
+       if (map && map->dso) {
+               u8 *new_bytes = event->text_poke.bytes + event->text_poke.old_len;
+               int ret;
+
+               /*
+                * Kernel maps might be changed when loading symbols so loading
+                * must be done prior to using kernel maps.
+                */
+               map__load(map);
+               ret = dso__data_write_cache_addr(map->dso, map, machine,
+                                                event->text_poke.addr,
+                                                new_bytes,
+                                                event->text_poke.new_len);
+               if (ret != event->text_poke.new_len)
+                       pr_debug("Failed to write kernel text poke at %#" PRI_lx64 "\n",
+                                event->text_poke.addr);
+       } else {
+               pr_debug("Failed to find kernel text poke address map for %#" PRI_lx64 "\n",
+                        event->text_poke.addr);
+       }
+
+       return 0;
+}
+
 static struct map *machine__addnew_module_map(struct machine *machine, u64 start,
                                              const char *filename)
 {
@@ -924,14 +971,14 @@ static struct dso *machine__get_kernel(struct machine *machine)
                        vmlinux_name = symbol_conf.vmlinux_name;
 
                kernel = machine__findnew_kernel(machine, vmlinux_name,
-                                                "[kernel]", DSO_TYPE_KERNEL);
+                                                "[kernel]", DSO_SPACE__KERNEL);
        } else {
                if (symbol_conf.default_guest_vmlinux_name)
                        vmlinux_name = symbol_conf.default_guest_vmlinux_name;
 
                kernel = machine__findnew_kernel(machine, vmlinux_name,
                                                 "[guest.kernel]",
-                                                DSO_TYPE_GUEST_KERNEL);
+                                                DSO_SPACE__KERNEL_GUEST);
        }
 
        if (kernel != NULL && (!kernel->has_build_id))
@@ -1559,7 +1606,7 @@ static int machine__process_kernel_mmap_event(struct machine *machine,
                                              union perf_event *event)
 {
        struct map *map;
-       enum dso_kernel_type kernel_type;
+       enum dso_space_type dso_space;
        bool is_kernel_mmap;
 
        /* If we have maps from kcore then we do not need or want any others */
@@ -1567,9 +1614,9 @@ static int machine__process_kernel_mmap_event(struct machine *machine,
                return 0;
 
        if (machine__is_host(machine))
-               kernel_type = DSO_TYPE_KERNEL;
+               dso_space = DSO_SPACE__KERNEL;
        else
-               kernel_type = DSO_TYPE_GUEST_KERNEL;
+               dso_space = DSO_SPACE__KERNEL_GUEST;
 
        is_kernel_mmap = memcmp(event->mmap.filename,
                                machine->mmap_name,
@@ -1629,7 +1676,7 @@ static int machine__process_kernel_mmap_event(struct machine *machine,
                if (kernel == NULL)
                        goto out_problem;
 
-               kernel->kernel = kernel_type;
+               kernel->kernel = dso_space;
                if (__machine__create_kernel_maps(machine, kernel) < 0) {
                        dso__put(kernel);
                        goto out_problem;
@@ -1930,6 +1977,8 @@ int machine__process_event(struct machine *machine, union perf_event *event,
                ret = machine__process_ksymbol(machine, event, sample); break;
        case PERF_RECORD_BPF_EVENT:
                ret = machine__process_bpf(machine, event, sample); break;
+       case PERF_RECORD_TEXT_POKE:
+               ret = machine__process_text_poke(machine, event, sample); break;
        default:
                ret = -1;
                break;
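
machine__process_text_poke() depends on the PERF_RECORD_TEXT_POKE layout: the record carries the old and new instruction bytes back to back in text_poke.bytes[], so the new bytes start at bytes + old_len, exactly as computed above before they are written into the dso data cache. A minimal sketch of splitting a record into the two views (assumes perf's event definitions; not new functionality):

/* Sketch: old bytes occupy [0, old_len), new bytes [old_len, old_len + new_len). */
static void text_poke_views(union perf_event *event,
                            const u8 **old_bytes, u16 *old_len,
                            const u8 **new_bytes, u16 *new_len)
{
        *old_bytes = event->text_poke.bytes;
        *old_len   = event->text_poke.old_len;
        *new_bytes = event->text_poke.bytes + event->text_poke.old_len;
        *new_len   = event->text_poke.new_len;
}
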
index fa1be9e..062c36a 100644 (file)
@@ -138,6 +138,9 @@ int machine__process_mmap2_event(struct machine *machine, union perf_event *even
 int machine__process_ksymbol(struct machine *machine,
                             union perf_event *event,
                             struct perf_sample *sample);
+int machine__process_text_poke(struct machine *machine,
+                              union perf_event *event,
+                              struct perf_sample *sample);
 int machine__process_event(struct machine *machine, union perf_event *event,
                                struct perf_sample *sample);
 
index 53d9661..1d72108 100644 (file)
@@ -267,6 +267,11 @@ bool __map__is_bpf_prog(const struct map *map)
        return name && (strstr(name, "bpf_prog_") == name);
 }
 
+bool __map__is_ool(const struct map *map)
+{
+       return map->dso && map->dso->binary_type == DSO_BINARY_TYPE__OOL;
+}
+
 bool map__has_symbols(const struct map *map)
 {
        return dso__has_symbols(map->dso);
@@ -481,7 +486,7 @@ u64 map__rip_2objdump(struct map *map, u64 rip)
         * kernel modules also have DSO_TYPE_USER in dso->kernel,
         * but all kernel modules are ET_REL, so won't get here.
         */
-       if (map->dso->kernel == DSO_TYPE_USER)
+       if (map->dso->kernel == DSO_SPACE__USER)
                return rip + map->dso->text_offset;
 
        return map->unmap_ip(map, rip) - map->reloc;
@@ -511,7 +516,7 @@ u64 map__objdump_2mem(struct map *map, u64 ip)
         * kernel modules also have DSO_TYPE_USER in dso->kernel,
         * but all kernel modules are ET_REL, so won't get here.
         */
-       if (map->dso->kernel == DSO_TYPE_USER)
+       if (map->dso->kernel == DSO_SPACE__USER)
                return map->unmap_ip(map, ip - map->dso->text_offset);
 
        return ip + map->reloc;
index 067036e..9e312ae 100644 (file)
@@ -147,11 +147,12 @@ int map__set_kallsyms_ref_reloc_sym(struct map *map, const char *symbol_name,
 bool __map__is_kernel(const struct map *map);
 bool __map__is_extra_kernel_map(const struct map *map);
 bool __map__is_bpf_prog(const struct map *map);
+bool __map__is_ool(const struct map *map);
 
 static inline bool __map__is_kmodule(const struct map *map)
 {
        return !__map__is_kernel(map) && !__map__is_extra_kernel_map(map) &&
-              !__map__is_bpf_prog(map);
+              !__map__is_bpf_prog(map) && !__map__is_ool(map);
 }
 
 bool map__has_symbols(const struct map *map);
index 9e21aa7..8831b96 100644 (file)
@@ -24,6 +24,7 @@
 #include <subcmd/parse-options.h>
 #include <api/fs/fs.h>
 #include "util.h"
+#include <asm/bug.h>
 
 struct metric_event *metricgroup__lookup(struct rblist *metric_events,
                                         struct evsel *evsel,
@@ -76,23 +77,78 @@ static struct rb_node *metric_event_new(struct rblist *rblist __maybe_unused,
        return &me->nd;
 }
 
+static void metric_event_delete(struct rblist *rblist __maybe_unused,
+                               struct rb_node *rb_node)
+{
+       struct metric_event *me = container_of(rb_node, struct metric_event, nd);
+       struct metric_expr *expr, *tmp;
+
+       list_for_each_entry_safe(expr, tmp, &me->head, nd) {
+               free(expr->metric_refs);
+               free(expr);
+       }
+
+       free(me);
+}
+
 static void metricgroup__rblist_init(struct rblist *metric_events)
 {
        rblist__init(metric_events);
        metric_events->node_cmp = metric_event_cmp;
        metric_events->node_new = metric_event_new;
+       metric_events->node_delete = metric_event_delete;
 }
 
-struct egroup {
+void metricgroup__rblist_exit(struct rblist *metric_events)
+{
+       rblist__exit(metric_events);
+}
+
+/*
+ * A node in the list of referenced metrics. metric_expr
+ * is held as a convenience to avoid a search through the
+ * metric list.
+ */
+struct metric_ref_node {
+       const char *metric_name;
+       const char *metric_expr;
+       struct list_head list;
+};
+
+struct metric {
        struct list_head nd;
        struct expr_parse_ctx pctx;
        const char *metric_name;
        const char *metric_expr;
        const char *metric_unit;
+       struct list_head metric_refs;
+       int metric_refs_cnt;
        int runtime;
        bool has_constraint;
 };
 
+#define RECURSION_ID_MAX 1000
+
+struct expr_ids {
+       struct expr_id  id[RECURSION_ID_MAX];
+       int             cnt;
+};
+
+static struct expr_id *expr_ids__alloc(struct expr_ids *ids)
+{
+       if (ids->cnt >= RECURSION_ID_MAX)
+               return NULL;
+       return &ids->id[ids->cnt++];
+}
+
+static void expr_ids__exit(struct expr_ids *ids)
+{
+       int i;
+
+       for (i = 0; i < ids->cnt; i++)
+               free(ids->id[i].id);
+}
+
 /**
  * Find a group of events in perf_evlist that correspond to those from a parsed
  * metric expression. Note, as find_evsel_group is called in the same order as
@@ -119,7 +175,7 @@ static struct evsel *find_evsel_group(struct evlist *perf_evlist,
                                      unsigned long *evlist_used)
 {
        struct evsel *ev, *current_leader = NULL;
-       double *val_ptr;
+       struct expr_id_data *val_ptr;
        int i = 0, matched_events = 0, events_to_match;
        const int idnum = (int)hashmap__size(&pctx->ids);
 
@@ -206,7 +262,7 @@ static int metricgroup__setup_events(struct list_head *groups,
        struct metric_expr *expr;
        int i = 0;
        int ret = 0;
-       struct egroup *eg;
+       struct metric *m;
        struct evsel *evsel, *tmp;
        unsigned long *evlist_used;
 
@@ -214,22 +270,23 @@ static int metricgroup__setup_events(struct list_head *groups,
        if (!evlist_used)
                return -ENOMEM;
 
-       list_for_each_entry (eg, groups, nd) {
+       list_for_each_entry (m, groups, nd) {
                struct evsel **metric_events;
+               struct metric_ref *metric_refs = NULL;
 
                metric_events = calloc(sizeof(void *),
-                               hashmap__size(&eg->pctx.ids) + 1);
+                               hashmap__size(&m->pctx.ids) + 1);
                if (!metric_events) {
                        ret = -ENOMEM;
                        break;
                }
-               evsel = find_evsel_group(perf_evlist, &eg->pctx,
+               evsel = find_evsel_group(perf_evlist, &m->pctx,
                                         metric_no_merge,
-                                        eg->has_constraint, metric_events,
+                                        m->has_constraint, metric_events,
                                         evlist_used);
                if (!evsel) {
                        pr_debug("Cannot resolve %s: %s\n",
-                                       eg->metric_name, eg->metric_expr);
+                                       m->metric_name, m->metric_expr);
                        free(metric_events);
                        continue;
                }
@@ -247,11 +304,41 @@ static int metricgroup__setup_events(struct list_head *groups,
                        free(metric_events);
                        break;
                }
-               expr->metric_expr = eg->metric_expr;
-               expr->metric_name = eg->metric_name;
-               expr->metric_unit = eg->metric_unit;
+
+               /*
+                * Collect and store the referenced nested expressions
+                * for metric processing.
+                */
+               if (m->metric_refs_cnt) {
+                       struct metric_ref_node *ref;
+
+                       metric_refs = zalloc(sizeof(struct metric_ref) * (m->metric_refs_cnt + 1));
+                       if (!metric_refs) {
+                               ret = -ENOMEM;
+                               free(metric_events);
+                               break;
+                       }
+
+                       i = 0;
+                       list_for_each_entry(ref, &m->metric_refs, list) {
+                               /*
+                                * Intentionally passing just const char pointers,
+                                * originally from 'struct pmu_event' object.
+                                * We don't need to change them, so there's no
+                                * need to create our own copy.
+                                */
+                               metric_refs[i].metric_name = ref->metric_name;
+                               metric_refs[i].metric_expr = ref->metric_expr;
+                               i++;
+                       }
+               }
+
+               expr->metric_refs = metric_refs;
+               expr->metric_expr = m->metric_expr;
+               expr->metric_name = m->metric_name;
+               expr->metric_unit = m->metric_unit;
                expr->metric_events = metric_events;
-               expr->runtime = eg->runtime;
+               expr->runtime = m->runtime;
                list_add(&expr->nd, &me->head);
        }
 
@@ -552,123 +639,339 @@ int __weak arch_get_runtimeparam(void)
        return 1;
 }
 
-static int __metricgroup__add_metric(struct list_head *group_list,
-                                    struct pmu_event *pe,
-                                    bool metric_no_group,
-                                    int runtime)
+static int __add_metric(struct list_head *metric_list,
+                       struct pmu_event *pe,
+                       bool metric_no_group,
+                       int runtime,
+                       struct metric **mp,
+                       struct expr_id *parent,
+                       struct expr_ids *ids)
 {
-       struct egroup *eg;
+       struct metric_ref_node *ref;
+       struct metric *m;
 
-       eg = malloc(sizeof(*eg));
-       if (!eg)
-               return -ENOMEM;
+       if (*mp == NULL) {
+               /*
+                * We got here for the parent group;
+                * allocate it and put it on the list.
+                */
+               m = zalloc(sizeof(*m));
+               if (!m)
+                       return -ENOMEM;
+
+               expr__ctx_init(&m->pctx);
+               m->metric_name = pe->metric_name;
+               m->metric_expr = pe->metric_expr;
+               m->metric_unit = pe->unit;
+               m->runtime = runtime;
+               m->has_constraint = metric_no_group || metricgroup__has_constraint(pe);
+               INIT_LIST_HEAD(&m->metric_refs);
+               m->metric_refs_cnt = 0;
+               *mp = m;
+
+               parent = expr_ids__alloc(ids);
+               if (!parent) {
+                       free(m);
+                       return -EINVAL;
+               }
+
+               parent->id = strdup(pe->metric_name);
+               if (!parent->id) {
+                       free(m);
+                       return -ENOMEM;
+               }
+       } else {
+               /*
+                * We got here for a referenced metric, via the
+                * recursive metricgroup__add_metric call; add
+                * it to the parent group.
+                */
+               m = *mp;
 
-       expr__ctx_init(&eg->pctx);
-       eg->metric_name = pe->metric_name;
-       eg->metric_expr = pe->metric_expr;
-       eg->metric_unit = pe->unit;
-       eg->runtime = runtime;
-       eg->has_constraint = metric_no_group || metricgroup__has_constraint(pe);
+               ref = malloc(sizeof(*ref));
+               if (!ref)
+                       return -ENOMEM;
 
-       if (expr__find_other(pe->metric_expr, NULL, &eg->pctx, runtime) < 0) {
-               expr__ctx_clear(&eg->pctx);
-               free(eg);
+               /*
+                * Intentionally passing just const char pointers,
+                * from 'pe' object, so they never go away. We don't
+                * need to change them, so there's no need to create
+                * our own copy.
+                */
+               ref->metric_name = pe->metric_name;
+               ref->metric_expr = pe->metric_expr;
+
+               list_add(&ref->list, &m->metric_refs);
+               m->metric_refs_cnt++;
+       }
+
+       /* Force all found IDs in metric to have us as parent ID. */
+       WARN_ON_ONCE(!parent);
+       m->pctx.parent = parent;
+
+       /*
+        * For both the parent and referenced metrics, we parse
+        * all the metric's IDs and add it to the parent context.
+        */
+       if (expr__find_other(pe->metric_expr, NULL, &m->pctx, runtime) < 0) {
+               expr__ctx_clear(&m->pctx);
+               free(m);
                return -EINVAL;
        }
 
-       if (list_empty(group_list))
-               list_add(&eg->nd, group_list);
+       /*
+        * We add a new group only in the 'parent' call,
+        * so bail out in the referenced-metric case.
+        */
+       if (m->metric_refs_cnt)
+               return 0;
+
+       if (list_empty(metric_list))
+               list_add(&m->nd, metric_list);
        else {
                struct list_head *pos;
 
                /* Place the largest groups at the front. */
-               list_for_each_prev(pos, group_list) {
-                       struct egroup *old = list_entry(pos, struct egroup, nd);
+               list_for_each_prev(pos, metric_list) {
+                       struct metric *old = list_entry(pos, struct metric, nd);
 
-                       if (hashmap__size(&eg->pctx.ids) <=
+                       if (hashmap__size(&m->pctx.ids) <=
                            hashmap__size(&old->pctx.ids))
                                break;
                }
-               list_add(&eg->nd, pos);
+               list_add(&m->nd, pos);
        }
 
        return 0;
 }
 
+#define map_for_each_event(__pe, __idx, __map)                         \
+       for (__idx = 0, __pe = &__map->table[__idx];                    \
+            __pe->name || __pe->metric_group || __pe->metric_name;     \
+            __pe = &__map->table[++__idx])
+
+#define map_for_each_metric(__pe, __idx, __map, __metric)              \
+       map_for_each_event(__pe, __idx, __map)                          \
+               if (__pe->metric_expr &&                                \
+                   (match_metric(__pe->metric_group, __metric) ||      \
+                    match_metric(__pe->metric_name, __metric)))
+
+static struct pmu_event *find_metric(const char *metric, struct pmu_events_map *map)
+{
+       struct pmu_event *pe;
+       int i;
+
+       map_for_each_event(pe, i, map) {
+               if (match_metric(pe->metric_name, metric))
+                       return pe;
+       }
+
+       return NULL;
+}
+
+static int recursion_check(struct metric *m, const char *id, struct expr_id **parent,
+                          struct expr_ids *ids)
+{
+       struct expr_id_data *data;
+       struct expr_id *p;
+       int ret;
+
+       /*
+        * We get the parent referenced by the 'id' argument and
+        * traverse through all the parent object IDs to check
+        * whether we already processed 'id'; if we did, it's
+        * recursion and we fail.
+        */
+       ret = expr__get_id(&m->pctx, id, &data);
+       if (ret)
+               return ret;
+
+       p = data->parent;
+
+       while (p->parent) {
+               if (!strcmp(p->id, id)) {
+                       pr_err("failed: recursion detected for %s\n", id);
+                       return -1;
+               }
+               p = p->parent;
+       }
+
+       /*
+        * If we are over the limit of static entries, the metric
+        * is too complex or deeply nested to process; fail as well.
+        */
+       p = expr_ids__alloc(ids);
+       if (!p) {
+               pr_err("failed: too many nested metrics\n");
+               return -EINVAL;
+       }
+
+       p->id     = strdup(id);
+       p->parent = data->parent;
+       *parent   = p;
+
+       return p->id ? 0 : -ENOMEM;
+}
+
+static int add_metric(struct list_head *metric_list,
+                     struct pmu_event *pe,
+                     bool metric_no_group,
+                     struct metric **mp,
+                     struct expr_id *parent,
+                     struct expr_ids *ids);
+
+static int __resolve_metric(struct metric *m,
+                           bool metric_no_group,
+                           struct list_head *metric_list,
+                           struct pmu_events_map *map,
+                           struct expr_ids *ids)
+{
+       struct hashmap_entry *cur;
+       size_t bkt;
+       bool all;
+       int ret;
+
+       /*
+        * Iterate over all the parsed IDs and, if an ID is itself
+        * a metric, add it to the context.
+        */
+       do {
+               all = true;
+               hashmap__for_each_entry((&m->pctx.ids), cur, bkt) {
+                       struct expr_id *parent;
+                       struct pmu_event *pe;
+
+                       pe = find_metric(cur->key, map);
+                       if (!pe)
+                               continue;
+
+                       ret = recursion_check(m, cur->key, &parent, ids);
+                       if (ret)
+                               return ret;
+
+                       all = false;
+                       /* The metric key itself needs to go out.. */
+                       expr__del_id(&m->pctx, cur->key);
+
+                       /* ... and it gets resolved to the parent context. */
+                       ret = add_metric(metric_list, pe, metric_no_group, &m, parent, ids);
+                       if (ret)
+                               return ret;
+
+                       /*
+                        * We added a new metric to the hashmap, so we need
+                        * to break the iteration and start over.
+                        */
+                       break;
+               }
+       } while (!all);
+
+       return 0;
+}
+
+static int resolve_metric(bool metric_no_group,
+                         struct list_head *metric_list,
+                         struct pmu_events_map *map,
+                         struct expr_ids *ids)
+{
+       struct metric *m;
+       int err;
+
+       list_for_each_entry(m, metric_list, nd) {
+               err = __resolve_metric(m, metric_no_group, metric_list, map, ids);
+               if (err)
+                       return err;
+       }
+       return 0;
+}
+
+static int add_metric(struct list_head *metric_list,
+                     struct pmu_event *pe,
+                     bool metric_no_group,
+                     struct metric **m,
+                     struct expr_id *parent,
+                     struct expr_ids *ids)
+{
+       struct metric *orig = *m;
+       int ret = 0;
+
+       pr_debug("metric expr %s for %s\n", pe->metric_expr, pe->metric_name);
+
+       if (!strstr(pe->metric_expr, "?")) {
+               ret = __add_metric(metric_list, pe, metric_no_group, 1, m, parent, ids);
+       } else {
+               int j, count;
+
+               count = arch_get_runtimeparam();
+
+               /* This loop creates multiple events depending on
+                * the count value and adds those events to
+                * metric_list.
+                */
+
+               for (j = 0; j < count && !ret; j++, *m = orig)
+                       ret = __add_metric(metric_list, pe, metric_no_group, j, m, parent, ids);
+       }
+
+       return ret;
+}
+
 static int metricgroup__add_metric(const char *metric, bool metric_no_group,
                                   struct strbuf *events,
-                                  struct list_head *group_list)
+                                  struct list_head *metric_list,
+                                  struct pmu_events_map *map)
 {
-       struct pmu_events_map *map = perf_pmu__find_map(NULL);
+       struct expr_ids ids = { .cnt = 0, };
        struct pmu_event *pe;
-       struct egroup *eg;
+       struct metric *m;
+       LIST_HEAD(list);
        int i, ret;
        bool has_match = false;
 
-       if (!map)
-               return 0;
+       map_for_each_metric(pe, i, map, metric) {
+               has_match = true;
+               m = NULL;
 
-       for (i = 0; ; i++) {
-               pe = &map->table[i];
+               ret = add_metric(&list, pe, metric_no_group, &m, NULL, &ids);
+               if (ret)
+                       return ret;
 
-               if (!pe->name && !pe->metric_group && !pe->metric_name) {
-                       /* End of pmu events. */
-                       if (!has_match)
-                               return -EINVAL;
-                       break;
-               }
-               if (!pe->metric_expr)
-                       continue;
-               if (match_metric(pe->metric_group, metric) ||
-                   match_metric(pe->metric_name, metric)) {
-                       has_match = true;
-                       pr_debug("metric expr %s for %s\n", pe->metric_expr, pe->metric_name);
-
-                       if (!strstr(pe->metric_expr, "?")) {
-                               ret = __metricgroup__add_metric(group_list,
-                                                               pe,
-                                                               metric_no_group,
-                                                               1);
-                               if (ret)
-                                       return ret;
-                       } else {
-                               int j, count;
-
-                               count = arch_get_runtimeparam();
-
-                               /* This loop is added to create multiple
-                                * events depend on count value and add
-                                * those events to group_list.
-                                */
-
-                               for (j = 0; j < count; j++) {
-                                       ret = __metricgroup__add_metric(
-                                               group_list, pe,
-                                               metric_no_group, j);
-                                       if (ret)
-                                               return ret;
-                               }
-                       }
-               }
+               /*
+                * Process any possible referenced metrics
+                * included in the expression.
+                */
+               ret = resolve_metric(metric_no_group,
+                                    &list, map, &ids);
+               if (ret)
+                       return ret;
        }
-       list_for_each_entry(eg, group_list, nd) {
+
+       /* No metric or metric group matched. */
+       if (!has_match)
+               return -EINVAL;
+
+       list_for_each_entry(m, &list, nd) {
                if (events->len > 0)
                        strbuf_addf(events, ",");
 
-               if (eg->has_constraint) {
+               if (m->has_constraint) {
                        metricgroup__add_metric_non_group(events,
-                                                         &eg->pctx);
+                                                         &m->pctx);
                } else {
                        metricgroup__add_metric_weak_group(events,
-                                                          &eg->pctx);
+                                                          &m->pctx);
                }
        }
+
+       list_splice(&list, metric_list);
+       expr_ids__exit(&ids);
        return 0;
 }
 
 static int metricgroup__add_metric_list(const char *list, bool metric_no_group,
                                        struct strbuf *events,
-                                       struct list_head *group_list)
+                                       struct list_head *metric_list,
+                                       struct pmu_events_map *map)
 {
        char *llist, *nlist, *p;
        int ret = -EINVAL;
@@ -683,7 +986,7 @@ static int metricgroup__add_metric_list(const char *list, bool metric_no_group,
 
        while ((p = strsep(&llist, ",")) != NULL) {
                ret = metricgroup__add_metric(p, metric_no_group, events,
-                                             group_list);
+                                             metric_list, map);
                if (ret == -EINVAL) {
                        fprintf(stderr, "Cannot find metric or group `%s'\n",
                                        p);
@@ -698,50 +1001,88 @@ static int metricgroup__add_metric_list(const char *list, bool metric_no_group,
        return ret;
 }
 
-static void metricgroup__free_egroups(struct list_head *group_list)
+static void metric__free_refs(struct metric *metric)
 {
-       struct egroup *eg, *egtmp;
+       struct metric_ref_node *ref, *tmp;
 
-       list_for_each_entry_safe (eg, egtmp, group_list, nd) {
-               expr__ctx_clear(&eg->pctx);
-               list_del_init(&eg->nd);
-               free(eg);
+       list_for_each_entry_safe(ref, tmp, &metric->metric_refs, list) {
+               list_del(&ref->list);
+               free(ref);
        }
 }
 
-int metricgroup__parse_groups(const struct option *opt,
-                             const char *str,
-                             bool metric_no_group,
-                             bool metric_no_merge,
-                             struct rblist *metric_events)
+static void metricgroup__free_metrics(struct list_head *metric_list)
+{
+       struct metric *m, *tmp;
+
+       list_for_each_entry_safe (m, tmp, metric_list, nd) {
+               metric__free_refs(m);
+               expr__ctx_clear(&m->pctx);
+               list_del_init(&m->nd);
+               free(m);
+       }
+}
+
+static int parse_groups(struct evlist *perf_evlist, const char *str,
+                       bool metric_no_group,
+                       bool metric_no_merge,
+                       struct perf_pmu *fake_pmu,
+                       struct rblist *metric_events,
+                       struct pmu_events_map *map)
 {
        struct parse_events_error parse_error;
-       struct evlist *perf_evlist = *(struct evlist **)opt->value;
        struct strbuf extra_events;
-       LIST_HEAD(group_list);
+       LIST_HEAD(metric_list);
        int ret;
 
        if (metric_events->nr_entries == 0)
                metricgroup__rblist_init(metric_events);
        ret = metricgroup__add_metric_list(str, metric_no_group,
-                                          &extra_events, &group_list);
+                                          &extra_events, &metric_list, map);
        if (ret)
                return ret;
        pr_debug("adding %s\n", extra_events.buf);
        bzero(&parse_error, sizeof(parse_error));
-       ret = parse_events(perf_evlist, extra_events.buf, &parse_error);
+       ret = __parse_events(perf_evlist, extra_events.buf, &parse_error, fake_pmu);
        if (ret) {
                parse_events_print_error(&parse_error, extra_events.buf);
                goto out;
        }
        strbuf_release(&extra_events);
-       ret = metricgroup__setup_events(&group_list, metric_no_merge,
+       ret = metricgroup__setup_events(&metric_list, metric_no_merge,
                                        perf_evlist, metric_events);
 out:
-       metricgroup__free_egroups(&group_list);
+       metricgroup__free_metrics(&metric_list);
        return ret;
 }
 
+int metricgroup__parse_groups(const struct option *opt,
+                             const char *str,
+                             bool metric_no_group,
+                             bool metric_no_merge,
+                             struct rblist *metric_events)
+{
+       struct evlist *perf_evlist = *(struct evlist **)opt->value;
+       struct pmu_events_map *map = perf_pmu__find_map(NULL);
+
+       if (!map)
+               return 0;
+
+       return parse_groups(perf_evlist, str, metric_no_group,
+                           metric_no_merge, NULL, metric_events, map);
+}
+
+int metricgroup__parse_groups_test(struct evlist *evlist,
+                                  struct pmu_events_map *map,
+                                  const char *str,
+                                  bool metric_no_group,
+                                  bool metric_no_merge,
+                                  struct rblist *metric_events)
+{
+       return parse_groups(evlist, str, metric_no_group,
+                           metric_no_merge, &perf_pmu__fake, metric_events, map);
+}
+
 bool metricgroup__has_metric(const char *metric)
 {
        struct pmu_events_map *map = perf_pmu__find_map(NULL);
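
To make the resolution machinery above concrete, consider a hypothetical metric table (all names and expressions invented). Adding "m1" first parses its expression into m1's context as the ids {a, m2}; __resolve_metric() then notices that "m2" is itself a metric, removes that id, records an m2 entry in m1->metric_refs, and re-parses m2's expression into the same context, leaving the ids {a, b, c}. A chain such as m1 -> m2 -> m1 is rejected by recursion_check(), and more than RECURSION_ID_MAX nested references exhausts expr_ids__alloc().

/* Hypothetical pmu_event table entries, for illustration only. */
static struct pmu_event example_metric_table[] = {
        { .metric_name = "m1", .metric_group = "example", .metric_expr = "a + m2" },
        { .metric_name = "m2", .metric_group = "example", .metric_expr = "b * c"  },
        { 0 }, /* all-NULL entry terminates map_for_each_event() */
};
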
index 287850b..62623a3 100644 (file)
@@ -7,8 +7,10 @@
 #include <stdbool.h>
 
 struct evsel;
+struct evlist;
 struct option;
 struct rblist;
+struct pmu_events_map;
 
 struct metric_event {
        struct rb_node nd;
@@ -16,12 +18,18 @@ struct metric_event {
        struct list_head head; /* list of metric_expr */
 };
 
+struct metric_ref {
+       const char *metric_name;
+       const char *metric_expr;
+};
+
 struct metric_expr {
        struct list_head nd;
        const char *metric_expr;
        const char *metric_name;
        const char *metric_unit;
        struct evsel **metric_events;
+       struct metric_ref *metric_refs;
        int runtime;
 };
 
@@ -34,8 +42,16 @@ int metricgroup__parse_groups(const struct option *opt,
                              bool metric_no_merge,
                              struct rblist *metric_events);
 
+int metricgroup__parse_groups_test(struct evlist *evlist,
+                                  struct pmu_events_map *map,
+                                  const char *str,
+                                  bool metric_no_group,
+                                  bool metric_no_merge,
+                                  struct rblist *metric_events);
+
 void metricgroup__print(bool metrics, bool groups, char *filter,
                        bool raw, bool details);
 bool metricgroup__has_metric(const char *metric);
 int arch_get_runtimeparam(void);
+void metricgroup__rblist_exit(struct rblist *metric_events);
 #endif
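
metricgroup__parse_groups_test() gives tests a way to run the whole metric pipeline against an arbitrary pmu_events_map using the fake PMU, so no events have to exist on the running machine. A hedged sketch of a test-style caller (error handling trimmed; the map is assumed to be something like the table sketched earlier):

static int try_parse_metric(struct pmu_events_map *map, const char *metric)
{
        struct rblist metric_events = { .nr_entries = 0, };
        struct evlist *evlist = evlist__new();
        int err;

        if (!evlist)
                return -ENOMEM;

        err = metricgroup__parse_groups_test(evlist, map, metric,
                                             false /* metric_no_group */,
                                             false /* metric_no_merge */,
                                             &metric_events);

        metricgroup__rblist_exit(&metric_events);
        evlist__delete(evlist);
        return err;
}
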
index 3decbb2..9f7260e 100644 (file)
@@ -767,8 +767,8 @@ int parse_events_load_bpf_obj(struct parse_events_state *parse_state,
 
        return 0;
 errout:
-       parse_state->error->help = strdup("(add -v to see detail)");
-       parse_state->error->str = strdup(errbuf);
+       parse_events__handle_error(parse_state->error, 0,
+                               strdup(errbuf), strdup("(add -v to see detail)"));
        return err;
 }
 
@@ -784,36 +784,38 @@ parse_events_config_bpf(struct parse_events_state *parse_state,
                return 0;
 
        list_for_each_entry(term, head_config, list) {
-               char errbuf[BUFSIZ];
                int err;
 
                if (term->type_term != PARSE_EVENTS__TERM_TYPE_USER) {
-                       snprintf(errbuf, sizeof(errbuf),
-                                "Invalid config term for BPF object");
-                       errbuf[BUFSIZ - 1] = '\0';
-
-                       parse_state->error->idx = term->err_term;
-                       parse_state->error->str = strdup(errbuf);
+                       parse_events__handle_error(parse_state->error, term->err_term,
+                                               strdup("Invalid config term for BPF object"),
+                                               NULL);
                        return -EINVAL;
                }
 
                err = bpf__config_obj(obj, term, parse_state->evlist, &error_pos);
                if (err) {
+                       char errbuf[BUFSIZ];
+                       int idx;
+
                        bpf__strerror_config_obj(obj, term, parse_state->evlist,
                                                 &error_pos, err, errbuf,
                                                 sizeof(errbuf));
-                       parse_state->error->help = strdup(
+
+                       if (err == -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUE)
+                               idx = term->err_val;
+                       else
+                               idx = term->err_term + error_pos;
+
+                       parse_events__handle_error(parse_state->error, idx,
+                                               strdup(errbuf),
+                                               strdup(
 "Hint:\tValid config terms:\n"
 "     \tmap:[<arraymap>].value<indices>=[value]\n"
 "     \tmap:[<eventmap>].event<indices>=[event]\n"
 "\n"
 "     \twhere <indices> is something like [0,3...5] or [all]\n"
-"     \t(add -v to see detail)");
-                       parse_state->error->str = strdup(errbuf);
-                       if (err == -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUE)
-                               parse_state->error->idx = term->err_val;
-                       else
-                               parse_state->error->idx = term->err_term + error_pos;
+"     \t(add -v to see detail)"));
                        return err;
                }
        }
@@ -877,8 +879,8 @@ int parse_events_load_bpf(struct parse_events_state *parse_state,
                                                   -err, errbuf,
                                                   sizeof(errbuf));
 
-               parse_state->error->help = strdup("(add -v to see detail)");
-               parse_state->error->str = strdup(errbuf);
+               parse_events__handle_error(parse_state->error, 0,
+                                       strdup(errbuf), strdup("(add -v to see detail)"));
                return err;
        }
 
@@ -1450,7 +1452,7 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
                fprintf(stderr, "' that may result in non-fatal errors\n");
        }
 
-       pmu = perf_pmu__find(name);
+       pmu = parse_state->fake_pmu ?: perf_pmu__find(name);
        if (!pmu) {
                char *err_str;
 
@@ -1483,7 +1485,7 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
                }
        }
 
-       if (perf_pmu__check_alias(pmu, head_config, &info))
+       if (!parse_state->fake_pmu && perf_pmu__check_alias(pmu, head_config, &info))
                return -EINVAL;
 
        if (verbose > 1) {
@@ -1516,7 +1518,7 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
        if (pmu->default_config && get_config_chgs(pmu, head_config, &config_terms))
                return -ENOMEM;
 
-       if (perf_pmu__config(pmu, &attr, head_config, parse_state->error)) {
+       if (!parse_state->fake_pmu && perf_pmu__config(pmu, &attr, head_config, parse_state->error)) {
                struct evsel_config_term *pos, *tmp;
 
                list_for_each_entry_safe(pos, tmp, &config_terms, list) {
@@ -2017,6 +2019,32 @@ err:
        perf_pmu__parse_cleanup();
 }
 
+/*
+ * This function injects a special term into
+ * perf_pmu_events_list so the test code
+ * can exercise this functionality.
+ */
+int perf_pmu__test_parse_init(void)
+{
+       struct perf_pmu_event_symbol *list;
+
+       list = malloc(sizeof(*list) * 1);
+       if (!list)
+               return -ENOMEM;
+
+       list->type   = PMU_EVENT_SYMBOL;
+       list->symbol = strdup("read");
+
+       if (!list->symbol) {
+               free(list);
+               return -ENOMEM;
+       }
+
+       perf_pmu_events_list = list;
+       perf_pmu_events_list_num = 1;
+       return 0;
+}
+
 enum perf_pmu_event_symbol_type
 perf_pmu__parse_check(const char *name)
 {
@@ -2078,6 +2106,8 @@ int parse_events_terms(struct list_head *terms, const char *str)
        int ret;
 
        ret = parse_events__scanner(str, &parse_state);
+       perf_pmu__parse_cleanup();
+
        if (!ret) {
                list_splice(parse_state.terms, terms);
                zfree(&parse_state.terms);
@@ -2088,15 +2118,16 @@ int parse_events_terms(struct list_head *terms, const char *str)
        return ret;
 }
 
-int parse_events(struct evlist *evlist, const char *str,
-                struct parse_events_error *err)
+int __parse_events(struct evlist *evlist, const char *str,
+                  struct parse_events_error *err, struct perf_pmu *fake_pmu)
 {
        struct parse_events_state parse_state = {
-               .list   = LIST_HEAD_INIT(parse_state.list),
-               .idx    = evlist->core.nr_entries,
-               .error  = err,
-               .evlist = evlist,
-               .stoken = PE_START_EVENTS,
+               .list     = LIST_HEAD_INIT(parse_state.list),
+               .idx      = evlist->core.nr_entries,
+               .error    = err,
+               .evlist   = evlist,
+               .stoken   = PE_START_EVENTS,
+               .fake_pmu = fake_pmu,
        };
        int ret;
 
index 1fe23a2..00cde7d 100644 (file)
@@ -33,8 +33,15 @@ const char *event_type(int type);
 
 int parse_events_option(const struct option *opt, const char *str, int unset);
 int parse_events_option_new_evlist(const struct option *opt, const char *str, int unset);
-int parse_events(struct evlist *evlist, const char *str,
-                struct parse_events_error *error);
+int __parse_events(struct evlist *evlist, const char *str, struct parse_events_error *error,
+                  struct perf_pmu *fake_pmu);
+
+static inline int parse_events(struct evlist *evlist, const char *str,
+                              struct parse_events_error *err)
+{
+       return __parse_events(evlist, str, err, NULL);
+}
+
 int parse_events_terms(struct list_head *terms, const char *str);
 int parse_filter(const struct option *opt, const char *str, int unset);
 int exclude_perf(const struct option *opt, const char *arg, int unset);
@@ -127,9 +134,10 @@ struct parse_events_state {
        int                        idx;
        int                        nr_groups;
        struct parse_events_error *error;
-       struct evlist     *evlist;
+       struct evlist             *evlist;
        struct list_head          *terms;
        int                        stoken;
+       struct perf_pmu           *fake_pmu;
 };
 
 void parse_events__handle_error(struct parse_events_error *err, int idx,
@@ -253,4 +261,6 @@ static inline bool is_sdt_event(char *str __maybe_unused)
 }
 #endif /* HAVE_LIBELF_SUPPORT */
 
+int perf_pmu__test_parse_init(void);
+
 #endif /* __PERF_PARSE_EVENTS_H */
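
parse_events() is now a thin inline wrapper that passes a NULL fake_pmu to __parse_events(), so every existing caller keeps its behaviour; only the test paths pass &perf_pmu__fake. A minimal usage sketch of the wrapper (event string chosen arbitrarily):

/* Build an evlist from an event string, or return NULL on parse failure. */
static struct evlist *events_from_string(const char *str)
{
        struct parse_events_error err;
        struct evlist *evlist = evlist__new();

        if (!evlist)
                return NULL;

        bzero(&err, sizeof(err));
        if (parse_events(evlist, str, &err)) {
                parse_events_print_error(&err, str);
                evlist__delete(evlist);
                return NULL;
        }
        return evlist;
}
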
index 002802e..3ca5fd2 100644 (file)
@@ -41,14 +41,6 @@ static int value(yyscan_t scanner, int base)
        return __value(yylval, text, base, PE_VALUE);
 }
 
-static int raw(yyscan_t scanner)
-{
-       YYSTYPE *yylval = parse_events_get_lval(scanner);
-       char *text = parse_events_get_text(scanner);
-
-       return __value(yylval, text + 1, 16, PE_RAW);
-}
-
 static int str(yyscan_t scanner, int token)
 {
        YYSTYPE *yylval = parse_events_get_lval(scanner);
@@ -72,6 +64,17 @@ static int str(yyscan_t scanner, int token)
        return token;
 }
 
+static int raw(yyscan_t scanner)
+{
+       YYSTYPE *yylval = parse_events_get_lval(scanner);
+       char *text = parse_events_get_text(scanner);
+
+       if (perf_pmu__parse_check(text) == PMU_EVENT_SYMBOL)
+               return str(scanner, PE_NAME);
+
+       return __value(yylval, text + 1, 16, PE_RAW);
+}
+
 static bool isbpf_suffix(char *text)
 {
        int len = strlen(text);
@@ -129,12 +132,16 @@ do {                                                              \
        yyless(0);                                              \
 } while (0)
 
-static int pmu_str_check(yyscan_t scanner)
+static int pmu_str_check(yyscan_t scanner, struct parse_events_state *parse_state)
 {
        YYSTYPE *yylval = parse_events_get_lval(scanner);
        char *text = parse_events_get_text(scanner);
 
        yylval->str = strdup(text);
+
+       if (parse_state->fake_pmu)
+               return PE_PMU_EVENT_FAKE;
+
        switch (perf_pmu__parse_check(text)) {
                case PMU_EVENT_SYMBOL_PREFIX:
                        return PE_PMU_EVENT_PRE;
@@ -289,6 +296,7 @@ percore                     { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_PERCORE); }
 aux-output             { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT); }
 aux-sample-size                { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE); }
 r{num_raw_hex}         { return raw(yyscanner); }
+r0x{num_raw_hex}       { return raw(yyscanner); }
 ,                      { return ','; }
 "/"                    { BEGIN(INITIAL); return '/'; }
 {name_minus}           { return str(yyscanner, PE_NAME); }
@@ -376,7 +384,7 @@ r{num_raw_hex}              { return raw(yyscanner); }
 {modifier_event}       { return str(yyscanner, PE_MODIFIER_EVENT); }
 {bpf_object}           { if (!isbpf(yyscanner)) { USER_REJECT }; return str(yyscanner, PE_BPF_OBJECT); }
 {bpf_source}           { if (!isbpf(yyscanner)) { USER_REJECT }; return str(yyscanner, PE_BPF_SOURCE); }
-{name}                 { return pmu_str_check(yyscanner); }
+{name}                 { return pmu_str_check(yyscanner, _parse_state); }
 {name_tag}             { return str(yyscanner, PE_NAME); }
 "/"                    { BEGIN(config); return '/'; }
 -                      { return '-'; }
index acef87d..b9fb91f 100644 (file)
@@ -69,7 +69,7 @@ static void inc_group_count(struct list_head *list,
 %token PE_NAME_CACHE_TYPE PE_NAME_CACHE_OP_RESULT
 %token PE_PREFIX_MEM PE_PREFIX_RAW PE_PREFIX_GROUP
 %token PE_ERROR
-%token PE_PMU_EVENT_PRE PE_PMU_EVENT_SUF PE_KERNEL_PMU_EVENT
+%token PE_PMU_EVENT_PRE PE_PMU_EVENT_SUF PE_KERNEL_PMU_EVENT PE_PMU_EVENT_FAKE
 %token PE_ARRAY_ALL PE_ARRAY_RANGE
 %token PE_DRV_CFG_TERM
 %type <num> PE_VALUE
@@ -87,7 +87,7 @@ static void inc_group_count(struct list_head *list,
 %type <str> PE_MODIFIER_EVENT
 %type <str> PE_MODIFIER_BP
 %type <str> PE_EVENT_NAME
-%type <str> PE_PMU_EVENT_PRE PE_PMU_EVENT_SUF PE_KERNEL_PMU_EVENT
+%type <str> PE_PMU_EVENT_PRE PE_PMU_EVENT_SUF PE_KERNEL_PMU_EVENT PE_PMU_EVENT_FAKE
 %type <str> PE_DRV_CFG_TERM
 %destructor { free ($$); } <str>
 %type <term> event_term
@@ -356,6 +356,43 @@ PE_PMU_EVENT_PRE '-' PE_PMU_EVENT_SUF sep_dc
                YYABORT;
        $$ = list;
 }
+|
+PE_PMU_EVENT_FAKE sep_dc
+{
+       struct list_head *list;
+       int err;
+
+       list = alloc_list();
+       if (!list)
+               YYABORT;
+
+       err = parse_events_add_pmu(_parse_state, list, $1, NULL, false, false);
+       free($1);
+       if (err < 0) {
+               free(list);
+               YYABORT;
+       }
+       $$ = list;
+}
+|
+PE_PMU_EVENT_FAKE opt_pmu_config
+{
+       struct list_head *list;
+       int err;
+
+       list = alloc_list();
+       if (!list)
+               YYABORT;
+
+       err = parse_events_add_pmu(_parse_state, list, $1, $2, false, false);
+       free($1);
+       parse_events_terms__delete($2);
+       if (err < 0) {
+               free(list);
+               YYABORT;
+       }
+       $$ = list;
+}
 
 value_sym:
 PE_VALUE_SYM_HW
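
The PE_PMU_EVENT_FAKE token is only produced when parse_state->fake_pmu is set; the two grammar rules above then feed any name (with or without a config term list) straight into parse_events_add_pmu() without requiring the PMU to exist in sysfs. A hedged sketch of driving that path directly, with an invented PMU name:

/* "made_up_pmu" does not need to exist when parsing against the fake PMU. */
static int parse_with_fake_pmu(struct evlist *evlist)
{
        struct parse_events_error err;

        bzero(&err, sizeof(err));
        return __parse_events(evlist, "made_up_pmu/event=0x1,umask=0x2/", &err,
                              &perf_pmu__fake);
}
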
diff --git a/tools/perf/util/parse-sublevel-options.c b/tools/perf/util/parse-sublevel-options.c
new file mode 100644 (file)
index 0000000..a841d17
--- /dev/null
@@ -0,0 +1,70 @@
+#include <stdlib.h>
+#include <stdint.h>
+#include <string.h>
+#include <stdio.h>
+
+#include "util/debug.h"
+#include "util/parse-sublevel-options.h"
+
+static int parse_one_sublevel_option(const char *str,
+                                    struct sublevel_option *opts)
+{
+       struct sublevel_option *opt = opts;
+       char *vstr, *s = strdup(str);
+       int v = 1;
+
+       if (!s) {
+               pr_err("no memory\n");
+               return -1;
+       }
+
+       vstr = strchr(s, '=');
+       if (vstr)
+               *vstr++ = 0;
+
+       while (opt->name) {
+               if (!strcmp(s, opt->name))
+                       break;
+               opt++;
+       }
+
+       if (!opt->name) {
+               pr_err("Unknown option name '%s'\n", s);
+               free(s);
+               return -1;
+       }
+
+       if (vstr)
+               v = atoi(vstr);
+
+       *opt->value_ptr = v;
+       free(s);
+       return 0;
+}
+
+/* parse options like --foo a=<n>,b,c... */
+int perf_parse_sublevel_options(const char *str, struct sublevel_option *opts)
+{
+       char *s = strdup(str);
+       char *p = NULL;
+       int ret;
+
+       if (!s) {
+               pr_err("no memory\n");
+               return -1;
+       }
+
+       p = strtok(s, ",");
+       while (p) {
+               ret = parse_one_sublevel_option(p, opts);
+               if (ret) {
+                       free(s);
+                       return ret;
+               }
+
+               p = strtok(NULL, ",");
+       }
+
+       free(s);
+       return 0;
+}
diff --git a/tools/perf/util/parse-sublevel-options.h b/tools/perf/util/parse-sublevel-options.h
new file mode 100644 (file)
index 0000000..9b9efcc
--- /dev/null
@@ -0,0 +1,11 @@
+#ifndef _PERF_PARSE_SUBLEVEL_OPTIONS_H
+#define _PERF_PARSE_SUBLEVEL_OPTIONS_H
+
+struct sublevel_option {
+       const char *name;
+       int *value_ptr;
+};
+
+int perf_parse_sublevel_options(const char *str, struct sublevel_option *opts);
+
+#endif
\ No newline at end of file
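
perf_parse_sublevel_options() splits a comma-separated string, matches each token against the NULL-terminated sublevel_option table, and stores the value through value_ptr, defaulting to 1 when no '=<n>' suffix is given. A usage sketch with invented option names:

#include "util/parse-sublevel-options.h"

static int foo_verbose;
static int foo_dump;

/* e.g. str == "verbose=2,dump" leaves foo_verbose == 2 and foo_dump == 1 */
static int parse_foo_options(const char *str)
{
        struct sublevel_option opts[] = {
                { .name = "verbose", .value_ptr = &foo_verbose },
                { .name = "dump",    .value_ptr = &foo_dump    },
                { .name = NULL,      .value_ptr = NULL         },
        };

        return perf_parse_sublevel_options(str, opts);
}
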
index 1337965..3840d02 100644 (file)
@@ -93,6 +93,11 @@ static void perf_probe_context_switch(struct evsel *evsel)
        evsel->core.attr.context_switch = 1;
 }
 
+static void perf_probe_text_poke(struct evsel *evsel)
+{
+       evsel->core.attr.text_poke = 1;
+}
+
 bool perf_can_sample_identifier(void)
 {
        return perf_probe_api(perf_probe_sample_identifier);
@@ -108,6 +113,11 @@ bool perf_can_record_switch_events(void)
        return perf_probe_api(perf_probe_context_switch);
 }
 
+bool perf_can_record_text_poke_events(void)
+{
+       return perf_probe_api(perf_probe_text_poke);
+}
+
 bool perf_can_record_cpu_wide(void)
 {
        struct perf_event_attr attr = {
index 706c3c6..d5506a9 100644 (file)
@@ -9,6 +9,7 @@ bool perf_can_aux_sample(void);
 bool perf_can_comm_exec(void);
 bool perf_can_record_cpu_wide(void);
 bool perf_can_record_switch_events(void);
+bool perf_can_record_text_poke_events(void);
 bool perf_can_sample_identifier(void);
 
 #endif // __PERF_API_PROBE_H
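
perf_can_record_text_poke_events() follows the existing perf_probe_api() pattern: probe by attempting to open an event with attr.text_poke set and report whether the running kernel accepted it. A hypothetical caller gating the new record_opts.text_poke flag (the real record.c wiring is outside this excerpt; helper name and message are invented):

static int record__check_text_poke(struct record_opts *opts)
{
        if (opts->text_poke && !perf_can_record_text_poke_events()) {
                pr_err("Kernel does not support PERF_RECORD_TEXT_POKE events\n");
                return -EINVAL;
        }
        return 0;
}
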
index b94fa07..e67a227 100644 (file)
@@ -147,6 +147,7 @@ int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
        PRINT_ATTRf(aux_watermark, p_unsigned);
        PRINT_ATTRf(sample_max_stack, p_unsigned);
        PRINT_ATTRf(aux_sample_size, p_unsigned);
+       PRINT_ATTRf(text_poke, p_unsigned);
 
        return ret;
 }
index 93fe72a..f1688e1 100644 (file)
@@ -26,6 +26,8 @@
 #include "strbuf.h"
 #include "fncache.h"
 
+struct perf_pmu perf_pmu__fake;
+
 struct perf_pmu_format {
        char *name;
        int value;
@@ -1400,6 +1402,7 @@ struct sevent {
        char *pmu;
        char *metric_expr;
        char *metric_name;
+       int is_cpu;
 };
 
 static int cmp_sevent(const void *a, const void *b)
@@ -1416,6 +1419,11 @@ static int cmp_sevent(const void *a, const void *b)
                if (n)
                        return n;
        }
+
+       /* Order CPU core events to be first */
+       if (as->is_cpu != bs->is_cpu)
+               return bs->is_cpu - as->is_cpu;
+
        return strcmp(as->name, bs->name);
 }
 
@@ -1475,7 +1483,7 @@ void print_pmu_events(const char *event_glob, bool name_only, bool quiet_flag,
                list_for_each_entry(alias, &pmu->aliases, list) {
                        char *name = alias->desc ? alias->name :
                                format_alias(buf, sizeof(buf), pmu, alias);
-                       bool is_cpu = !strcmp(pmu->name, "cpu");
+                       bool is_cpu = is_pmu_core(pmu->name);
 
                        if (alias->deprecated && !deprecated)
                                continue;
@@ -1507,6 +1515,7 @@ void print_pmu_events(const char *event_glob, bool name_only, bool quiet_flag,
                        aliases[j].pmu = pmu->name;
                        aliases[j].metric_expr = alias->metric_expr;
                        aliases[j].metric_name = alias->metric_name;
+                       aliases[j].is_cpu = is_cpu;
                        j++;
                }
                if (pmu->selectable &&
index f971d9a..44ccbdb 100644 (file)
@@ -43,6 +43,8 @@ struct perf_pmu {
        struct list_head list;    /* ELEM */
 };
 
+extern struct perf_pmu perf_pmu__fake;
+
 struct perf_pmu_info {
        const char *unit;
        const char *metric_expr;
index df713a5..99d36ac 100644 (file)
@@ -375,9 +375,13 @@ static int find_alternative_probe_point(struct debuginfo *dinfo,
 
        /* Find the address of given function */
        map__for_each_symbol_by_name(map, pp->function, sym) {
-               if (uprobes)
+               if (uprobes) {
                        address = sym->start;
-               else
+                       if (sym->type == STT_GNU_IFUNC)
+                               pr_warning("Warning: The probe function (%s) is a GNU indirect function.\n"
+                                          "Consider identifying the final function used at run time and setting the probe directly on it.\n",
+                                          pp->function);
+               } else
                        address = map->unmap_ip(map, sym->start) - map->reloc;
                break;
        }
@@ -2968,6 +2972,16 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev,
        for (j = 0; j < num_matched_functions; j++) {
                sym = syms[j];
 
+               /* There can be duplicated symbols in the map */
+               for (i = 0; i < j; i++)
+                       if (sym->start == syms[i]->start) {
+                               pr_debug("Found duplicated symbol %s @ %" PRIx64 "\n",
+                                        sym->name, sym->start);
+                               break;
+                       }
+               if (i != j)
+                       continue;
+
                tev = (*tevs) + ret;
                tp = &tev->point;
                if (ret == num_matched_functions) {
index 5592425..6590243 100644 (file)
@@ -1408,6 +1408,9 @@ static int fill_empty_trace_arg(struct perf_probe_event *pev,
        char *type;
        int i, j, ret;
 
+       if (!ntevs)
+               return -ENOENT;
+
        for (i = 0; i < pev->nargs; i++) {
                type = NULL;
                for (j = 0; j < ntevs; j++) {
@@ -1464,7 +1467,7 @@ int debuginfo__find_trace_events(struct debuginfo *dbg,
        if (ret >= 0 && tf.pf.skip_empty_arg)
                ret = fill_empty_trace_arg(pev, tf.tevs, tf.ntevs);
 
-       if (ret < 0) {
+       if (ret < 0 || tf.ntevs == 0) {
                for (i = 0; i < tf.ntevs; i++)
                        clear_probe_trace_event(&tf.tevs[i]);
                zfree(tevs);
index 39d1de4..03678ff 100644 (file)
@@ -48,6 +48,7 @@ struct record_opts {
        bool          sample_id;
        bool          no_bpf_event;
        bool          kcore;
+       bool          text_poke;
        unsigned int  freq;
        unsigned int  mmap_pages;
        unsigned int  auxtrace_mmap_pages;
@@ -61,7 +62,7 @@ struct record_opts {
        const char    *auxtrace_snapshot_opts;
        const char    *auxtrace_sample_opts;
        bool          sample_transaction;
-       unsigned      initial_delay;
+       int           initial_delay;
        bool          use_clockid;
        clockid_t     clockid;
        u64           clockid_res_ns;
@@ -70,6 +71,8 @@ struct record_opts {
        int           mmap_flush;
        unsigned int  comp_level;
        unsigned int  nr_threads_synthesize;
+       int           ctl_fd;
+       int           ctl_fd_ack;
 };
 
 extern const char * const *record_usage;
index 1a157e8..ffbc9d3 100644 (file)
@@ -115,12 +115,12 @@ static int perf_session__open(struct perf_session *session)
        if (perf_header__has_feat(&session->header, HEADER_STAT))
                return 0;
 
-       if (!perf_evlist__valid_sample_type(session->evlist)) {
+       if (!evlist__valid_sample_type(session->evlist)) {
                pr_err("non matching sample_type\n");
                return -1;
        }
 
-       if (!perf_evlist__valid_sample_id_all(session->evlist)) {
+       if (!evlist__valid_sample_id_all(session->evlist)) {
                pr_err("non matching sample_id_all\n");
                return -1;
        }
@@ -252,10 +252,10 @@ struct perf_session *perf_session__new(struct perf_data *data,
 
        /*
         * In pipe-mode, evlist is empty until PERF_RECORD_HEADER_ATTR is
-        * processed, so perf_evlist__sample_id_all is not meaningful here.
+        * processed, so evlist__sample_id_all is not meaningful here.
         */
        if ((!data || !data->is_pipe) && tool && tool->ordering_requires_timestamps &&
-           tool->ordered_events && !perf_evlist__sample_id_all(session->evlist)) {
+           tool->ordered_events && !evlist__sample_id_all(session->evlist)) {
                dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
                tool->ordered_events = false;
        }
@@ -490,6 +490,8 @@ void perf_tool__fill_defaults(struct perf_tool *tool)
                tool->ksymbol = perf_event__process_ksymbol;
        if (tool->bpf == NULL)
                tool->bpf = perf_event__process_bpf;
+       if (tool->text_poke == NULL)
+               tool->text_poke = perf_event__process_text_poke;
        if (tool->read == NULL)
                tool->read = process_event_sample_stub;
        if (tool->throttle == NULL)
@@ -659,6 +661,24 @@ static void perf_event__switch_swap(union perf_event *event, bool sample_id_all)
                swap_sample_id_all(event, &event->context_switch + 1);
 }
 
+static void perf_event__text_poke_swap(union perf_event *event, bool sample_id_all)
+{
+       event->text_poke.addr    = bswap_64(event->text_poke.addr);
+       event->text_poke.old_len = bswap_16(event->text_poke.old_len);
+       event->text_poke.new_len = bswap_16(event->text_poke.new_len);
+
+       if (sample_id_all) {
+               size_t len = sizeof(event->text_poke.old_len) +
+                            sizeof(event->text_poke.new_len) +
+                            event->text_poke.old_len +
+                            event->text_poke.new_len;
+               void *data = &event->text_poke.old_len;
+
+               data += PERF_ALIGN(len, sizeof(u64));
+               swap_sample_id_all(event, data);
+       }
+}
+
 static void perf_event__throttle_swap(union perf_event *event,
                                      bool sample_id_all)
 {
@@ -932,6 +952,7 @@ static perf_event__swap_op perf_event__swap_ops[] = {
        [PERF_RECORD_SWITCH]              = perf_event__switch_swap,
        [PERF_RECORD_SWITCH_CPU_WIDE]     = perf_event__switch_swap,
        [PERF_RECORD_NAMESPACES]          = perf_event__namespaces_swap,
+       [PERF_RECORD_TEXT_POKE]           = perf_event__text_poke_swap,
        [PERF_RECORD_HEADER_ATTR]         = perf_event__hdr_attr_swap,
        [PERF_RECORD_HEADER_EVENT_TYPE]   = perf_event__event_type_swap,
        [PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
@@ -1160,10 +1181,10 @@ static void perf_evlist__print_tstamp(struct evlist *evlist,
                                       union perf_event *event,
                                       struct perf_sample *sample)
 {
-       u64 sample_type = __perf_evlist__combined_sample_type(evlist);
+       u64 sample_type = __evlist__combined_sample_type(evlist);
 
        if (event->header.type != PERF_RECORD_SAMPLE &&
-           !perf_evlist__sample_id_all(evlist)) {
+           !evlist__sample_id_all(evlist)) {
                fputs("-1 -1 ", stdout);
                return;
        }
@@ -1474,6 +1495,8 @@ static int machines__deliver_event(struct machines *machines,
                return tool->ksymbol(tool, event, sample, machine);
        case PERF_RECORD_BPF_EVENT:
                return tool->bpf(tool, event, sample, machine);
+       case PERF_RECORD_TEXT_POKE:
+               return tool->text_poke(tool, event, sample, machine);
        default:
                ++evlist->stats.nr_unknown_events;
                return -1;
@@ -1655,7 +1678,7 @@ int perf_session__peek_event(struct perf_session *session, off_t file_offset,
                return -1;
 
        if (session->header.needs_swap)
-               event_swap(event, perf_evlist__sample_id_all(session->evlist));
+               event_swap(event, evlist__sample_id_all(session->evlist));
 
 out_parse_sample:
 
@@ -1704,7 +1727,7 @@ static s64 perf_session__process_event(struct perf_session *session,
        int ret;
 
        if (session->header.needs_swap)
-               event_swap(event, perf_evlist__sample_id_all(evlist));
+               event_swap(event, evlist__sample_id_all(evlist));
 
        if (event->header.type >= PERF_RECORD_HEADER_MAX)
                return -EINVAL;
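In perf_event__text_poke_swap() above, the sample_id_all trailer is located past the two 16-bit length fields and the variable-length old/new instruction bytes, rounded up to an 8-byte boundary. A worked example of that offset, with ALIGN8() standing in for PERF_ALIGN(len, sizeof(u64)) under the assumption that it rounds up to the next multiple of 8:

#include <stdio.h>

#define ALIGN8(x) (((x) + 7UL) & ~7UL)

int main(void)
{
        unsigned short old_len = 5, new_len = 5;   /* e.g. a 5-byte jump patched in place */
        unsigned long len = sizeof(old_len) + sizeof(new_len) + old_len + new_len;

        /* len = 2 + 2 + 5 + 5 = 14, rounded up to 16: the sample_id_all
         * data begins 16 bytes past &event->text_poke.old_len. */
        printf("trailer offset: %lu bytes\n", ALIGN8(len));
        return 0;
}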
index a7c13a8..e1ba6c1 100644 (file)
@@ -730,25 +730,17 @@ static void print_smi_cost(struct perf_stat_config *config,
        out->print_metric(config, out->ctx, NULL, "%4.0f", "SMI#", smi_num);
 }
 
-static void generic_metric(struct perf_stat_config *config,
-                          const char *metric_expr,
-                          struct evsel **metric_events,
-                          char *name,
-                          const char *metric_name,
-                          const char *metric_unit,
-                          int runtime,
-                          int cpu,
-                          struct perf_stat_output_ctx *out,
-                          struct runtime_stat *st)
+static int prepare_metric(struct evsel **metric_events,
+                         struct metric_ref *metric_refs,
+                         struct expr_parse_ctx *pctx,
+                         int cpu,
+                         struct runtime_stat *st)
 {
-       print_metric_t print_metric = out->print_metric;
-       struct expr_parse_ctx pctx;
-       double ratio, scale;
-       int i;
-       void *ctxp = out->ctx;
+       double scale;
        char *n, *pn;
+       int i, j, ret;
 
-       expr__ctx_init(&pctx);
+       expr__ctx_init(pctx);
        for (i = 0; metric_events[i]; i++) {
                struct saved_value *v;
                struct stats *stats;
@@ -771,7 +763,7 @@ static void generic_metric(struct perf_stat_config *config,
 
                n = strdup(metric_events[i]->name);
                if (!n)
-                       return;
+                       return -ENOMEM;
                /*
                 * This display code with --no-merge adds [cpu] postfixes.
                 * These are not supported by the parser. Remove everything
@@ -782,11 +774,42 @@ static void generic_metric(struct perf_stat_config *config,
                        *pn = 0;
 
                if (metric_total)
-                       expr__add_id(&pctx, n, metric_total);
+                       expr__add_id_val(pctx, n, metric_total);
                else
-                       expr__add_id(&pctx, n, avg_stats(stats)*scale);
+                       expr__add_id_val(pctx, n, avg_stats(stats)*scale);
        }
 
+       for (j = 0; metric_refs && metric_refs[j].metric_name; j++) {
+               ret = expr__add_ref(pctx, &metric_refs[j]);
+               if (ret)
+                       return ret;
+       }
+
+       return i;
+}
+
+static void generic_metric(struct perf_stat_config *config,
+                          const char *metric_expr,
+                          struct evsel **metric_events,
+                          struct metric_ref *metric_refs,
+                          char *name,
+                          const char *metric_name,
+                          const char *metric_unit,
+                          int runtime,
+                          int cpu,
+                          struct perf_stat_output_ctx *out,
+                          struct runtime_stat *st)
+{
+       print_metric_t print_metric = out->print_metric;
+       struct expr_parse_ctx pctx;
+       double ratio, scale;
+       int i;
+       void *ctxp = out->ctx;
+
+       i = prepare_metric(metric_events, metric_refs, &pctx, cpu, st);
+       if (i < 0)
+               return;
+
        if (!metric_events[i]) {
                if (expr__parse(&ratio, &pctx, metric_expr, runtime) == 0) {
                        char *unit;
@@ -827,6 +850,20 @@ static void generic_metric(struct perf_stat_config *config,
        expr__ctx_clear(&pctx);
 }
 
+double test_generic_metric(struct metric_expr *mexp, int cpu, struct runtime_stat *st)
+{
+       struct expr_parse_ctx pctx;
+       double ratio;
+
+       if (prepare_metric(mexp->metric_events, mexp->metric_refs, &pctx, cpu, st) < 0)
+               return 0.;
+
+       if (expr__parse(&ratio, &pctx, mexp->metric_expr, 1))
+               return 0.;
+
+       return ratio;
+}
+
 void perf_stat__print_shadow_stats(struct perf_stat_config *config,
                                   struct evsel *evsel,
                                   double avg, int cpu,
@@ -1035,8 +1072,8 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
                else
                        print_metric(config, ctxp, NULL, NULL, name, 0);
        } else if (evsel->metric_expr) {
-               generic_metric(config, evsel->metric_expr, evsel->metric_events, evsel->name,
-                               evsel->metric_name, NULL, 1, cpu, out, st);
+               generic_metric(config, evsel->metric_expr, evsel->metric_events, NULL,
+                               evsel->name, evsel->metric_name, NULL, 1, cpu, out, st);
        } else if (runtime_stat_n(st, STAT_NSECS, 0, cpu) != 0) {
                char unit = 'M';
                char unit_buf[10];
@@ -1064,7 +1101,7 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
                        if (num++ > 0)
                                out->new_line(config, ctxp);
                        generic_metric(config, mexp->metric_expr, mexp->metric_events,
-                                       evsel->name, mexp->metric_name,
+                                       mexp->metric_refs, evsel->name, mexp->metric_name,
                                        mexp->metric_unit, mexp->runtime, cpu, out, st);
                }
        }
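The refactor above pulls the "seed the expression context with each event's averaged value" step out of generic_metric() into prepare_metric(), so that test_generic_metric() can evaluate a metric outside the printing path. A compressed sketch of that expr flow, restricted to calls visible in the hunks (expr__ctx_init, expr__add_id_val, expr__parse, expr__ctx_clear) and assuming perf's util/expr.h from this series; the event names and values are invented:

#include <string.h>
#include "util/expr.h"

static double example_ipc(void)
{
        struct expr_parse_ctx pctx;
        double ratio = 0.0;

        expr__ctx_init(&pctx);
        /* In prepare_metric() these values come from avg_stats() * scale. */
        expr__add_id_val(&pctx, strdup("instructions"), 1000.0);
        expr__add_id_val(&pctx, strdup("cycles"), 2000.0);

        if (expr__parse(&ratio, &pctx, "instructions / cycles", 1))
                ratio = 0.0;            /* parse or lookup failed */

        expr__ctx_clear(&pctx);
        return ratio;                   /* 0.5 for these inputs */
}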
index f75ae67..f8778cf 100644 (file)
@@ -116,7 +116,7 @@ struct perf_stat_config {
        FILE                    *output;
        unsigned int             interval;
        unsigned int             timeout;
-       unsigned int             initial_delay;
+       int                      initial_delay;
        unsigned int             unit_width;
        unsigned int             metric_only_len;
        int                      times;
@@ -133,6 +133,8 @@ struct perf_stat_config {
        struct perf_cpu_map             *cpus_aggr_map;
        u64                     *walltime_run;
        struct rblist            metric_events;
+       int                      ctl_fd;
+       int                      ctl_fd_ack;
 };
 
 void perf_stat__set_big_num(int set);
@@ -230,4 +232,7 @@ perf_evlist__print_counters(struct evlist *evlist,
                            struct target *_target,
                            struct timespec *ts,
                            int argc, const char **argv);
+
+struct metric_expr;
+double test_generic_metric(struct metric_expr *mexp, int cpu, struct runtime_stat *st);
 #endif
index 5e43054..8cc4b00 100644 (file)
@@ -789,7 +789,7 @@ int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name,
        if (ss->opdshdr.sh_type != SHT_PROGBITS)
                ss->opdsec = NULL;
 
-       if (dso->kernel == DSO_TYPE_USER)
+       if (dso->kernel == DSO_SPACE__USER)
                ss->adjust_symbols = true;
        else
                ss->adjust_symbols = elf__needs_adjust_symbols(ehdr);
@@ -872,7 +872,7 @@ static int dso__process_kernel_symbol(struct dso *dso, struct map *map,
                 * kallsyms and identity maps.  Overwrite it to
                 * map to the kernel dso.
                 */
-               if (*remap_kernel && dso->kernel) {
+               if (*remap_kernel && dso->kernel && !kmodule) {
                        *remap_kernel = false;
                        map->start = shdr->sh_addr + ref_reloc(kmap);
                        map->end = map->start + shdr->sh_size;
@@ -1068,7 +1068,7 @@ int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss,
         * Initial kernel and module mappings do not map to the dso.
         * Flag the fixups.
         */
-       if (dso->kernel || kmodule) {
+       if (dso->kernel) {
                remap_kernel = true;
                adjust_kernel_syms = dso->adjust_symbols;
        }
@@ -1130,7 +1130,7 @@ int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss,
                    (sym.st_value & 1))
                        --sym.st_value;
 
-               if (dso->kernel || kmodule) {
+               if (dso->kernel) {
                        if (dso__process_kernel_symbol(dso, map, &sym, &shdr, kmaps, kmap, &curr_dso, &curr_map,
                                                       section_name, adjust_kernel_syms, kmodule, &remap_kernel))
                                goto out_elf_end;
index 5ddf84d..1f5fcb8 100644 (file)
@@ -666,6 +666,8 @@ static bool symbol__is_idle(const char *name)
                "poll_idle",
                "ppc64_runlatch_off",
                "pseries_dedicated_idle_sleep",
+               "psw_idle",
+               "psw_idle_exit",
                NULL
        };
        int i;
@@ -806,7 +808,7 @@ static int maps__split_kallsyms(struct maps *kmaps, struct dso *dso, u64 delta,
 
                        if (strcmp(curr_map->dso->short_name, module)) {
                                if (curr_map != initial_map &&
-                                   dso->kernel == DSO_TYPE_GUEST_KERNEL &&
+                                   dso->kernel == DSO_SPACE__KERNEL_GUEST &&
                                    machine__is_default_guest(machine)) {
                                        /*
                                         * We assume all symbols of a module are
@@ -863,7 +865,7 @@ static int maps__split_kallsyms(struct maps *kmaps, struct dso *dso, u64 delta,
                                goto add_symbol;
                        }
 
-                       if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
+                       if (dso->kernel == DSO_SPACE__KERNEL_GUEST)
                                snprintf(dso_name, sizeof(dso_name),
                                        "[guest.kernel].%d",
                                        kernel_range++);
@@ -907,7 +909,7 @@ discard_symbol:
        }
 
        if (curr_map != initial_map &&
-           dso->kernel == DSO_TYPE_GUEST_KERNEL &&
+           dso->kernel == DSO_SPACE__KERNEL_GUEST &&
            machine__is_default_guest(kmaps->machine)) {
                dso__set_loaded(curr_map->dso);
        }
@@ -1385,7 +1387,7 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
         * Set the data type and long name so that kcore can be read via
         * dso__data_read_addr().
         */
-       if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
+       if (dso->kernel == DSO_SPACE__KERNEL_GUEST)
                dso->binary_type = DSO_BINARY_TYPE__GUEST_KCORE;
        else
                dso->binary_type = DSO_BINARY_TYPE__KCORE;
@@ -1449,7 +1451,7 @@ int __dso__load_kallsyms(struct dso *dso, const char *filename,
        symbols__fixup_end(&dso->symbols);
        symbols__fixup_duplicate(&dso->symbols);
 
-       if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
+       if (dso->kernel == DSO_SPACE__KERNEL_GUEST)
                dso->symtab_type = DSO_BINARY_TYPE__GUEST_KALLSYMS;
        else
                dso->symtab_type = DSO_BINARY_TYPE__KALLSYMS;
@@ -1535,17 +1537,17 @@ static bool dso__is_compatible_symtab_type(struct dso *dso, bool kmod,
        case DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO:
        case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
        case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
-               return !kmod && dso->kernel == DSO_TYPE_USER;
+               return !kmod && dso->kernel == DSO_SPACE__USER;
 
        case DSO_BINARY_TYPE__KALLSYMS:
        case DSO_BINARY_TYPE__VMLINUX:
        case DSO_BINARY_TYPE__KCORE:
-               return dso->kernel == DSO_TYPE_KERNEL;
+               return dso->kernel == DSO_SPACE__KERNEL;
 
        case DSO_BINARY_TYPE__GUEST_KALLSYMS:
        case DSO_BINARY_TYPE__GUEST_VMLINUX:
        case DSO_BINARY_TYPE__GUEST_KCORE:
-               return dso->kernel == DSO_TYPE_GUEST_KERNEL;
+               return dso->kernel == DSO_SPACE__KERNEL_GUEST;
 
        case DSO_BINARY_TYPE__GUEST_KMODULE:
        case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
@@ -1563,6 +1565,7 @@ static bool dso__is_compatible_symtab_type(struct dso *dso, bool kmod,
 
        case DSO_BINARY_TYPE__BPF_PROG_INFO:
        case DSO_BINARY_TYPE__BPF_IMAGE:
+       case DSO_BINARY_TYPE__OOL:
        case DSO_BINARY_TYPE__NOT_FOUND:
        default:
                return false;
@@ -1647,9 +1650,9 @@ int dso__load(struct dso *dso, struct map *map)
                dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
 
        if (dso->kernel && !kmod) {
-               if (dso->kernel == DSO_TYPE_KERNEL)
+               if (dso->kernel == DSO_SPACE__KERNEL)
                        ret = dso__load_kernel_sym(dso, map);
-               else if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
+               else if (dso->kernel == DSO_SPACE__KERNEL_GUEST)
                        ret = dso__load_guest_kernel_sym(dso, map);
 
                machine = map__kmaps(map)->machine;
@@ -1879,7 +1882,7 @@ int dso__load_vmlinux(struct dso *dso, struct map *map,
        else
                symbol__join_symfs(symfs_vmlinux, vmlinux);
 
-       if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
+       if (dso->kernel == DSO_SPACE__KERNEL_GUEST)
                symtab_type = DSO_BINARY_TYPE__GUEST_VMLINUX;
        else
                symtab_type = DSO_BINARY_TYPE__VMLINUX;
@@ -1891,7 +1894,7 @@ int dso__load_vmlinux(struct dso *dso, struct map *map,
        symsrc__destroy(&ss);
 
        if (err > 0) {
-               if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
+               if (dso->kernel == DSO_SPACE__KERNEL_GUEST)
                        dso->binary_type = DSO_BINARY_TYPE__GUEST_VMLINUX;
                else
                        dso->binary_type = DSO_BINARY_TYPE__VMLINUX;
index 3fb67bd..bbbc0dc 100644 (file)
@@ -57,7 +57,8 @@ struct perf_tool {
                        throttle,
                        unthrottle,
                        ksymbol,
-                       bpf;
+                       bpf,
+                       text_poke;
 
        event_attr_op   attr;
        event_attr_op   event_update;
index 7570e36..cb16d2a 100755 (executable)
@@ -11,6 +11,7 @@ use File::Path qw(mkpath);
 use File::Copy qw(cp);
 use FileHandle;
 use FindBin;
+use IO::Handle;
 
 my $VERSION = "0.2";
 
@@ -81,6 +82,8 @@ my %default = (
     "IGNORE_UNUSED"            => 0,
 );
 
+my $test_log_start = 0;
+
 my $ktest_config = "ktest.conf";
 my $version;
 my $have_version = 0;
@@ -98,6 +101,7 @@ my $final_post_ktest;
 my $pre_ktest;
 my $post_ktest;
 my $pre_test;
+my $pre_test_die;
 my $post_test;
 my $pre_build;
 my $post_build;
@@ -223,6 +227,7 @@ my $dirname = $FindBin::Bin;
 my $mailto;
 my $mailer;
 my $mail_path;
+my $mail_max_size;
 my $mail_command;
 my $email_on_error;
 my $email_when_finished;
@@ -259,6 +264,7 @@ my %option_map = (
     "MAILTO"                   => \$mailto,
     "MAILER"                   => \$mailer,
     "MAIL_PATH"                        => \$mail_path,
+    "MAIL_MAX_SIZE"            => \$mail_max_size,
     "MAIL_COMMAND"             => \$mail_command,
     "EMAIL_ON_ERROR"           => \$email_on_error,
     "EMAIL_WHEN_FINISHED"      => \$email_when_finished,
@@ -273,6 +279,7 @@ my %option_map = (
     "PRE_KTEST"                        => \$pre_ktest,
     "POST_KTEST"               => \$post_ktest,
     "PRE_TEST"                 => \$pre_test,
+    "PRE_TEST_DIE"             => \$pre_test_die,
     "POST_TEST"                        => \$post_test,
     "BUILD_TYPE"               => \$build_type,
     "BUILD_OPTIONS"            => \$build_options,
@@ -507,9 +514,7 @@ EOF
 
 sub _logit {
     if (defined($opt{"LOG_FILE"})) {
-       open(OUT, ">> $opt{LOG_FILE}") or die "Can't write to $opt{LOG_FILE}";
-       print OUT @_;
-       close(OUT);
+       print LOG @_;
     }
 }
 
@@ -909,6 +914,12 @@ sub process_expression {
        }
     }
 
+    if ($val =~ s/^\s*NOT\s+(.*)//) {
+       my $express = $1;
+       my $ret = process_expression($name, $express);
+       return !$ret;
+    }
+
     if ($val =~ /^\s*0\s*$/) {
        return 0;
     } elsif ($val =~ /^\s*\d+\s*$/) {
@@ -1485,8 +1496,32 @@ sub dodie {
 
     if ($email_on_error) {
        my $name = get_test_name;
+       my $log_file;
+
+       if (defined($opt{"LOG_FILE"})) {
+           my $whence = 0; # beginning of file
+           my $pos = $test_log_start;
+
+           if (defined($mail_max_size)) {
+               my $log_size = tell LOG;
+               $log_size -= $test_log_start;
+               if ($log_size > $mail_max_size) {
+                   $whence = 2; # end of file
+                   $pos = - $mail_max_size;
+               }
+           }
+           $log_file = "$tmpdir/log";
+           open (L, "$opt{LOG_FILE}") or die "Can't open $opt{LOG_FILE} to read";
+           open (O, "> $tmpdir/log") or die "Can't open $tmpdir/log\n";
+           seek(L, $pos, $whence);
+           while (<L>) {
+               print O;
+           }
+           close O;
+           close L;
+       }
         send_email("KTEST: critical failure for test $i [$name]",
-                "Your test started at $script_start_time has failed with:\n@_\n");
+                "Your test started at $script_start_time has failed with:\n@_\n", $log_file);
     }
 
     if ($monitor_cnt) {
@@ -1508,7 +1543,7 @@ sub create_pty {
     my $TIOCGPTN = 0x80045430;
 
     sysopen($ptm, "/dev/ptmx", O_RDWR | O_NONBLOCK) or
-       dodie "Cant open /dev/ptmx";
+       dodie "Can't open /dev/ptmx";
 
     # unlockpt()
     $tmp = pack("i", 0);
@@ -1772,8 +1807,6 @@ sub run_command {
        (fail "unable to exec $command" and return 0);
 
     if (defined($opt{"LOG_FILE"})) {
-       open(LOG, ">>$opt{LOG_FILE}") or
-           dodie "failed to write to log";
        $dolog = 1;
     }
 
@@ -1821,7 +1854,6 @@ sub run_command {
     }
 
     close(CMD);
-    close(LOG) if ($dolog);
     close(RD)  if ($dord);
 
     $end_time = time;
@@ -3188,6 +3220,8 @@ sub config_bisect_end {
     doprint "***************************************\n\n";
 }
 
+my $pass = 1;
+
 sub run_config_bisect {
     my ($good, $bad, $last_result) = @_;
     my $reset = "";
@@ -3210,11 +3244,15 @@ sub run_config_bisect {
 
     $ret = run_config_bisect_test $config_bisect_type;
     if ($ret) {
-        doprint "NEW GOOD CONFIG\n";
+        doprint "NEW GOOD CONFIG ($pass)\n";
+       system("cp $output_config $tmpdir/good_config.tmp.$pass");
+       $pass++;
        # Return 3 for good config
        return 3;
     } else {
-        doprint "NEW BAD CONFIG\n";
+        doprint "NEW BAD CONFIG ($pass)\n";
+       system("cp $output_config $tmpdir/bad_config.tmp.$pass");
+       $pass++;
        # Return 4 for bad config
        return 4;
     }
@@ -4077,8 +4115,12 @@ if ($#new_configs >= 0) {
     }
 }
 
-if ($opt{"CLEAR_LOG"} && defined($opt{"LOG_FILE"})) {
-    unlink $opt{"LOG_FILE"};
+if (defined($opt{"LOG_FILE"})) {
+    if ($opt{"CLEAR_LOG"}) {
+       unlink $opt{"LOG_FILE"};
+    }
+    open(LOG, ">> $opt{LOG_FILE}") or die "Can't write to $opt{LOG_FILE}";
+    LOG->autoflush(1);
 }
 
 doprint "\n\nSTARTING AUTOMATED TESTS\n\n";
@@ -4171,7 +4213,7 @@ sub find_mailer {
 }
 
 sub do_send_mail {
-    my ($subject, $message) = @_;
+    my ($subject, $message, $file) = @_;
 
     if (!defined($mail_path)) {
        # find the mailer
@@ -4181,16 +4223,30 @@ sub do_send_mail {
        }
     }
 
+    my $header_file = "$tmpdir/header";
+    open (HEAD, ">$header_file") or die "Can not create $header_file\n";
+    print HEAD "To: $mailto\n";
+    print HEAD "Subject: $subject\n\n";
+    print HEAD "$message\n";
+    close HEAD;
+
     if (!defined($mail_command)) {
        if ($mailer eq "mail" || $mailer eq "mailx") {
-           $mail_command = "\$MAIL_PATH/\$MAILER -s \'\$SUBJECT\' \$MAILTO <<< \'\$MESSAGE\'";
+           $mail_command = "cat \$HEADER_FILE \$BODY_FILE | \$MAIL_PATH/\$MAILER -s \'\$SUBJECT\' \$MAILTO";
        } elsif ($mailer eq "sendmail" ) {
-           $mail_command =  "echo \'Subject: \$SUBJECT\n\n\$MESSAGE\' | \$MAIL_PATH/\$MAILER -t \$MAILTO";
+           $mail_command =  "cat \$HEADER_FILE \$BODY_FILE | \$MAIL_PATH/\$MAILER -t \$MAILTO";
        } else {
            die "\nYour mailer: $mailer is not supported.\n";
        }
     }
 
+    if (defined($file)) {
+       $mail_command =~ s/\$BODY_FILE/$file/g;
+    } else {
+       $mail_command =~ s/\$BODY_FILE//g;
+    }
+
+    $mail_command =~ s/\$HEADER_FILE/$header_file/g;
     $mail_command =~ s/\$MAILER/$mailer/g;
     $mail_command =~ s/\$MAIL_PATH/$mail_path/g;
     $mail_command =~ s/\$MAILTO/$mailto/g;
@@ -4338,10 +4394,19 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) {
     }
 
     doprint "\n\n";
+
+    if (defined($opt{"LOG_FILE"})) {
+       $test_log_start = tell(LOG);
+    }
+
     doprint "RUNNING TEST $i of $opt{NUM_TESTS}$name with option $test_type $run_type$installme\n\n";
 
     if (defined($pre_test)) {
-       run_command $pre_test;
+       my $ret = run_command $pre_test;
+       if (!$ret && defined($pre_test_die) &&
+           $pre_test_die) {
+           dodie "failed to pre_test\n";
+       }
     }
 
     unlink $dmesg;
@@ -4441,4 +4506,10 @@ if ($email_when_finished) {
     send_email("KTEST: Your test has finished!",
             "$successes of $opt{NUM_TESTS} tests started at $script_start_time were successful!");
 }
+
+if (defined($opt{"LOG_FILE"})) {
+    print "\n See $opt{LOG_FILE} for the record of results.\n\n";
+    close LOG;
+}
+
 exit 0;
index 27666b8..5e7d1d7 100644 (file)
 # Users can cancel the test by Ctrl^C
 # (default 0)
 #EMAIL_WHEN_CANCELED = 1
+#
+# If a test ends with an error, EMAIL_ON_ERROR is set, and a
+# LOG_FILE is defined, then the log of the failing test will be
+# included in the email that is sent.
+# The log may be very large, in which case only the tail of it
+# should be sent. To limit how much of the log is sent, set
+# MAIL_MAX_SIZE to the maximum number of bytes to include from
+# the end of the failing test's log. That is, if this is set to
+# 100000, then only the last 100 thousand bytes of the log file
+# will be included in the email.
+# (default undef)
+#MAIL_MAX_SIZE = 1000000
 
 # Start a test setup. If you leave this off, all options
 # will be default and the test will run once.
 # default (undefined)
 #PRE_TEST = ${SSH} reboot_to_special_kernel
 
+# To kill the entire test if PRE_TEST is defined but fails, set
+# this to 1.
+# (default 0)
+#PRE_TEST_DIE = 1
+
 # If there is a command you want to run after the individual test case
 # completes, then you can set this option.
 #
index a8ee5c4..a1a5dc6 100644 (file)
@@ -173,6 +173,9 @@ struct nfit_test_fw {
        u64 version;
        u32 size_received;
        u64 end_time;
+       bool armed;
+       bool missed_activate;
+       unsigned long last_activate;
 };
 
 struct nfit_test {
@@ -345,7 +348,7 @@ static int nd_intel_test_finish_fw(struct nfit_test *t,
                        __func__, t, nd_cmd, buf_len, idx);
 
        if (fw->state == FW_STATE_UPDATED) {
-               /* update already done, need cold boot */
+               /* update already done, need activation */
                nd_cmd->status = 0x20007;
                return 0;
        }
@@ -430,6 +433,7 @@ static int nd_intel_test_finish_query(struct nfit_test *t,
                }
                dev_dbg(dev, "%s: transition out verify\n", __func__);
                fw->state = FW_STATE_UPDATED;
+               fw->missed_activate = false;
                /* fall through */
        case FW_STATE_UPDATED:
                nd_cmd->status = 0;
@@ -1178,6 +1182,134 @@ static int nd_intel_test_cmd_master_secure_erase(struct nfit_test *t,
        return 0;
 }
 
+static unsigned long last_activate;
+
+static int nvdimm_bus_intel_fw_activate_businfo(struct nfit_test *t,
+               struct nd_intel_bus_fw_activate_businfo *nd_cmd,
+               unsigned int buf_len)
+{
+       int i, armed = 0;
+       int state;
+       u64 tmo;
+
+       for (i = 0; i < NUM_DCR; i++) {
+               struct nfit_test_fw *fw = &t->fw[i];
+
+               if (fw->armed)
+                       armed++;
+       }
+
+       /*
+        * Emulate 3 second activation max, and 1 second incremental
+        * quiesce time per dimm requiring multiple activates to get all
+        * DIMMs updated.
+        */
+       if (armed)
+               state = ND_INTEL_FWA_ARMED;
+       else if (!last_activate || time_after(jiffies, last_activate + 3 * HZ))
+               state = ND_INTEL_FWA_IDLE;
+       else
+               state = ND_INTEL_FWA_BUSY;
+
+       tmo = armed * USEC_PER_SEC;
+       *nd_cmd = (struct nd_intel_bus_fw_activate_businfo) {
+               .capability = ND_INTEL_BUS_FWA_CAP_FWQUIESCE
+                       | ND_INTEL_BUS_FWA_CAP_OSQUIESCE
+                       | ND_INTEL_BUS_FWA_CAP_RESET,
+               .state = state,
+               .activate_tmo = tmo,
+               .cpu_quiesce_tmo = tmo,
+               .io_quiesce_tmo = tmo,
+               .max_quiesce_tmo = 3 * USEC_PER_SEC,
+       };
+
+       return 0;
+}
+
+static int nvdimm_bus_intel_fw_activate(struct nfit_test *t,
+               struct nd_intel_bus_fw_activate *nd_cmd,
+               unsigned int buf_len)
+{
+       struct nd_intel_bus_fw_activate_businfo info;
+       u32 status = 0;
+       int i;
+
+       nvdimm_bus_intel_fw_activate_businfo(t, &info, sizeof(info));
+       if (info.state == ND_INTEL_FWA_BUSY)
+               status = ND_INTEL_BUS_FWA_STATUS_BUSY;
+       else if (info.activate_tmo > info.max_quiesce_tmo)
+               status = ND_INTEL_BUS_FWA_STATUS_TMO;
+       else if (info.state == ND_INTEL_FWA_IDLE)
+               status = ND_INTEL_BUS_FWA_STATUS_NOARM;
+
+       dev_dbg(&t->pdev.dev, "status: %d\n", status);
+       nd_cmd->status = status;
+       if (status && status != ND_INTEL_BUS_FWA_STATUS_TMO)
+               return 0;
+
+       last_activate = jiffies;
+       for (i = 0; i < NUM_DCR; i++) {
+               struct nfit_test_fw *fw = &t->fw[i];
+
+               if (!fw->armed)
+                       continue;
+               if (fw->state != FW_STATE_UPDATED)
+                       fw->missed_activate = true;
+               else
+                       fw->state = FW_STATE_NEW;
+               fw->armed = false;
+               fw->last_activate = last_activate;
+       }
+
+       return 0;
+}
+
+static int nd_intel_test_cmd_fw_activate_dimminfo(struct nfit_test *t,
+               struct nd_intel_fw_activate_dimminfo *nd_cmd,
+               unsigned int buf_len, int dimm)
+{
+       struct nd_intel_bus_fw_activate_businfo info;
+       struct nfit_test_fw *fw = &t->fw[dimm];
+       u32 result, state;
+
+       nvdimm_bus_intel_fw_activate_businfo(t, &info, sizeof(info));
+
+       if (info.state == ND_INTEL_FWA_BUSY)
+               state = ND_INTEL_FWA_BUSY;
+       else if (info.state == ND_INTEL_FWA_IDLE)
+               state = ND_INTEL_FWA_IDLE;
+       else if (fw->armed)
+               state = ND_INTEL_FWA_ARMED;
+       else
+               state = ND_INTEL_FWA_IDLE;
+
+       result = ND_INTEL_DIMM_FWA_NONE;
+       if (last_activate && fw->last_activate == last_activate &&
+                       state == ND_INTEL_FWA_IDLE) {
+               if (fw->missed_activate)
+                       result = ND_INTEL_DIMM_FWA_NOTSTAGED;
+               else
+                       result = ND_INTEL_DIMM_FWA_SUCCESS;
+       }
+
+       *nd_cmd = (struct nd_intel_fw_activate_dimminfo) {
+               .result = result,
+               .state = state,
+       };
+
+       return 0;
+}
+
+static int nd_intel_test_cmd_fw_activate_arm(struct nfit_test *t,
+               struct nd_intel_fw_activate_arm *nd_cmd,
+               unsigned int buf_len, int dimm)
+{
+       struct nfit_test_fw *fw = &t->fw[dimm];
+
+       fw->armed = nd_cmd->activate_arm == ND_INTEL_DIMM_FWA_ARM;
+       nd_cmd->status = 0;
+       return 0;
+}
 
 static int get_dimm(struct nfit_mem *nfit_mem, unsigned int func)
 {
@@ -1192,6 +1324,29 @@ static int get_dimm(struct nfit_mem *nfit_mem, unsigned int func)
        return i;
 }
 
+static void nfit_ctl_dbg(struct acpi_nfit_desc *acpi_desc,
+               struct nvdimm *nvdimm, unsigned int cmd, void *buf,
+               unsigned int len)
+{
+       struct nfit_test *t = container_of(acpi_desc, typeof(*t), acpi_desc);
+       unsigned int func = cmd;
+       unsigned int family = 0;
+
+       if (cmd == ND_CMD_CALL) {
+               struct nd_cmd_pkg *pkg = buf;
+
+               len = pkg->nd_size_in;
+               family = pkg->nd_family;
+               buf = pkg->nd_payload;
+               func = pkg->nd_command;
+       }
+       dev_dbg(&t->pdev.dev, "%s family: %d cmd: %d: func: %d input length: %d\n",
+                       nvdimm ? nvdimm_name(nvdimm) : "bus", family, cmd, func,
+                       len);
+       print_hex_dump_debug("nvdimm in  ", DUMP_PREFIX_OFFSET, 16, 4,
+                       buf, min(len, 256u), true);
+}
+
 static int nfit_test_ctl(struct nvdimm_bus_descriptor *nd_desc,
                struct nvdimm *nvdimm, unsigned int cmd, void *buf,
                unsigned int buf_len, int *cmd_rc)
@@ -1205,6 +1360,8 @@ static int nfit_test_ctl(struct nvdimm_bus_descriptor *nd_desc,
                cmd_rc = &__cmd_rc;
        *cmd_rc = 0;
 
+       nfit_ctl_dbg(acpi_desc, nvdimm, cmd, buf, buf_len);
+
        if (nvdimm) {
                struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
                unsigned long cmd_mask = nvdimm_cmd_mask(nvdimm);
@@ -1224,6 +1381,11 @@ static int nfit_test_ctl(struct nvdimm_bus_descriptor *nd_desc,
                        i = get_dimm(nfit_mem, func);
                        if (i < 0)
                                return i;
+                       if (i >= NUM_DCR) {
+                               dev_WARN_ONCE(&t->pdev.dev, 1,
+                                               "ND_CMD_CALL only valid for nfit_test0\n");
+                               return -EINVAL;
+                       }
 
                        switch (func) {
                        case NVDIMM_INTEL_GET_SECURITY_STATE:
@@ -1252,11 +1414,11 @@ static int nfit_test_ctl(struct nvdimm_bus_descriptor *nd_desc,
                                break;
                        case NVDIMM_INTEL_OVERWRITE:
                                rc = nd_intel_test_cmd_overwrite(t,
-                                               buf, buf_len, i - t->dcr_idx);
+                                               buf, buf_len, i);
                                break;
                        case NVDIMM_INTEL_QUERY_OVERWRITE:
                                rc = nd_intel_test_cmd_query_overwrite(t,
-                                               buf, buf_len, i - t->dcr_idx);
+                                               buf, buf_len, i);
                                break;
                        case NVDIMM_INTEL_SET_MASTER_PASSPHRASE:
                                rc = nd_intel_test_cmd_master_set_pass(t,
@@ -1266,54 +1428,59 @@ static int nfit_test_ctl(struct nvdimm_bus_descriptor *nd_desc,
                                rc = nd_intel_test_cmd_master_secure_erase(t,
                                                buf, buf_len, i);
                                break;
+                       case NVDIMM_INTEL_FW_ACTIVATE_DIMMINFO:
+                               rc = nd_intel_test_cmd_fw_activate_dimminfo(
+                                       t, buf, buf_len, i);
+                               break;
+                       case NVDIMM_INTEL_FW_ACTIVATE_ARM:
+                               rc = nd_intel_test_cmd_fw_activate_arm(
+                                       t, buf, buf_len, i);
+                               break;
                        case ND_INTEL_ENABLE_LSS_STATUS:
                                rc = nd_intel_test_cmd_set_lss_status(t,
                                                buf, buf_len);
                                break;
                        case ND_INTEL_FW_GET_INFO:
                                rc = nd_intel_test_get_fw_info(t, buf,
-                                               buf_len, i - t->dcr_idx);
+                                               buf_len, i);
                                break;
                        case ND_INTEL_FW_START_UPDATE:
                                rc = nd_intel_test_start_update(t, buf,
-                                               buf_len, i - t->dcr_idx);
+                                               buf_len, i);
                                break;
                        case ND_INTEL_FW_SEND_DATA:
                                rc = nd_intel_test_send_data(t, buf,
-                                               buf_len, i - t->dcr_idx);
+                                               buf_len, i);
                                break;
                        case ND_INTEL_FW_FINISH_UPDATE:
                                rc = nd_intel_test_finish_fw(t, buf,
-                                               buf_len, i - t->dcr_idx);
+                                               buf_len, i);
                                break;
                        case ND_INTEL_FW_FINISH_QUERY:
                                rc = nd_intel_test_finish_query(t, buf,
-                                               buf_len, i - t->dcr_idx);
+                                               buf_len, i);
                                break;
                        case ND_INTEL_SMART:
                                rc = nfit_test_cmd_smart(buf, buf_len,
-                                               &t->smart[i - t->dcr_idx]);
+                                               &t->smart[i]);
                                break;
                        case ND_INTEL_SMART_THRESHOLD:
                                rc = nfit_test_cmd_smart_threshold(buf,
                                                buf_len,
-                                               &t->smart_threshold[i -
-                                                       t->dcr_idx]);
+                                               &t->smart_threshold[i]);
                                break;
                        case ND_INTEL_SMART_SET_THRESHOLD:
                                rc = nfit_test_cmd_smart_set_threshold(buf,
                                                buf_len,
-                                               &t->smart_threshold[i -
-                                                       t->dcr_idx],
-                                               &t->smart[i - t->dcr_idx],
+                                               &t->smart_threshold[i],
+                                               &t->smart[i],
                                                &t->pdev.dev, t->dimm_dev[i]);
                                break;
                        case ND_INTEL_SMART_INJECT:
                                rc = nfit_test_cmd_smart_inject(buf,
                                                buf_len,
-                                               &t->smart_threshold[i -
-                                                       t->dcr_idx],
-                                               &t->smart[i - t->dcr_idx],
+                                               &t->smart_threshold[i],
+                                               &t->smart[i],
                                                &t->pdev.dev, t->dimm_dev[i]);
                                break;
                        default:
@@ -1353,9 +1520,9 @@ static int nfit_test_ctl(struct nvdimm_bus_descriptor *nd_desc,
                if (!nd_desc)
                        return -ENOTTY;
 
-               if (cmd == ND_CMD_CALL) {
+               if (cmd == ND_CMD_CALL && call_pkg->nd_family
+                               == NVDIMM_BUS_FAMILY_NFIT) {
                        func = call_pkg->nd_command;
-
                        buf_len = call_pkg->nd_size_in + call_pkg->nd_size_out;
                        buf = (void *) call_pkg->nd_payload;
 
@@ -1379,7 +1546,26 @@ static int nfit_test_ctl(struct nvdimm_bus_descriptor *nd_desc,
                        default:
                                return -ENOTTY;
                        }
-               }
+               } else if (cmd == ND_CMD_CALL && call_pkg->nd_family
+                               == NVDIMM_BUS_FAMILY_INTEL) {
+                       func = call_pkg->nd_command;
+                       buf_len = call_pkg->nd_size_in + call_pkg->nd_size_out;
+                       buf = (void *) call_pkg->nd_payload;
+
+                       switch (func) {
+                       case NVDIMM_BUS_INTEL_FW_ACTIVATE_BUSINFO:
+                               rc = nvdimm_bus_intel_fw_activate_businfo(t,
+                                               buf, buf_len);
+                               return rc;
+                       case NVDIMM_BUS_INTEL_FW_ACTIVATE:
+                               rc = nvdimm_bus_intel_fw_activate(t, buf,
+                                               buf_len);
+                               return rc;
+                       default:
+                               return -ENOTTY;
+                       }
+               } else if (cmd == ND_CMD_CALL)
+                       return -ENOTTY;
 
                if (!nd_desc || !test_bit(cmd, &nd_desc->cmd_mask))
                        return -ENOTTY;
@@ -1805,6 +1991,7 @@ static void nfit_test0_setup(struct nfit_test *t)
        struct acpi_nfit_flush_address *flush;
        struct acpi_nfit_capabilities *pcap;
        unsigned int offset = 0, i;
+       unsigned long *acpi_mask;
 
        /*
         * spa0 (interleave first half of dimm0 and dimm1, note storage
@@ -2507,10 +2694,10 @@ static void nfit_test0_setup(struct nfit_test *t)
        set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_cmd_force_en);
        set_bit(ND_CMD_CLEAR_ERROR, &acpi_desc->bus_cmd_force_en);
        set_bit(ND_CMD_CALL, &acpi_desc->bus_cmd_force_en);
-       set_bit(NFIT_CMD_TRANSLATE_SPA, &acpi_desc->bus_nfit_cmd_force_en);
-       set_bit(NFIT_CMD_ARS_INJECT_SET, &acpi_desc->bus_nfit_cmd_force_en);
-       set_bit(NFIT_CMD_ARS_INJECT_CLEAR, &acpi_desc->bus_nfit_cmd_force_en);
-       set_bit(NFIT_CMD_ARS_INJECT_GET, &acpi_desc->bus_nfit_cmd_force_en);
+       set_bit(NFIT_CMD_TRANSLATE_SPA, &acpi_desc->bus_dsm_mask);
+       set_bit(NFIT_CMD_ARS_INJECT_SET, &acpi_desc->bus_dsm_mask);
+       set_bit(NFIT_CMD_ARS_INJECT_CLEAR, &acpi_desc->bus_dsm_mask);
+       set_bit(NFIT_CMD_ARS_INJECT_GET, &acpi_desc->bus_dsm_mask);
        set_bit(ND_INTEL_FW_GET_INFO, &acpi_desc->dimm_cmd_force_en);
        set_bit(ND_INTEL_FW_START_UPDATE, &acpi_desc->dimm_cmd_force_en);
        set_bit(ND_INTEL_FW_SEND_DATA, &acpi_desc->dimm_cmd_force_en);
@@ -2531,6 +2718,12 @@ static void nfit_test0_setup(struct nfit_test *t)
                        &acpi_desc->dimm_cmd_force_en);
        set_bit(NVDIMM_INTEL_MASTER_SECURE_ERASE,
                        &acpi_desc->dimm_cmd_force_en);
+       set_bit(NVDIMM_INTEL_FW_ACTIVATE_DIMMINFO, &acpi_desc->dimm_cmd_force_en);
+       set_bit(NVDIMM_INTEL_FW_ACTIVATE_ARM, &acpi_desc->dimm_cmd_force_en);
+
+       acpi_mask = &acpi_desc->family_dsm_mask[NVDIMM_BUS_FAMILY_INTEL];
+       set_bit(NVDIMM_BUS_INTEL_FW_ACTIVATE_BUSINFO, acpi_mask);
+       set_bit(NVDIMM_BUS_INTEL_FW_ACTIVATE, acpi_mask);
 }
 
 static void nfit_test1_setup(struct nfit_test *t)
@@ -2699,14 +2892,18 @@ static int nfit_ctl_test(struct device *dev)
        struct acpi_nfit_desc *acpi_desc;
        const u64 test_val = 0x0123456789abcdefULL;
        unsigned long mask, cmd_size, offset;
-       union {
-               struct nd_cmd_get_config_size cfg_size;
-               struct nd_cmd_clear_error clear_err;
-               struct nd_cmd_ars_status ars_stat;
-               struct nd_cmd_ars_cap ars_cap;
-               char buf[sizeof(struct nd_cmd_ars_status)
-                       + sizeof(struct nd_ars_record)];
-       } cmds;
+       struct nfit_ctl_test_cmd {
+               struct nd_cmd_pkg pkg;
+               union {
+                       struct nd_cmd_get_config_size cfg_size;
+                       struct nd_cmd_clear_error clear_err;
+                       struct nd_cmd_ars_status ars_stat;
+                       struct nd_cmd_ars_cap ars_cap;
+                       struct nd_intel_bus_fw_activate_businfo fwa_info;
+                       char buf[sizeof(struct nd_cmd_ars_status)
+                               + sizeof(struct nd_ars_record)];
+               };
+       } cmd;
 
        adev = devm_kzalloc(dev, sizeof(*adev), GFP_KERNEL);
        if (!adev)
@@ -2731,11 +2928,15 @@ static int nfit_ctl_test(struct device *dev)
                        .module = THIS_MODULE,
                        .provider_name = "ACPI.NFIT",
                        .ndctl = acpi_nfit_ctl,
-                       .bus_dsm_mask = 1UL << NFIT_CMD_TRANSLATE_SPA
-                               | 1UL << NFIT_CMD_ARS_INJECT_SET
-                               | 1UL << NFIT_CMD_ARS_INJECT_CLEAR
-                               | 1UL << NFIT_CMD_ARS_INJECT_GET,
+                       .bus_family_mask = 1UL << NVDIMM_BUS_FAMILY_NFIT
+                               | 1UL << NVDIMM_BUS_FAMILY_INTEL,
                },
+               .bus_dsm_mask = 1UL << NFIT_CMD_TRANSLATE_SPA
+                       | 1UL << NFIT_CMD_ARS_INJECT_SET
+                       | 1UL << NFIT_CMD_ARS_INJECT_CLEAR
+                       | 1UL << NFIT_CMD_ARS_INJECT_GET,
+               .family_dsm_mask[NVDIMM_BUS_FAMILY_INTEL] =
+                       NVDIMM_BUS_INTEL_FW_ACTIVATE_CMDMASK,
                .dev = &adev->dev,
        };
 
@@ -2766,21 +2967,21 @@ static int nfit_ctl_test(struct device *dev)
 
 
        /* basic checkout of a typical 'get config size' command */
-       cmd_size = sizeof(cmds.cfg_size);
-       cmds.cfg_size = (struct nd_cmd_get_config_size) {
+       cmd_size = sizeof(cmd.cfg_size);
+       cmd.cfg_size = (struct nd_cmd_get_config_size) {
                .status = 0,
                .config_size = SZ_128K,
                .max_xfer = SZ_4K,
        };
-       rc = setup_result(cmds.buf, cmd_size);
+       rc = setup_result(cmd.buf, cmd_size);
        if (rc)
                return rc;
        rc = acpi_nfit_ctl(&acpi_desc->nd_desc, nvdimm, ND_CMD_GET_CONFIG_SIZE,
-                       cmds.buf, cmd_size, &cmd_rc);
+                       cmd.buf, cmd_size, &cmd_rc);
 
-       if (rc < 0 || cmd_rc || cmds.cfg_size.status != 0
-                       || cmds.cfg_size.config_size != SZ_128K
-                       || cmds.cfg_size.max_xfer != SZ_4K) {
+       if (rc < 0 || cmd_rc || cmd.cfg_size.status != 0
+                       || cmd.cfg_size.config_size != SZ_128K
+                       || cmd.cfg_size.max_xfer != SZ_4K) {
                dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
                                __func__, __LINE__, rc, cmd_rc);
                return -EIO;
@@ -2789,14 +2990,14 @@ static int nfit_ctl_test(struct device *dev)
 
        /* test ars_status with zero output */
        cmd_size = offsetof(struct nd_cmd_ars_status, address);
-       cmds.ars_stat = (struct nd_cmd_ars_status) {
+       cmd.ars_stat = (struct nd_cmd_ars_status) {
                .out_length = 0,
        };
-       rc = setup_result(cmds.buf, cmd_size);
+       rc = setup_result(cmd.buf, cmd_size);
        if (rc)
                return rc;
        rc = acpi_nfit_ctl(&acpi_desc->nd_desc, NULL, ND_CMD_ARS_STATUS,
-                       cmds.buf, cmd_size, &cmd_rc);
+                       cmd.buf, cmd_size, &cmd_rc);
 
        if (rc < 0 || cmd_rc) {
                dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
@@ -2806,16 +3007,16 @@ static int nfit_ctl_test(struct device *dev)
 
 
        /* test ars_cap with benign extended status */
-       cmd_size = sizeof(cmds.ars_cap);
-       cmds.ars_cap = (struct nd_cmd_ars_cap) {
+       cmd_size = sizeof(cmd.ars_cap);
+       cmd.ars_cap = (struct nd_cmd_ars_cap) {
                .status = ND_ARS_PERSISTENT << 16,
        };
        offset = offsetof(struct nd_cmd_ars_cap, status);
-       rc = setup_result(cmds.buf + offset, cmd_size - offset);
+       rc = setup_result(cmd.buf + offset, cmd_size - offset);
        if (rc)
                return rc;
        rc = acpi_nfit_ctl(&acpi_desc->nd_desc, NULL, ND_CMD_ARS_CAP,
-                       cmds.buf, cmd_size, &cmd_rc);
+                       cmd.buf, cmd_size, &cmd_rc);
 
        if (rc < 0 || cmd_rc) {
                dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
@@ -2825,19 +3026,19 @@ static int nfit_ctl_test(struct device *dev)
 
 
        /* test ars_status with 'status' trimmed from 'out_length' */
-       cmd_size = sizeof(cmds.ars_stat) + sizeof(struct nd_ars_record);
-       cmds.ars_stat = (struct nd_cmd_ars_status) {
+       cmd_size = sizeof(cmd.ars_stat) + sizeof(struct nd_ars_record);
+       cmd.ars_stat = (struct nd_cmd_ars_status) {
                .out_length = cmd_size - 4,
        };
-       record = &cmds.ars_stat.records[0];
+       record = &cmd.ars_stat.records[0];
        *record = (struct nd_ars_record) {
                .length = test_val,
        };
-       rc = setup_result(cmds.buf, cmd_size);
+       rc = setup_result(cmd.buf, cmd_size);
        if (rc)
                return rc;
        rc = acpi_nfit_ctl(&acpi_desc->nd_desc, NULL, ND_CMD_ARS_STATUS,
-                       cmds.buf, cmd_size, &cmd_rc);
+                       cmd.buf, cmd_size, &cmd_rc);
 
        if (rc < 0 || cmd_rc || record->length != test_val) {
                dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
@@ -2847,19 +3048,19 @@ static int nfit_ctl_test(struct device *dev)
 
 
        /* test ars_status with 'Output (Size)' including 'status' */
-       cmd_size = sizeof(cmds.ars_stat) + sizeof(struct nd_ars_record);
-       cmds.ars_stat = (struct nd_cmd_ars_status) {
+       cmd_size = sizeof(cmd.ars_stat) + sizeof(struct nd_ars_record);
+       cmd.ars_stat = (struct nd_cmd_ars_status) {
                .out_length = cmd_size,
        };
-       record = &cmds.ars_stat.records[0];
+       record = &cmd.ars_stat.records[0];
        *record = (struct nd_ars_record) {
                .length = test_val,
        };
-       rc = setup_result(cmds.buf, cmd_size);
+       rc = setup_result(cmd.buf, cmd_size);
        if (rc)
                return rc;
        rc = acpi_nfit_ctl(&acpi_desc->nd_desc, NULL, ND_CMD_ARS_STATUS,
-                       cmds.buf, cmd_size, &cmd_rc);
+                       cmd.buf, cmd_size, &cmd_rc);
 
        if (rc < 0 || cmd_rc || record->length != test_val) {
                dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
@@ -2869,15 +3070,15 @@ static int nfit_ctl_test(struct device *dev)
 
 
        /* test extended status for get_config_size results in failure */
-       cmd_size = sizeof(cmds.cfg_size);
-       cmds.cfg_size = (struct nd_cmd_get_config_size) {
+       cmd_size = sizeof(cmd.cfg_size);
+       cmd.cfg_size = (struct nd_cmd_get_config_size) {
                .status = 1 << 16,
        };
-       rc = setup_result(cmds.buf, cmd_size);
+       rc = setup_result(cmd.buf, cmd_size);
        if (rc)
                return rc;
        rc = acpi_nfit_ctl(&acpi_desc->nd_desc, nvdimm, ND_CMD_GET_CONFIG_SIZE,
-                       cmds.buf, cmd_size, &cmd_rc);
+                       cmd.buf, cmd_size, &cmd_rc);
 
        if (rc < 0 || cmd_rc >= 0) {
                dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
@@ -2886,16 +3087,46 @@ static int nfit_ctl_test(struct device *dev)
        }
 
        /* test clear error */
-       cmd_size = sizeof(cmds.clear_err);
-       cmds.clear_err = (struct nd_cmd_clear_error) {
+       cmd_size = sizeof(cmd.clear_err);
+       cmd.clear_err = (struct nd_cmd_clear_error) {
                .length = 512,
                .cleared = 512,
        };
-       rc = setup_result(cmds.buf, cmd_size);
+       rc = setup_result(cmd.buf, cmd_size);
        if (rc)
                return rc;
        rc = acpi_nfit_ctl(&acpi_desc->nd_desc, NULL, ND_CMD_CLEAR_ERROR,
-                       cmds.buf, cmd_size, &cmd_rc);
+                       cmd.buf, cmd_size, &cmd_rc);
+       if (rc < 0 || cmd_rc) {
+               dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
+                               __func__, __LINE__, rc, cmd_rc);
+               return -EIO;
+       }
+
+       /* test firmware activate bus info */
+       cmd_size = sizeof(cmd.fwa_info);
+       cmd = (struct nfit_ctl_test_cmd) {
+               .pkg = {
+                       .nd_command = NVDIMM_BUS_INTEL_FW_ACTIVATE_BUSINFO,
+                       .nd_family = NVDIMM_BUS_FAMILY_INTEL,
+                       .nd_size_out = cmd_size,
+                       .nd_fw_size = cmd_size,
+               },
+               .fwa_info = {
+                       .state = ND_INTEL_FWA_IDLE,
+                       .capability = ND_INTEL_BUS_FWA_CAP_FWQUIESCE
+                               | ND_INTEL_BUS_FWA_CAP_OSQUIESCE,
+                       .activate_tmo = 1,
+                       .cpu_quiesce_tmo = 1,
+                       .io_quiesce_tmo = 1,
+                       .max_quiesce_tmo = 1,
+               },
+       };
+       rc = setup_result(cmd.buf, cmd_size);
+       if (rc)
+               return rc;
+       rc = acpi_nfit_ctl(&acpi_desc->nd_desc, NULL, ND_CMD_CALL,
+                       &cmd, sizeof(cmd.pkg) + cmd_size, &cmd_rc);
        if (rc < 0 || cmd_rc) {
                dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
                                __func__, __LINE__, rc, cmd_rc);
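To make the emulation above concrete: with two DIMMs armed, nvdimm_bus_intel_fw_activate_businfo() reports ND_INTEL_FWA_ARMED with a 2-second activate/cpu/io quiesce timeout against the fixed 3-second max_quiesce_tmo; after an activate call the bus reads back BUSY for roughly three seconds of jiffies and then IDLE, at which point an armed DIMM that had reached FW_STATE_UPDATED reports FWA_SUCCESS while one that had not reports NOTSTAGED. A small illustration of the timeout arithmetic only (not part of the test module):

#include <stdio.h>

#define USEC_PER_SEC 1000000ULL

int main(void)
{
        for (int armed = 1; armed <= 4; armed++) {
                unsigned long long tmo = armed * USEC_PER_SEC;
                unsigned long long max = 3 * USEC_PER_SEC;

                /* Beyond three armed DIMMs the activate call reports a
                 * timeout status but still performs the activation. */
                printf("%d armed: tmo=%llu us, max=%llu us%s\n",
                       armed, tmo, max, tmo > max ? " (timeout)" : "");
        }
        return 0;
}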
index e7a8cf8..a83b582 100644 (file)
@@ -102,7 +102,7 @@ endif
 OVERRIDE_TARGETS := 1
 override define CLEAN
        $(call msg,CLEAN)
-       $(RM) -r $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES) $(EXTRA_CLEAN)
+       $(Q)$(RM) -r $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES) $(EXTRA_CLEAN)
 endef
 
 include ../lib.mk
@@ -123,17 +123,21 @@ $(notdir $(TEST_GEN_PROGS)                                                \
         $(TEST_GEN_PROGS_EXTENDED)                                     \
         $(TEST_CUSTOM_PROGS)): %: $(OUTPUT)/% ;
 
+$(OUTPUT)/%.o: %.c
+       $(call msg,CC,,$@)
+       $(Q)$(CC) $(CFLAGS) -c $(filter %.c,$^) $(LDLIBS) -o $@
+
 $(OUTPUT)/%:%.c
        $(call msg,BINARY,,$@)
-       $(LINK.c) $^ $(LDLIBS) -o $@
+       $(Q)$(LINK.c) $^ $(LDLIBS) -o $@
 
 $(OUTPUT)/urandom_read: urandom_read.c
        $(call msg,BINARY,,$@)
-       $(CC) $(LDFLAGS) -o $@ $< $(LDLIBS) -Wl,--build-id
+       $(Q)$(CC) $(LDFLAGS) -o $@ $< $(LDLIBS) -Wl,--build-id
 
 $(OUTPUT)/test_stub.o: test_stub.c $(BPFOBJ)
        $(call msg,CC,,$@)
-       $(CC) -c $(CFLAGS) -o $@ $<
+       $(Q)$(CC) -c $(CFLAGS) -o $@ $<
 
 VMLINUX_BTF_PATHS ?= $(if $(O),$(O)/vmlinux)                           \
                     $(if $(KBUILD_OUTPUT),$(KBUILD_OUTPUT)/vmlinux)    \
@@ -142,7 +146,9 @@ VMLINUX_BTF_PATHS ?= $(if $(O),$(O)/vmlinux)                                \
                     /boot/vmlinux-$(shell uname -r)
 VMLINUX_BTF ?= $(abspath $(firstword $(wildcard $(VMLINUX_BTF_PATHS))))
 
-$(OUTPUT)/runqslower: $(BPFOBJ)
+DEFAULT_BPFTOOL := $(SCRATCH_DIR)/sbin/bpftool
+
+$(OUTPUT)/runqslower: $(BPFOBJ) | $(DEFAULT_BPFTOOL)
        $(Q)$(MAKE) $(submake_extras) -C $(TOOLSDIR)/bpf/runqslower     \
                    OUTPUT=$(SCRATCH_DIR)/ VMLINUX_BTF=$(VMLINUX_BTF)   \
                    BPFOBJ=$(BPFOBJ) BPF_INCLUDE=$(INCLUDE_DIR) &&      \
@@ -164,7 +170,6 @@ $(OUTPUT)/test_netcnt: cgroup_helpers.c
 $(OUTPUT)/test_sock_fields: cgroup_helpers.c
 $(OUTPUT)/test_sysctl: cgroup_helpers.c
 
-DEFAULT_BPFTOOL := $(SCRATCH_DIR)/sbin/bpftool
 BPFTOOL ?= $(DEFAULT_BPFTOOL)
 $(DEFAULT_BPFTOOL): $(wildcard $(BPFTOOLDIR)/*.[ch] $(BPFTOOLDIR)/Makefile)    \
                    $(BPFOBJ) | $(BUILD_DIR)/bpftool
@@ -180,15 +185,15 @@ $(BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile)                       \
 
 $(BUILD_DIR)/libbpf $(BUILD_DIR)/bpftool $(BUILD_DIR)/resolve_btfids $(INCLUDE_DIR):
        $(call msg,MKDIR,,$@)
-       mkdir -p $@
+       $(Q)mkdir -p $@
 
 $(INCLUDE_DIR)/vmlinux.h: $(VMLINUX_BTF) | $(BPFTOOL) $(INCLUDE_DIR)
 ifeq ($(VMLINUX_H),)
        $(call msg,GEN,,$@)
-       $(BPFTOOL) btf dump file $(VMLINUX_BTF) format c > $@
+       $(Q)$(BPFTOOL) btf dump file $(VMLINUX_BTF) format c > $@
 else
        $(call msg,CP,,$@)
-       cp "$(VMLINUX_H)" $@
+       $(Q)cp "$(VMLINUX_H)" $@
 endif
 
 $(RESOLVE_BTFIDS): $(BPFOBJ) | $(BUILD_DIR)/resolve_btfids     \
@@ -237,28 +242,28 @@ $(OUTPUT)/flow_dissector_load.o: flow_dissector_load.h
 # $4 - LDFLAGS
 define CLANG_BPF_BUILD_RULE
        $(call msg,CLNG-LLC,$(TRUNNER_BINARY),$2)
-       ($(CLANG) $3 -O2 -target bpf -emit-llvm                         \
+       $(Q)($(CLANG) $3 -O2 -target bpf -emit-llvm                     \
                -c $1 -o - || echo "BPF obj compilation failed") |      \
        $(LLC) -mattr=dwarfris -march=bpf -mcpu=v3 $4 -filetype=obj -o $2
 endef
 # Similar to CLANG_BPF_BUILD_RULE, but with disabled alu32
 define CLANG_NOALU32_BPF_BUILD_RULE
        $(call msg,CLNG-LLC,$(TRUNNER_BINARY),$2)
-       ($(CLANG) $3 -O2 -target bpf -emit-llvm                         \
+       $(Q)($(CLANG) $3 -O2 -target bpf -emit-llvm                     \
                -c $1 -o - || echo "BPF obj compilation failed") |      \
        $(LLC) -march=bpf -mcpu=v2 $4 -filetype=obj -o $2
 endef
 # Similar to CLANG_BPF_BUILD_RULE, but using native Clang and bpf LLC
 define CLANG_NATIVE_BPF_BUILD_RULE
        $(call msg,CLNG-BPF,$(TRUNNER_BINARY),$2)
-       ($(CLANG) $3 -O2 -emit-llvm                                     \
+       $(Q)($(CLANG) $3 -O2 -emit-llvm                                 \
                -c $1 -o - || echo "BPF obj compilation failed") |      \
        $(LLC) -march=bpf -mcpu=v3 $4 -filetype=obj -o $2
 endef
 # Build BPF object using GCC
 define GCC_BPF_BUILD_RULE
        $(call msg,GCC-BPF,$(TRUNNER_BINARY),$2)
-       $(BPF_GCC) $3 $4 -O2 -c $1 -o $2
+       $(Q)$(BPF_GCC) $3 $4 -O2 -c $1 -o $2
 endef
 
 SKEL_BLACKLIST := btf__% test_pinning_invalid.c test_sk_assign.c
@@ -300,7 +305,7 @@ ifeq ($($(TRUNNER_OUTPUT)-dir),)
 $(TRUNNER_OUTPUT)-dir := y
 $(TRUNNER_OUTPUT):
        $$(call msg,MKDIR,,$$@)
-       mkdir -p $$@
+       $(Q)mkdir -p $$@
 endif
 
 # ensure we set up BPF objects generation rule just once for a given
@@ -320,7 +325,7 @@ $(TRUNNER_BPF_SKELS): $(TRUNNER_OUTPUT)/%.skel.h:                   \
                      $(TRUNNER_OUTPUT)/%.o                             \
                      | $(BPFTOOL) $(TRUNNER_OUTPUT)
        $$(call msg,GEN-SKEL,$(TRUNNER_BINARY),$$@)
-       $$(BPFTOOL) gen skeleton $$< > $$@
+       $(Q)$$(BPFTOOL) gen skeleton $$< > $$@
 endif
 
 # ensure we set up tests.h header generation rule just once
@@ -344,7 +349,7 @@ $(TRUNNER_TEST_OBJS): $(TRUNNER_OUTPUT)/%.test.o:                   \
                      $(TRUNNER_BPF_SKELS)                              \
                      $$(BPFOBJ) | $(TRUNNER_OUTPUT)
        $$(call msg,TEST-OBJ,$(TRUNNER_BINARY),$$@)
-       cd $$(@D) && $$(CC) -I. $$(CFLAGS) -c $(CURDIR)/$$< $$(LDLIBS) -o $$(@F)
+       $(Q)cd $$(@D) && $$(CC) -I. $$(CFLAGS) -c $(CURDIR)/$$< $$(LDLIBS) -o $$(@F)
 
 $(TRUNNER_EXTRA_OBJS): $(TRUNNER_OUTPUT)/%.o:                          \
                       %.c                                              \
@@ -352,13 +357,13 @@ $(TRUNNER_EXTRA_OBJS): $(TRUNNER_OUTPUT)/%.o:                             \
                       $(TRUNNER_TESTS_HDR)                             \
                       $$(BPFOBJ) | $(TRUNNER_OUTPUT)
        $$(call msg,EXT-OBJ,$(TRUNNER_BINARY),$$@)
-       $$(CC) $$(CFLAGS) -c $$< $$(LDLIBS) -o $$@
+       $(Q)$$(CC) $$(CFLAGS) -c $$< $$(LDLIBS) -o $$@
 
 # only copy extra resources if in flavored build
 $(TRUNNER_BINARY)-extras: $(TRUNNER_EXTRA_FILES) | $(TRUNNER_OUTPUT)
 ifneq ($2,)
        $$(call msg,EXT-COPY,$(TRUNNER_BINARY),$(TRUNNER_EXTRA_FILES))
-       cp -a $$^ $(TRUNNER_OUTPUT)/
+       $(Q)cp -a $$^ $(TRUNNER_OUTPUT)/
 endif
 
 $(OUTPUT)/$(TRUNNER_BINARY): $(TRUNNER_TEST_OBJS)                      \
@@ -366,8 +371,8 @@ $(OUTPUT)/$(TRUNNER_BINARY): $(TRUNNER_TEST_OBJS)                   \
                             $(RESOLVE_BTFIDS)                          \
                             | $(TRUNNER_BINARY)-extras
        $$(call msg,BINARY,,$$@)
-       $$(CC) $$(CFLAGS) $$(filter %.a %.o,$$^) $$(LDLIBS) -o $$@
-       $(RESOLVE_BTFIDS) --no-fail --btf btf_data.o $$@
+       $(Q)$$(CC) $$(CFLAGS) $$(filter %.a %.o,$$^) $$(LDLIBS) -o $$@
+       $(Q)$(RESOLVE_BTFIDS) --no-fail --btf btf_data.o $$@
 
 endef
 
@@ -420,17 +425,17 @@ verifier/tests.h: verifier/*.c
                ) > verifier/tests.h)
 $(OUTPUT)/test_verifier: test_verifier.c verifier/tests.h $(BPFOBJ) | $(OUTPUT)
        $(call msg,BINARY,,$@)
-       $(CC) $(CFLAGS) $(filter %.a %.o %.c,$^) $(LDLIBS) -o $@
+       $(Q)$(CC) $(CFLAGS) $(filter %.a %.o %.c,$^) $(LDLIBS) -o $@
 
 # Make sure we are able to include and link libbpf against c++.
 $(OUTPUT)/test_cpp: test_cpp.cpp $(OUTPUT)/test_core_extern.skel.h $(BPFOBJ)
        $(call msg,CXX,,$@)
-       $(CXX) $(CFLAGS) $^ $(LDLIBS) -o $@
+       $(Q)$(CXX) $(CFLAGS) $^ $(LDLIBS) -o $@
 
 # Benchmark runner
 $(OUTPUT)/bench_%.o: benchs/bench_%.c bench.h
        $(call msg,CC,,$@)
-       $(CC) $(CFLAGS) -c $(filter %.c,$^) $(LDLIBS) -o $@
+       $(Q)$(CC) $(CFLAGS) -c $(filter %.c,$^) $(LDLIBS) -o $@
 $(OUTPUT)/bench_rename.o: $(OUTPUT)/test_overhead.skel.h
 $(OUTPUT)/bench_trigger.o: $(OUTPUT)/trigger_bench.skel.h
 $(OUTPUT)/bench_ringbufs.o: $(OUTPUT)/ringbuf_bench.skel.h \
@@ -443,7 +448,7 @@ $(OUTPUT)/bench: $(OUTPUT)/bench.o $(OUTPUT)/testing_helpers.o \
                 $(OUTPUT)/bench_trigger.o \
                 $(OUTPUT)/bench_ringbufs.o
        $(call msg,BINARY,,$@)
-       $(CC) $(LDFLAGS) -o $@ $(filter %.a %.o,$^) $(LDLIBS)
+       $(Q)$(CC) $(LDFLAGS) -o $@ $(filter %.a %.o,$^) $(LDLIBS)
 
 EXTRA_CLEAN := $(TEST_CUSTOM_PROGS) $(SCRATCH_DIR)                     \
        prog_tests/tests.h map_tests/tests.h verifier/tests.h           \
index 4ffefdc..7375d9a 100644 (file)
@@ -468,6 +468,7 @@ static void test_bpf_hash_map(void)
        DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
        struct bpf_iter_bpf_hash_map *skel;
        int err, i, len, map_fd, iter_fd;
+       union bpf_iter_link_info linfo;
        __u64 val, expected_val = 0;
        struct bpf_link *link;
        struct key_t {
@@ -490,13 +491,16 @@ static void test_bpf_hash_map(void)
                goto out;
 
        /* iterator with hashmap2 and hashmap3 should fail */
-       opts.map_fd = bpf_map__fd(skel->maps.hashmap2);
+       memset(&linfo, 0, sizeof(linfo));
+       linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap2);
+       opts.link_info = &linfo;
+       opts.link_info_len = sizeof(linfo);
        link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
        if (CHECK(!IS_ERR(link), "attach_iter",
                  "attach_iter for hashmap2 unexpected succeeded\n"))
                goto out;
 
-       opts.map_fd = bpf_map__fd(skel->maps.hashmap3);
+       linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap3);
        link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
        if (CHECK(!IS_ERR(link), "attach_iter",
                  "attach_iter for hashmap3 unexpected succeeded\n"))
@@ -519,7 +523,7 @@ static void test_bpf_hash_map(void)
                        goto out;
        }
 
-       opts.map_fd = map_fd;
+       linfo.map.map_fd = map_fd;
        link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
        if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
                goto out;
@@ -562,6 +566,7 @@ static void test_bpf_percpu_hash_map(void)
        DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
        struct bpf_iter_bpf_percpu_hash_map *skel;
        int err, i, j, len, map_fd, iter_fd;
+       union bpf_iter_link_info linfo;
        __u32 expected_val = 0;
        struct bpf_link *link;
        struct key_t {
@@ -606,7 +611,10 @@ static void test_bpf_percpu_hash_map(void)
                        goto out;
        }
 
-       opts.map_fd = map_fd;
+       memset(&linfo, 0, sizeof(linfo));
+       linfo.map.map_fd = map_fd;
+       opts.link_info = &linfo;
+       opts.link_info_len = sizeof(linfo);
        link = bpf_program__attach_iter(skel->progs.dump_bpf_percpu_hash_map, &opts);
        if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
                goto out;
@@ -649,6 +657,7 @@ static void test_bpf_array_map(void)
        DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
        __u32 expected_key = 0, res_first_key;
        struct bpf_iter_bpf_array_map *skel;
+       union bpf_iter_link_info linfo;
        int err, i, map_fd, iter_fd;
        struct bpf_link *link;
        char buf[64] = {};
@@ -673,7 +682,10 @@ static void test_bpf_array_map(void)
                        goto out;
        }
 
-       opts.map_fd = map_fd;
+       memset(&linfo, 0, sizeof(linfo));
+       linfo.map.map_fd = map_fd;
+       opts.link_info = &linfo;
+       opts.link_info_len = sizeof(linfo);
        link = bpf_program__attach_iter(skel->progs.dump_bpf_array_map, &opts);
        if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
                goto out;
@@ -730,6 +742,7 @@ static void test_bpf_percpu_array_map(void)
        DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
        struct bpf_iter_bpf_percpu_array_map *skel;
        __u32 expected_key = 0, expected_val = 0;
+       union bpf_iter_link_info linfo;
        int err, i, j, map_fd, iter_fd;
        struct bpf_link *link;
        char buf[64];
@@ -765,7 +778,10 @@ static void test_bpf_percpu_array_map(void)
                        goto out;
        }
 
-       opts.map_fd = map_fd;
+       memset(&linfo, 0, sizeof(linfo));
+       linfo.map.map_fd = map_fd;
+       opts.link_info = &linfo;
+       opts.link_info_len = sizeof(linfo);
        link = bpf_program__attach_iter(skel->progs.dump_bpf_percpu_array_map, &opts);
        if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
                goto out;
@@ -803,6 +819,7 @@ static void test_bpf_sk_storage_map(void)
        DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
        int err, i, len, map_fd, iter_fd, num_sockets;
        struct bpf_iter_bpf_sk_storage_map *skel;
+       union bpf_iter_link_info linfo;
        int sock_fd[3] = {-1, -1, -1};
        __u32 val, expected_val = 0;
        struct bpf_link *link;
@@ -829,7 +846,10 @@ static void test_bpf_sk_storage_map(void)
                        goto out;
        }
 
-       opts.map_fd = map_fd;
+       memset(&linfo, 0, sizeof(linfo));
+       linfo.map.map_fd = map_fd;
+       opts.link_info = &linfo;
+       opts.link_info_len = sizeof(linfo);
        link = bpf_program__attach_iter(skel->progs.dump_bpf_sk_storage_map, &opts);
        if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
                goto out;
@@ -871,6 +891,7 @@ static void test_rdonly_buf_out_of_bound(void)
 {
        DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
        struct bpf_iter_test_kern5 *skel;
+       union bpf_iter_link_info linfo;
        struct bpf_link *link;
 
        skel = bpf_iter_test_kern5__open_and_load();
@@ -878,7 +899,10 @@ static void test_rdonly_buf_out_of_bound(void)
                  "skeleton open_and_load failed\n"))
                return;
 
-       opts.map_fd = bpf_map__fd(skel->maps.hashmap1);
+       memset(&linfo, 0, sizeof(linfo));
+       linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap1);
+       opts.link_info = &linfo;
+       opts.link_info_len = sizeof(linfo);
        link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
        if (CHECK(!IS_ERR(link), "attach_iter", "unexpected success\n"))
                bpf_link__destroy(link);
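All the hunks above reflect the same libbpf API change: the target map fd now travels through a zeroed union bpf_iter_link_info referenced from opts.link_info/opts.link_info_len instead of the old opts.map_fd field. A minimal sketch of the new attach pattern, assuming prog is a loaded iterator program and map_fd a valid map fd (both names are placeholders, not taken from the patch):

#include <string.h>
#include <linux/bpf.h>
#include <bpf/libbpf.h>

/* sketch: attach a BPF map iterator via the bpf_iter_link_info-based opts */
static struct bpf_link *attach_map_iter(struct bpf_program *prog, int map_fd)
{
        DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
        union bpf_iter_link_info linfo;

        memset(&linfo, 0, sizeof(linfo));
        linfo.map.map_fd = map_fd;              /* which map to iterate */
        opts.link_info = &linfo;                /* replaces the old opts.map_fd */
        opts.link_info_len = sizeof(linfo);

        return bpf_program__attach_iter(prog, &opts); /* IS_ERR-encoded on failure */
}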
index 7afa416..284d592 100644 (file)
@@ -159,15 +159,15 @@ void test_bpf_obj_id(void)
                /* Check getting link info */
                info_len = sizeof(struct bpf_link_info) * 2;
                bzero(&link_infos[i], info_len);
-               link_infos[i].raw_tracepoint.tp_name = (__u64)&tp_name;
+               link_infos[i].raw_tracepoint.tp_name = ptr_to_u64(&tp_name);
                link_infos[i].raw_tracepoint.tp_name_len = sizeof(tp_name);
                err = bpf_obj_get_info_by_fd(bpf_link__fd(links[i]),
                                             &link_infos[i], &info_len);
                if (CHECK(err ||
                          link_infos[i].type != BPF_LINK_TYPE_RAW_TRACEPOINT ||
                          link_infos[i].prog_id != prog_infos[i].id ||
-                         link_infos[i].raw_tracepoint.tp_name != (__u64)&tp_name ||
-                         strcmp((char *)link_infos[i].raw_tracepoint.tp_name,
+                         link_infos[i].raw_tracepoint.tp_name != ptr_to_u64(&tp_name) ||
+                         strcmp(u64_to_ptr(link_infos[i].raw_tracepoint.tp_name),
                                 "sys_enter") ||
                          info_len != sizeof(struct bpf_link_info),
                          "get-link-info(fd)",
@@ -178,7 +178,7 @@ void test_bpf_obj_id(void)
                          link_infos[i].type, BPF_LINK_TYPE_RAW_TRACEPOINT,
                          link_infos[i].id,
                          link_infos[i].prog_id, prog_infos[i].id,
-                         (char *)link_infos[i].raw_tracepoint.tp_name,
+                         (const char *)u64_to_ptr(link_infos[i].raw_tracepoint.tp_name),
                          "sys_enter"))
                        goto done;
 
index cb33a7e..39fb81d 100644 (file)
@@ -12,15 +12,16 @@ void btf_dump_printf(void *ctx, const char *fmt, va_list args)
 static struct btf_dump_test_case {
        const char *name;
        const char *file;
+       bool known_ptr_sz;
        struct btf_dump_opts opts;
 } btf_dump_test_cases[] = {
-       {"btf_dump: syntax", "btf_dump_test_case_syntax", {}},
-       {"btf_dump: ordering", "btf_dump_test_case_ordering", {}},
-       {"btf_dump: padding", "btf_dump_test_case_padding", {}},
-       {"btf_dump: packing", "btf_dump_test_case_packing", {}},
-       {"btf_dump: bitfields", "btf_dump_test_case_bitfields", {}},
-       {"btf_dump: multidim", "btf_dump_test_case_multidim", {}},
-       {"btf_dump: namespacing", "btf_dump_test_case_namespacing", {}},
+       {"btf_dump: syntax", "btf_dump_test_case_syntax", true, {}},
+       {"btf_dump: ordering", "btf_dump_test_case_ordering", false, {}},
+       {"btf_dump: padding", "btf_dump_test_case_padding", true, {}},
+       {"btf_dump: packing", "btf_dump_test_case_packing", true, {}},
+       {"btf_dump: bitfields", "btf_dump_test_case_bitfields", true, {}},
+       {"btf_dump: multidim", "btf_dump_test_case_multidim", false, {}},
+       {"btf_dump: namespacing", "btf_dump_test_case_namespacing", false, {}},
 };
 
 static int btf_dump_all_types(const struct btf *btf,
@@ -62,6 +63,18 @@ static int test_btf_dump_case(int n, struct btf_dump_test_case *t)
                goto done;
        }
 
+       /* tests without t->known_ptr_sz have no "long" or "unsigned long" type,
+        * so it's impossible to determine the correct pointer size; when they
+        * do have one, it should be 8 regardless of host architecture, because
+        * the BPF target is always 64-bit
+        */
+       if (!t->known_ptr_sz) {
+               btf__set_pointer_size(btf, 8);
+       } else {
+               CHECK(btf__pointer_size(btf) != 8, "ptr_sz", "exp %d, got %zu\n",
+                     8, btf__pointer_size(btf));
+       }
+
        snprintf(out_file, sizeof(out_file), "/tmp/%s.output.XXXXXX", t->file);
        fd = mkstemp(out_file);
        if (CHECK(fd < 0, "create_tmp", "failed to create file: %d\n", fd)) {
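The known_ptr_sz handling above relies on libbpf's btf__pointer_size()/btf__set_pointer_size() pair. A minimal sketch of the same idea outside the test harness, assuming "prog.o" stands in for any BPF object file carrying BTF:

#include <bpf/btf.h>
#include <bpf/libbpf.h>

/* sketch: the BPF target is always 64-bit, so force 8-byte pointers when
 * the BTF has no "long" type libbpf could infer the pointer size from.
 */
static struct btf *load_btf_for_dump(const char *path /* e.g. "prog.o" */)
{
        struct btf *btf = btf__parse_elf(path, NULL);

        if (libbpf_get_error(btf))
                return NULL;
        if (btf__pointer_size(btf) != 8)
                btf__set_pointer_size(btf, 8);
        return btf;
}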
index b093787..1931a15 100644 (file)
@@ -159,8 +159,8 @@ void test_core_extern(void)
                exp = (uint64_t *)&t->data;
                for (j = 0; j < n; j++) {
                        CHECK(got[j] != exp[j], "check_res",
-                             "result #%d: expected %lx, but got %lx\n",
-                              j, exp[j], got[j]);
+                             "result #%d: expected %llx, but got %llx\n",
+                              j, (__u64)exp[j], (__u64)got[j]);
                }
 cleanup:
                test_core_extern__destroy(skel);
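The %lx-to-%llx changes above are the usual 32-bit portability fix: uint64_t is unsigned long on 64-bit glibc but unsigned long long on 32-bit, whereas a value cast to __u64 always matches %llx. A tiny standalone illustration (not part of the test itself):

#include <stdio.h>
#include <stdint.h>
#include <linux/types.h>

int main(void)
{
        uint64_t got = 0x0123456789abcdefULL;

        /* uint64_t maps to different base types per arch/libc; casting to
         * __u64 keeps the %llx conversion specifier correct everywhere.
         */
        printf("got %llx\n", (__u64)got);
        return 0;
}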
index 084ed26..a54eafc 100644 (file)
                .union_sz = sizeof(((type *)0)->union_field),           \
                .arr_sz = sizeof(((type *)0)->arr_field),               \
                .arr_elem_sz = sizeof(((type *)0)->arr_field[0]),       \
-               .ptr_sz = sizeof(((type *)0)->ptr_field),               \
-               .enum_sz = sizeof(((type *)0)->enum_field),     \
+               .ptr_sz = 8, /* always 8-byte pointer for BPF */        \
+               .enum_sz = sizeof(((type *)0)->enum_field),             \
        }
 
 #define SIZE_CASE(name) {                                              \
@@ -432,20 +432,20 @@ static struct core_reloc_test_case test_cases[] = {
                .sb4 = -1,
                .sb20 = -0x17654321,
                .u32 = 0xBEEF,
-               .s32 = -0x3FEDCBA987654321,
+               .s32 = -0x3FEDCBA987654321LL,
        }),
        BITFIELDS_CASE(bitfields___bitfield_vs_int, {
-               .ub1 = 0xFEDCBA9876543210,
+               .ub1 = 0xFEDCBA9876543210LL,
                .ub2 = 0xA6,
-               .ub7 = -0x7EDCBA987654321,
-               .sb4 = -0x6123456789ABCDE,
-               .sb20 = 0xD00D,
+               .ub7 = -0x7EDCBA987654321LL,
+               .sb4 = -0x6123456789ABCDELL,
+               .sb20 = 0xD00DLL,
                .u32 = -0x76543,
-               .s32 = 0x0ADEADBEEFBADB0B,
+               .s32 = 0x0ADEADBEEFBADB0BLL,
        }),
        BITFIELDS_CASE(bitfields___just_big_enough, {
-               .ub1 = 0xF,
-               .ub2 = 0x0812345678FEDCBA,
+               .ub1 = 0xFLL,
+               .ub2 = 0x0812345678FEDCBALL,
        }),
        BITFIELDS_ERR_CASE(bitfields___err_too_big_bitfield),
 
index a895bfe..197d0d2 100644 (file)
@@ -16,7 +16,7 @@ static void test_fexit_bpf2bpf_common(const char *obj_file,
        __u32 duration = 0, retval;
        struct bpf_map *data_map;
        const int zero = 0;
-       u64 *result = NULL;
+       __u64 *result = NULL;
 
        err = bpf_prog_load(target_obj_file, BPF_PROG_TYPE_UNSPEC,
                            &pkt_obj, &pkt_fd);
@@ -29,7 +29,7 @@ static void test_fexit_bpf2bpf_common(const char *obj_file,
 
        link = calloc(sizeof(struct bpf_link *), prog_cnt);
        prog = calloc(sizeof(struct bpf_program *), prog_cnt);
-       result = malloc((prog_cnt + 32 /* spare */) * sizeof(u64));
+       result = malloc((prog_cnt + 32 /* spare */) * sizeof(__u64));
        if (CHECK(!link || !prog || !result, "alloc_memory",
                  "failed to alloc memory"))
                goto close_prog;
@@ -72,7 +72,7 @@ static void test_fexit_bpf2bpf_common(const char *obj_file,
                goto close_prog;
 
        for (i = 0; i < prog_cnt; i++)
-               if (CHECK(result[i] != 1, "result", "fexit_bpf2bpf failed err %ld\n",
+               if (CHECK(result[i] != 1, "result", "fexit_bpf2bpf failed err %llu\n",
                          result[i]))
                        goto close_prog;
 
index f11f187..cd6dc80 100644 (file)
@@ -591,7 +591,7 @@ void test_flow_dissector(void)
                CHECK_ATTR(tattr.data_size_out != sizeof(flow_keys) ||
                           err || tattr.retval != 1,
                           tests[i].name,
-                          "err %d errno %d retval %d duration %d size %u/%lu\n",
+                          "err %d errno %d retval %d duration %d size %u/%zu\n",
                           err, errno, tattr.retval, tattr.duration,
                           tattr.data_size_out, sizeof(flow_keys));
                CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys);
index e3cb62b..9efa7e5 100644 (file)
@@ -5,7 +5,7 @@
 static void test_global_data_number(struct bpf_object *obj, __u32 duration)
 {
        int i, err, map_fd;
-       uint64_t num;
+       __u64 num;
 
        map_fd = bpf_find_map(__func__, obj, "result_number");
        if (CHECK_FAIL(map_fd < 0))
@@ -14,7 +14,7 @@ static void test_global_data_number(struct bpf_object *obj, __u32 duration)
        struct {
                char *name;
                uint32_t key;
-               uint64_t num;
+               __u64 num;
        } tests[] = {
                { "relocate .bss reference",     0, 0 },
                { "relocate .data reference",    1, 42 },
@@ -32,7 +32,7 @@ static void test_global_data_number(struct bpf_object *obj, __u32 duration)
        for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
                err = bpf_map_lookup_elem(map_fd, &tests[i].key, &num);
                CHECK(err || num != tests[i].num, tests[i].name,
-                     "err %d result %lx expected %lx\n",
+                     "err %d result %llx expected %llx\n",
                      err, num, tests[i].num);
        }
 }
index 43d0b55..9c3c5c0 100644 (file)
@@ -21,7 +21,7 @@ void test_mmap(void)
        const long page_size = sysconf(_SC_PAGE_SIZE);
        int err, duration = 0, i, data_map_fd, data_map_id, tmp_fd, rdmap_fd;
        struct bpf_map *data_map, *bss_map;
-       void *bss_mmaped = NULL, *map_mmaped = NULL, *tmp1, *tmp2;
+       void *bss_mmaped = NULL, *map_mmaped = NULL, *tmp0, *tmp1, *tmp2;
        struct test_mmap__bss *bss_data;
        struct bpf_map_info map_info;
        __u32 map_info_sz = sizeof(map_info);
@@ -183,16 +183,23 @@ void test_mmap(void)
 
        /* check some more advanced mmap() manipulations */
 
+       tmp0 = mmap(NULL, 4 * page_size, PROT_READ, MAP_SHARED | MAP_ANONYMOUS,
+                         -1, 0);
+       if (CHECK(tmp0 == MAP_FAILED, "adv_mmap0", "errno %d\n", errno))
+               goto cleanup;
+
        /* map all but last page: pages 1-3 mapped */
-       tmp1 = mmap(NULL, 3 * page_size, PROT_READ, MAP_SHARED,
+       tmp1 = mmap(tmp0, 3 * page_size, PROT_READ, MAP_SHARED | MAP_FIXED,
                          data_map_fd, 0);
-       if (CHECK(tmp1 == MAP_FAILED, "adv_mmap1", "errno %d\n", errno))
+       if (CHECK(tmp0 != tmp1, "adv_mmap1", "tmp0: %p, tmp1: %p\n", tmp0, tmp1)) {
+               munmap(tmp0, 4 * page_size);
                goto cleanup;
+       }
 
        /* unmap second page: pages 1, 3 mapped */
        err = munmap(tmp1 + page_size, page_size);
        if (CHECK(err, "adv_mmap2", "errno %d\n", errno)) {
-               munmap(tmp1, map_sz);
+               munmap(tmp1, 4 * page_size);
                goto cleanup;
        }
 
@@ -201,7 +208,7 @@ void test_mmap(void)
                    MAP_SHARED | MAP_FIXED, data_map_fd, 0);
        if (CHECK(tmp2 == MAP_FAILED, "adv_mmap3", "errno %d\n", errno)) {
                munmap(tmp1, page_size);
-               munmap(tmp1 + 2*page_size, page_size);
+               munmap(tmp1 + 2*page_size, 2 * page_size);
                goto cleanup;
        }
        CHECK(tmp1 + page_size != tmp2, "adv_mmap4",
@@ -211,7 +218,7 @@ void test_mmap(void)
        tmp2 = mmap(tmp1, 4 * page_size, PROT_READ, MAP_SHARED | MAP_FIXED,
                    data_map_fd, 0);
        if (CHECK(tmp2 == MAP_FAILED, "adv_mmap5", "errno %d\n", errno)) {
-               munmap(tmp1, 3 * page_size); /* unmap page 1 */
+               munmap(tmp1, 4 * page_size); /* unmap page 1 */
                goto cleanup;
        }
        CHECK(tmp1 != tmp2, "adv_mmap6", "tmp1: %p, tmp2: %p\n", tmp1, tmp2);
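The adv_mmap changes above switch to the reserve-then-overlay idiom: grab the whole window with an anonymous mapping first, then place the real mapping inside it with MAP_FIXED so no unrelated mapping can be clobbered. A minimal sketch of that idiom, assuming fd is any mmap-able descriptor (in the test it is a BPF map fd) and page_size comes from sysconf(_SC_PAGE_SIZE):

#include <stddef.h>
#include <sys/mman.h>

/* sketch: reserve a 4-page window, then map the first 3 pages of fd over it */
static void *map_into_reserved_window(int fd, long page_size)
{
        void *base, *p;

        base = mmap(NULL, 4 * page_size, PROT_READ,
                    MAP_SHARED | MAP_ANONYMOUS, -1, 0);
        if (base == MAP_FAILED)
                return NULL;

        p = mmap(base, 3 * page_size, PROT_READ,
                 MAP_SHARED | MAP_FIXED, fd, 0);
        if (p == MAP_FAILED) {
                munmap(base, 4 * page_size);
                return NULL;
        }
        return p;       /* caller munmap()s the full 4 * page_size window */
}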
index dde2b7a..935a294 100644 (file)
@@ -28,7 +28,7 @@ void test_prog_run_xattr(void)
              "err %d errno %d retval %d\n", err, errno, tattr.retval);
 
        CHECK_ATTR(tattr.data_size_out != sizeof(pkt_v4), "data_size_out",
-             "incorrect output size, want %lu have %u\n",
+             "incorrect output size, want %zu have %u\n",
              sizeof(pkt_v4), tattr.data_size_out);
 
        CHECK_ATTR(buf[5] != 0, "overflow",
index 504abb7..7043e6d 100644 (file)
@@ -48,21 +48,19 @@ static void test_send_signal_common(struct perf_event_attr *attr,
                close(pipe_p2c[1]); /* close write */
 
                /* notify parent signal handler is installed */
-               write(pipe_c2p[1], buf, 1);
+               CHECK(write(pipe_c2p[1], buf, 1) != 1, "pipe_write", "err %d\n", -errno);
 
                /* make sure parent enabled bpf program to send_signal */
-               read(pipe_p2c[0], buf, 1);
+               CHECK(read(pipe_p2c[0], buf, 1) != 1, "pipe_read", "err %d\n", -errno);
 
                /* wait a little for signal handler */
                sleep(1);
 
-               if (sigusr1_received)
-                       write(pipe_c2p[1], "2", 1);
-               else
-                       write(pipe_c2p[1], "0", 1);
+               buf[0] = sigusr1_received ? '2' : '0';
+               CHECK(write(pipe_c2p[1], buf, 1) != 1, "pipe_write", "err %d\n", -errno);
 
                /* wait for parent notification and exit */
-               read(pipe_p2c[0], buf, 1);
+               CHECK(read(pipe_p2c[0], buf, 1) != 1, "pipe_read", "err %d\n", -errno);
 
                close(pipe_c2p[1]);
                close(pipe_p2c[0]);
@@ -99,7 +97,7 @@ static void test_send_signal_common(struct perf_event_attr *attr,
        }
 
        /* wait until child signal handler installed */
-       read(pipe_c2p[0], buf, 1);
+       CHECK(read(pipe_c2p[0], buf, 1) != 1, "pipe_read", "err %d\n", -errno);
 
        /* trigger the bpf send_signal */
        skel->bss->pid = pid;
@@ -107,7 +105,7 @@ static void test_send_signal_common(struct perf_event_attr *attr,
        skel->bss->signal_thread = signal_thread;
 
        /* notify child that bpf program can send_signal now */
-       write(pipe_p2c[1], buf, 1);
+       CHECK(write(pipe_p2c[1], buf, 1) != 1, "pipe_write", "err %d\n", -errno);
 
        /* wait for result */
        err = read(pipe_c2p[0], buf, 1);
@@ -121,7 +119,7 @@ static void test_send_signal_common(struct perf_event_attr *attr,
        CHECK(buf[0] != '2', test_name, "incorrect result\n");
 
        /* notify child safe to exit */
-       write(pipe_p2c[1], buf, 1);
+       CHECK(write(pipe_p2c[1], buf, 1) != 1, "pipe_write", "err %d\n", -errno);
 
 disable_pmu:
        close(pmu_fd);
index c571584..9ff0412 100644 (file)
@@ -309,6 +309,7 @@ static void v4_to_v6(struct sockaddr_storage *ss)
        v6->sin6_addr.s6_addr[10] = 0xff;
        v6->sin6_addr.s6_addr[11] = 0xff;
        memcpy(&v6->sin6_addr.s6_addr[12], &v4.sin_addr.s_addr, 4);
+       memset(&v6->sin6_addr.s6_addr[0], 0, 10);
 }
 
 static int udp_recv_send(int server_fd)
index 25de86a..fafedda 100644 (file)
@@ -81,7 +81,7 @@ void test_skb_ctx(void)
 
        CHECK_ATTR(tattr.ctx_size_out != sizeof(skb),
                   "ctx_size_out",
-                  "incorrect output size, want %lu have %u\n",
+                  "incorrect output size, want %zu have %u\n",
                   sizeof(skb), tattr.ctx_size_out);
 
        for (i = 0; i < 5; i++)
index f002e30..11a769e 100644 (file)
@@ -6,11 +6,13 @@ static __u64 read_perf_max_sample_freq(void)
 {
        __u64 sample_freq = 5000; /* fallback to 5000 on error */
        FILE *f;
+       __u32 duration = 0;
 
        f = fopen("/proc/sys/kernel/perf_event_max_sample_rate", "r");
        if (f == NULL)
                return sample_freq;
-       fscanf(f, "%llu", &sample_freq);
+       CHECK(fscanf(f, "%llu", &sample_freq) != 1, "Get max sample rate",
+                 "return default value: 5000,err %d\n", -errno);
        fclose(f);
        return sample_freq;
 }
index c75525e..dd324b4 100644 (file)
@@ -44,25 +44,25 @@ void test_varlen(void)
        CHECK_VAL(bss->payload1_len2, size2);
        CHECK_VAL(bss->total1, size1 + size2);
        CHECK(memcmp(bss->payload1, exp_str, size1 + size2), "content_check",
-             "doesn't match!");
+             "doesn't match!\n");
 
        CHECK_VAL(data->payload2_len1, size1);
        CHECK_VAL(data->payload2_len2, size2);
        CHECK_VAL(data->total2, size1 + size2);
        CHECK(memcmp(data->payload2, exp_str, size1 + size2), "content_check",
-             "doesn't match!");
+             "doesn't match!\n");
 
        CHECK_VAL(data->payload3_len1, size1);
        CHECK_VAL(data->payload3_len2, size2);
        CHECK_VAL(data->total3, size1 + size2);
        CHECK(memcmp(data->payload3, exp_str, size1 + size2), "content_check",
-             "doesn't match!");
+             "doesn't match!\n");
 
        CHECK_VAL(data->payload4_len1, size1);
        CHECK_VAL(data->payload4_len2, size2);
        CHECK_VAL(data->total4, size1 + size2);
        CHECK(memcmp(data->payload4, exp_str, size1 + size2), "content_check",
-             "doesn't match!");
+             "doesn't match!\n");
 cleanup:
        test_varlen__destroy(skel);
 }
index 34d8471..69139ed 100644 (file)
@@ -1,5 +1,10 @@
 #include <stdint.h>
 #include <stdbool.h>
+
+void preserce_ptr_sz_fn(long x) {}
+
+#define __bpf_aligned __attribute__((aligned(8)))
+
 /*
  * KERNEL
  */
@@ -444,51 +449,51 @@ struct core_reloc_primitives {
        char a;
        int b;
        enum core_reloc_primitives_enum c;
-       void *d;
-       int (*f)(const char *);
+       void *d __bpf_aligned;
+       int (*f)(const char *) __bpf_aligned;
 };
 
 struct core_reloc_primitives___diff_enum_def {
        char a;
        int b;
-       void *d;
-       int (*f)(const char *);
+       void *d __bpf_aligned;
+       int (*f)(const char *) __bpf_aligned;
        enum {
                X = 100,
                Y = 200,
-       } c; /* inline enum def with differing set of values */
+       } c __bpf_aligned; /* inline enum def with differing set of values */
 };
 
 struct core_reloc_primitives___diff_func_proto {
-       void (*f)(int); /* incompatible function prototype */
-       void *d;
-       enum core_reloc_primitives_enum c;
+       void (*f)(int) __bpf_aligned; /* incompatible function prototype */
+       void *d __bpf_aligned;
+       enum core_reloc_primitives_enum c __bpf_aligned;
        int b;
        char a;
 };
 
 struct core_reloc_primitives___diff_ptr_type {
-       const char * const d; /* different pointee type + modifiers */
-       char a;
+       const char * const d __bpf_aligned; /* different pointee type + modifiers */
+       char a __bpf_aligned;
        int b;
        enum core_reloc_primitives_enum c;
-       int (*f)(const char *);
+       int (*f)(const char *) __bpf_aligned;
 };
 
 struct core_reloc_primitives___err_non_enum {
        char a[1];
        int b;
        int c; /* int instead of enum */
-       void *d;
-       int (*f)(const char *);
+       void *d __bpf_aligned;
+       int (*f)(const char *) __bpf_aligned;
 };
 
 struct core_reloc_primitives___err_non_int {
        char a[1];
-       int *b; /* ptr instead of int */
-       enum core_reloc_primitives_enum c;
-       void *d;
-       int (*f)(const char *);
+       int *b __bpf_aligned; /* ptr instead of int */
+       enum core_reloc_primitives_enum c __bpf_aligned;
+       void *d __bpf_aligned;
+       int (*f)(const char *) __bpf_aligned;
 };
 
 struct core_reloc_primitives___err_non_ptr {
@@ -496,7 +501,7 @@ struct core_reloc_primitives___err_non_ptr {
        int b;
        enum core_reloc_primitives_enum c;
        int d; /* int instead of ptr */
-       int (*f)(const char *);
+       int (*f)(const char *) __bpf_aligned;
 };
 
 /*
@@ -507,7 +512,7 @@ struct core_reloc_mods_output {
 };
 
 typedef const int int_t;
-typedef const char *char_ptr_t;
+typedef const char *char_ptr_t __bpf_aligned;
 typedef const int arr_t[7];
 
 struct core_reloc_mods_substruct {
@@ -523,9 +528,9 @@ typedef struct {
 struct core_reloc_mods {
        int a;
        int_t b;
-       char *c;
+       char *c __bpf_aligned;
        char_ptr_t d;
-       int e[3];
+       int e[3] __bpf_aligned;
        arr_t f;
        struct core_reloc_mods_substruct g;
        core_reloc_mods_substruct_t h;
@@ -535,9 +540,9 @@ struct core_reloc_mods {
 struct core_reloc_mods___mod_swap {
        int b;
        int_t a;
-       char *d;
+       char *d __bpf_aligned;
        char_ptr_t c;
-       int f[3];
+       int f[3] __bpf_aligned;
        arr_t e;
        struct {
                int y;
@@ -555,7 +560,7 @@ typedef arr1_t arr2_t;
 typedef arr2_t arr3_t;
 typedef arr3_t arr4_t;
 
-typedef const char * const volatile fancy_char_ptr_t;
+typedef const char * const volatile fancy_char_ptr_t __bpf_aligned;
 
 typedef core_reloc_mods_substruct_t core_reloc_mods_substruct_tt;
 
@@ -567,7 +572,7 @@ struct core_reloc_mods___typedefs {
        arr4_t e;
        fancy_char_ptr_t d;
        fancy_char_ptr_t c;
-       int3_t b;
+       int3_t b __bpf_aligned;
        int3_t a;
 };
 
@@ -739,19 +744,19 @@ struct core_reloc_bitfields___bit_sz_change {
        int8_t          sb4: 1;         /*  4 ->  1 */
        int32_t         sb20: 30;       /* 20 -> 30 */
        /* non-bitfields */
-       uint16_t        u32;            /* 32 -> 16 */
-       int64_t         s32;            /* 32 -> 64 */
+       uint16_t        u32;                    /* 32 -> 16 */
+       int64_t         s32 __bpf_aligned;      /* 32 -> 64 */
 };
 
 /* turn bitfield into non-bitfield and vice versa */
 struct core_reloc_bitfields___bitfield_vs_int {
        uint64_t        ub1;            /*  3 -> 64 non-bitfield */
        uint8_t         ub2;            /* 20 ->  8 non-bitfield */
-       int64_t         ub7;            /*  7 -> 64 non-bitfield signed */
-       int64_t         sb4;            /*  4 -> 64 non-bitfield signed */
-       uint64_t        sb20;           /* 20 -> 16 non-bitfield unsigned */
-       int32_t         u32: 20;        /* 32 non-bitfield -> 20 bitfield */
-       uint64_t        s32: 60;        /* 32 non-bitfield -> 60 bitfield */
+       int64_t         ub7 __bpf_aligned;      /*  7 -> 64 non-bitfield signed */
+       int64_t         sb4 __bpf_aligned;      /*  4 -> 64 non-bitfield signed */
+       uint64_t        sb20 __bpf_aligned;     /* 20 -> 16 non-bitfield unsigned */
+       int32_t         u32: 20;                /* 32 non-bitfield -> 20 bitfield */
+       uint64_t        s32: 60 __bpf_aligned;  /* 32 non-bitfield -> 60 bitfield */
 };
 
 struct core_reloc_bitfields___just_big_enough {
index 1f1966e..3e6912e 100644 (file)
@@ -54,6 +54,7 @@ SEC("sockops")
 int bpf_testcb(struct bpf_sock_ops *skops)
 {
        char header[sizeof(struct ipv6hdr) + sizeof(struct tcphdr)];
+       struct bpf_sock_ops *reuse = skops;
        struct tcphdr *thdr;
        int good_call_rv = 0;
        int bad_call_rv = 0;
@@ -62,6 +63,46 @@ int bpf_testcb(struct bpf_sock_ops *skops)
        int v = 0;
        int op;
 
+       /* Test reading fields in bpf_sock_ops using single register */
+       asm volatile (
+               "%[reuse] = *(u32 *)(%[reuse] +96)"
+               : [reuse] "+r"(reuse)
+               :);
+
+       asm volatile (
+               "%[op] = *(u32 *)(%[skops] +96)"
+               : [op] "+r"(op)
+               : [skops] "r"(skops)
+               :);
+
+       asm volatile (
+               "r9 = %[skops];\n"
+               "r8 = *(u32 *)(r9 +164);\n"
+               "*(u32 *)(r9 +164) = r8;\n"
+               :: [skops] "r"(skops)
+               : "r9", "r8");
+
+       asm volatile (
+               "r1 = %[skops];\n"
+               "r1 = *(u64 *)(r1 +184);\n"
+               "if r1 == 0 goto +1;\n"
+               "r1 = *(u32 *)(r1 +4);\n"
+               :: [skops] "r"(skops):"r1");
+
+       asm volatile (
+               "r9 = %[skops];\n"
+               "r9 = *(u64 *)(r9 +184);\n"
+               "if r9 == 0 goto +1;\n"
+               "r9 = *(u32 *)(r9 +4);\n"
+               :: [skops] "r"(skops):"r9");
+
+       asm volatile (
+               "r1 = %[skops];\n"
+               "r2 = *(u64 *)(r1 +184);\n"
+               "if r2 == 0 goto +1;\n"
+               "r2 = *(u32 *)(r2 +4);\n"
+               :: [skops] "r"(skops):"r1", "r2");
+
        op = (int) skops->op;
 
        update_event_map(op);
index cd4b72c..913acdf 100644 (file)
@@ -15,9 +15,9 @@ int test_pid = 0;
 bool capture = false;
 
 /* .bss */
-long payload1_len1 = 0;
-long payload1_len2 = 0;
-long total1 = 0;
+__u64 payload1_len1 = 0;
+__u64 payload1_len2 = 0;
+__u64 total1 = 0;
 char payload1[MAX_LEN + MAX_LEN] = {};
 
 /* .data */
diff --git a/tools/testing/selftests/bpf/settings b/tools/testing/selftests/bpf/settings
new file mode 100644 (file)
index 0000000..e7b9417
--- /dev/null
@@ -0,0 +1 @@
+timeout=0
index 305fae8..c75fc64 100644 (file)
@@ -3883,7 +3883,7 @@ static int test_big_btf_info(unsigned int test_num)
        info_garbage.garbage = 0;
        err = bpf_obj_get_info_by_fd(btf_fd, info, &info_len);
        if (CHECK(err || info_len != sizeof(*info),
-                 "err:%d errno:%d info_len:%u sizeof(*info):%lu",
+                 "err:%d errno:%d info_len:%u sizeof(*info):%zu",
                  err, errno, info_len, sizeof(*info))) {
                err = -1;
                goto done;
@@ -4094,7 +4094,7 @@ static int do_test_get_info(unsigned int test_num)
        if (CHECK(err || !info.id || info_len != sizeof(info) ||
                  info.btf_size != raw_btf_size ||
                  (ret = memcmp(raw_btf, user_btf, expected_nbytes)),
-                 "err:%d errno:%d info.id:%u info_len:%u sizeof(info):%lu raw_btf_size:%u info.btf_size:%u expected_nbytes:%u memcmp:%d",
+                 "err:%d errno:%d info.id:%u info_len:%u sizeof(info):%zu raw_btf_size:%u info.btf_size:%u expected_nbytes:%u memcmp:%d",
                  err, errno, info.id, info_len, sizeof(info),
                  raw_btf_size, info.btf_size, expected_nbytes, ret)) {
                err = -1;
@@ -4730,7 +4730,7 @@ ssize_t get_pprint_expected_line(enum pprint_mapv_kind_t mapv_kind,
 
                nexpected_line = snprintf(expected_line, line_size,
                                          "%s%u: {%u,0,%d,0x%x,0x%x,0x%x,"
-                                         "{%lu|[%u,%u,%u,%u,%u,%u,%u,%u]},%s,"
+                                         "{%llu|[%u,%u,%u,%u,%u,%u,%u,%u]},%s,"
                                          "%u,0x%x,[[%d,%d],[%d,%d]]}\n",
                                          percpu_map ? "\tcpu" : "",
                                          percpu_map ? cpu : next_key,
@@ -4738,7 +4738,7 @@ ssize_t get_pprint_expected_line(enum pprint_mapv_kind_t mapv_kind,
                                          v->unused_bits2a,
                                          v->bits28,
                                          v->unused_bits2b,
-                                         v->ui64,
+                                         (__u64)v->ui64,
                                          v->ui8a[0], v->ui8a[1],
                                          v->ui8a[2], v->ui8a[3],
                                          v->ui8a[4], v->ui8a[5],
index 6e09bf7..dbb820d 100644 (file)
@@ -135,6 +135,11 @@ static inline __u64 ptr_to_u64(const void *ptr)
        return (__u64) (unsigned long) ptr;
 }
 
+static inline void *u64_to_ptr(__u64 ptr)
+{
+       return (void *) (unsigned long) ptr;
+}
+
 int bpf_find_map(const char *test, struct bpf_object *obj, const char *name);
 int compare_map_keys(int map1_fd, int map2_fd);
 int compare_stack_ips(int smap_fd, int amap_fd, int stack_trace_len);
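The new u64_to_ptr() complements the existing ptr_to_u64(): UAPI structs such as bpf_link_info carry pointers as __u64, and routing both directions through unsigned long avoids the pointer/integer size-mismatch warnings the raw (__u64)/(char *) casts could trigger on 32-bit builds. A small usage sketch, assuming the two helpers above are in scope:

#include <string.h>

/* sketch: round-trip a pointer through a __u64 field, as bpf_link_info does */
static int tp_name_is(const char *want)
{
        char tp_name[128] = "sys_enter";
        __u64 field = ptr_to_u64(tp_name);      /* store side: pointer -> __u64 */

        return strcmp(u64_to_ptr(field), want) == 0; /* read side: __u64 -> pointer */
}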
index 8549b31..73da7fe 100644 (file)
@@ -124,17 +124,24 @@ int main(int argc, char **argv)
        sprintf(test_script,
                "iptables -A INPUT -p tcp --dport %d -j DROP",
                TESTPORT);
-       system(test_script);
+       if (system(test_script)) {
+               printf("FAILED: execute command: %s, err %d\n", test_script, -errno);
+               goto err;
+       }
 
        sprintf(test_script,
                "nc 127.0.0.1 %d < /etc/passwd > /dev/null 2>&1 ",
                TESTPORT);
-       system(test_script);
+       if (system(test_script))
+               printf("execute command: %s, err %d\n", test_script, -errno);
 
        sprintf(test_script,
                "iptables -D INPUT -p tcp --dport %d -j DROP",
                TESTPORT);
-       system(test_script);
+       if (system(test_script)) {
+               printf("FAILED: execute command: %s, err %d\n", test_script, -errno);
+               goto err;
+       }
 
        rv = bpf_map_lookup_elem(bpf_map__fd(global_map), &key, &g);
        if (rv != 0) {
index 5224dae..0941aa1 100644 (file)
 #include "cgroup_util.h"
 
 
+/*
+ * Memory cgroup charging and vmstat data aggregation are performed using
+ * percpu batches of 32 pages (see MEMCG_CHARGE_BATCH). So the maximum
+ * discrepancy between the charge and the vmstat entries is the number of
+ * cpus multiplied by 32 pages multiplied by 2.
+ */
+#define MAX_VMSTAT_ERROR (4096 * 32 * 2 * get_nprocs())
+
+
 static int alloc_dcache(const char *cgroup, void *arg)
 {
        unsigned long i;
@@ -180,7 +189,7 @@ static int test_kmem_memcg_deletion(const char *root)
                goto cleanup;
 
        sum = slab + anon + file + kernel_stack;
-       if (abs(sum - current) < 4096 * 32 * 2 * get_nprocs()) {
+       if (abs(sum - current) < MAX_VMSTAT_ERROR) {
                ret = KSFT_PASS;
        } else {
                printf("memory.current = %ld\n", current);
@@ -331,6 +340,64 @@ cleanup:
        return ret;
 }
 
+/*
+ * This test creates a sub-tree with 1000 memory cgroups.
+ * Then it checks that the memory.current on the parent level
+ * is greater than 0 and approximately matches the percpu value
+ * from memory.stat.
+ */
+static int test_percpu_basic(const char *root)
+{
+       int ret = KSFT_FAIL;
+       char *parent, *child;
+       long current, percpu;
+       int i;
+
+       parent = cg_name(root, "percpu_basic_test");
+       if (!parent)
+               goto cleanup;
+
+       if (cg_create(parent))
+               goto cleanup;
+
+       if (cg_write(parent, "cgroup.subtree_control", "+memory"))
+               goto cleanup;
+
+       for (i = 0; i < 1000; i++) {
+               child = cg_name_indexed(parent, "child", i);
+               if (!child)
+                       return -1;
+
+               if (cg_create(child))
+                       goto cleanup_children;
+
+               free(child);
+       }
+
+       current = cg_read_long(parent, "memory.current");
+       percpu = cg_read_key_long(parent, "memory.stat", "percpu ");
+
+       if (current > 0 && percpu > 0 && abs(current - percpu) <
+           MAX_VMSTAT_ERROR)
+               ret = KSFT_PASS;
+       else
+               printf("memory.current %ld\npercpu %ld\n",
+                      current, percpu);
+
+cleanup_children:
+       for (i = 0; i < 1000; i++) {
+               child = cg_name_indexed(parent, "child", i);
+               cg_destroy(child);
+               free(child);
+       }
+
+cleanup:
+       cg_destroy(parent);
+       free(parent);
+
+       return ret;
+}
+
 #define T(x) { x, #x }
 struct kmem_test {
        int (*fn)(const char *root);
@@ -341,6 +408,7 @@ struct kmem_test {
        T(test_kmem_proc_kpagecgroup),
        T(test_kmem_kernel_stacks),
        T(test_kmem_dead_cgroups),
+       T(test_percpu_basic),
 };
 #undef T
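For a sense of scale: with the hardcoded 4096-byte page size, MAX_VMSTAT_ERROR above works out to 4096 * 32 * 2 * nr_cpus bytes, e.g. 4096 * 32 * 2 * 4 = 1048576 bytes (1 MiB) of tolerated drift on a 4-CPU machine; test_percpu_basic applies the same bound when comparing memory.current against the "percpu " key read from memory.stat.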
 
index 94b02a1..344a99c 100644 (file)
@@ -10,3 +10,4 @@ execveat.denatured
 /recursion-depth
 xxxxxxxx*
 pipe
+S_I*.test
index 4453b8f..0a13b11 100644 (file)
@@ -3,7 +3,7 @@ CFLAGS = -Wall
 CFLAGS += -Wno-nonnull
 CFLAGS += -D_GNU_SOURCE
 
-TEST_PROGS := binfmt_script
+TEST_PROGS := binfmt_script non-regular
 TEST_GEN_PROGS := execveat
 TEST_GEN_FILES := execveat.symlink execveat.denatured script subdir pipe
 # Makefile is a run-time dependency, since it's accessed by the execveat test
@@ -11,7 +11,8 @@ TEST_FILES := Makefile
 
 TEST_GEN_PROGS += recursion-depth
 
-EXTRA_CLEAN := $(OUTPUT)/subdir.moved $(OUTPUT)/execveat.moved $(OUTPUT)/xxxxx*
+EXTRA_CLEAN := $(OUTPUT)/subdir.moved $(OUTPUT)/execveat.moved $(OUTPUT)/xxxxx*        \
+              $(OUTPUT)/S_I*.test
 
 include ../lib.mk
 
diff --git a/tools/testing/selftests/exec/non-regular.c b/tools/testing/selftests/exec/non-regular.c
new file mode 100644 (file)
index 0000000..cd3a34a
--- /dev/null
@@ -0,0 +1,196 @@
+// SPDX-License-Identifier: GPL-2.0+
+#include <errno.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/sysmacros.h>
+#include <sys/types.h>
+
+#include "../kselftest_harness.h"
+
+/* Remove a file, ignoring the result if it didn't exist. */
+void rm(struct __test_metadata *_metadata, const char *pathname,
+       int is_dir)
+{
+       int rc;
+
+       if (is_dir)
+               rc = rmdir(pathname);
+       else
+               rc = unlink(pathname);
+
+       if (rc < 0) {
+               ASSERT_EQ(errno, ENOENT) {
+                       TH_LOG("Not ENOENT: %s", pathname);
+               }
+       } else {
+               ASSERT_EQ(rc, 0) {
+                       TH_LOG("Failed to remove: %s", pathname);
+               }
+       }
+}
+
+FIXTURE(file) {
+       char *pathname;
+       int is_dir;
+};
+
+FIXTURE_VARIANT(file)
+{
+       const char *name;
+       int expected;
+       int is_dir;
+       void (*setup)(struct __test_metadata *_metadata,
+                     FIXTURE_DATA(file) *self,
+                     const FIXTURE_VARIANT(file) *variant);
+       int major, minor, mode; /* for mknod() */
+};
+
+void setup_link(struct __test_metadata *_metadata,
+               FIXTURE_DATA(file) *self,
+               const FIXTURE_VARIANT(file) *variant)
+{
+       const char * const paths[] = {
+               "/bin/true",
+               "/usr/bin/true",
+       };
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(paths); i++) {
+               if (access(paths[i], X_OK) == 0) {
+                       ASSERT_EQ(symlink(paths[i], self->pathname), 0);
+                       return;
+               }
+       }
+       ASSERT_EQ(1, 0) {
+               TH_LOG("Could not find viable 'true' binary");
+       }
+}
+
+FIXTURE_VARIANT_ADD(file, S_IFLNK)
+{
+       .name = "S_IFLNK",
+       .expected = ELOOP,
+       .setup = setup_link,
+};
+
+void setup_dir(struct __test_metadata *_metadata,
+              FIXTURE_DATA(file) *self,
+              const FIXTURE_VARIANT(file) *variant)
+{
+       ASSERT_EQ(mkdir(self->pathname, 0755), 0);
+}
+
+FIXTURE_VARIANT_ADD(file, S_IFDIR)
+{
+       .name = "S_IFDIR",
+       .is_dir = 1,
+       .expected = EACCES,
+       .setup = setup_dir,
+};
+
+void setup_node(struct __test_metadata *_metadata,
+               FIXTURE_DATA(file) *self,
+               const FIXTURE_VARIANT(file) *variant)
+{
+       dev_t dev;
+       int rc;
+
+       dev = makedev(variant->major, variant->minor);
+       rc = mknod(self->pathname, 0755 | variant->mode, dev);
+       ASSERT_EQ(rc, 0) {
+               if (errno == EPERM)
+                       SKIP(return, "Please run as root; cannot mknod(%s)",
+                               variant->name);
+       }
+}
+
+FIXTURE_VARIANT_ADD(file, S_IFBLK)
+{
+       .name = "S_IFBLK",
+       .expected = EACCES,
+       .setup = setup_node,
+       /* /dev/loop0 */
+       .major = 7,
+       .minor = 0,
+       .mode = S_IFBLK,
+};
+
+FIXTURE_VARIANT_ADD(file, S_IFCHR)
+{
+       .name = "S_IFCHR",
+       .expected = EACCES,
+       .setup = setup_node,
+       /* /dev/zero */
+       .major = 1,
+       .minor = 5,
+       .mode = S_IFCHR,
+};
+
+void setup_fifo(struct __test_metadata *_metadata,
+               FIXTURE_DATA(file) *self,
+               const FIXTURE_VARIANT(file) *variant)
+{
+       ASSERT_EQ(mkfifo(self->pathname, 0755), 0);
+}
+
+FIXTURE_VARIANT_ADD(file, S_IFIFO)
+{
+       .name = "S_IFIFO",
+       .expected = EACCES,
+       .setup = setup_fifo,
+};
+
+FIXTURE_SETUP(file)
+{
+       ASSERT_GT(asprintf(&self->pathname, "%s.test", variant->name), 6);
+       self->is_dir = variant->is_dir;
+
+       rm(_metadata, self->pathname, variant->is_dir);
+       variant->setup(_metadata, self, variant);
+}
+
+FIXTURE_TEARDOWN(file)
+{
+       rm(_metadata, self->pathname, self->is_dir);
+}
+
+TEST_F(file, exec_errno)
+{
+       char * const argv[2] = { (char * const)self->pathname, NULL };
+
+       EXPECT_LT(execv(argv[0], argv), 0);
+       EXPECT_EQ(errno, variant->expected);
+}
+
+/* S_IFSOCK */
+FIXTURE(sock)
+{
+       int fd;
+};
+
+FIXTURE_SETUP(sock)
+{
+       self->fd = socket(AF_INET, SOCK_STREAM, 0);
+       ASSERT_GE(self->fd, 0);
+}
+
+FIXTURE_TEARDOWN(sock)
+{
+       if (self->fd >= 0)
+               ASSERT_EQ(close(self->fd), 0);
+}
+
+TEST_F(sock, exec_errno)
+{
+       char * const argv[2] = { " magic socket ", NULL };
+       char * const envp[1] = { NULL };
+
+       EXPECT_LT(fexecve(self->fd, argv, envp), 0);
+       EXPECT_EQ(errno, EACCES);
+}
+
+TEST_HARNESS_MAIN
index ea21472..afd4238 100755 (executable)
@@ -343,7 +343,7 @@ kmod_test_0001_driver()
 
        kmod_defaults_driver
        config_num_threads 1
-       printf '\000' >"$DIR"/config_test_driver
+       printf $NAME >"$DIR"/config_test_driver
        config_trigger ${FUNCNAME[0]}
        config_expect_result ${FUNCNAME[0]} MODULE_NOT_FOUND
 }
@@ -354,7 +354,7 @@ kmod_test_0001_fs()
 
        kmod_defaults_fs
        config_num_threads 1
-       printf '\000' >"$DIR"/config_test_fs
+       printf $NAME >"$DIR"/config_test_fs
        config_trigger ${FUNCNAME[0]}
        config_expect_result ${FUNCNAME[0]} -EINVAL
 }
index 18c5de5..bf361f3 100755 (executable)
@@ -180,6 +180,8 @@ setup()
                        ;;
                r[12]) ip netns exec $ns sysctl -q -w net.ipv4.ip_forward=1
                       ip netns exec $ns sysctl -q -w net.ipv4.conf.all.send_redirects=1
+                      ip netns exec $ns sysctl -q -w net.ipv4.conf.default.rp_filter=0
+                      ip netns exec $ns sysctl -q -w net.ipv4.conf.all.rp_filter=0
 
                       ip netns exec $ns sysctl -q -w net.ipv6.conf.all.forwarding=1
                       ip netns exec $ns sysctl -q -w net.ipv6.route.mtu_expires=10
index 2499824..8df5cb8 100644 (file)
@@ -1,4 +1,6 @@
 CONFIG_MPTCP=y
 CONFIG_MPTCP_IPV6=y
+CONFIG_INET_DIAG=m
+CONFIG_INET_MPTCP_DIAG=m
 CONFIG_VETH=y
 CONFIG_NET_SCH_NETEM=m
index cad6f73..090620c 100644 (file)
@@ -406,10 +406,11 @@ static int copyfd_io_poll(int infd, int peerfd, int outfd)
 
                                /* ... but we still receive.
                                 * Close our write side, ev. give some time
-                                * for address notification
+                                * for address notification and/or checking
+                                * the current status
                                 */
-                               if (cfg_join)
-                                       usleep(400000);
+                               if (cfg_wait)
+                                       usleep(cfg_wait);
                                shutdown(peerfd, SHUT_WR);
                        } else {
                                if (errno == EINTR)
@@ -427,7 +428,7 @@ static int copyfd_io_poll(int infd, int peerfd, int outfd)
        }
 
        /* leave some time for late join/announce */
-       if (cfg_wait)
+       if (cfg_join)
                usleep(cfg_wait);
 
        close(peerfd);
index d3e0809..a47d1d8 100755 (executable)
@@ -2,13 +2,18 @@
 # SPDX-License-Identifier: GPL-2.0
 #
 # This tests basic flowtable functionality.
-# Creates following topology:
+# Creates the following default topology:
 #
 # Originator (MTU 9000) <-Router1-> MTU 1500 <-Router2-> Responder (MTU 2000)
 # Router1 is the one doing flow offloading, Router2 has no special
 # purpose other than having a link that is smaller than either Originator
 # and responder, i.e. TCPMSS announced values are too large and will still
 # result in fragmentation and/or PMTU discovery.
+#
+# You can check with different Originator/Link/Responder MTUs, e.g.:
+# sh nft_flowtable.sh -o1000 -l500 -r100
+#
+
 
 # Kselftest framework requirement - SKIP code is 4.
 ksft_skip=4
@@ -21,29 +26,18 @@ ns2out=""
 
 log_netns=$(sysctl -n net.netfilter.nf_log_all_netns)
 
-nft --version > /dev/null 2>&1
-if [ $? -ne 0 ];then
-       echo "SKIP: Could not run test without nft tool"
-       exit $ksft_skip
-fi
-
-ip -Version > /dev/null 2>&1
-if [ $? -ne 0 ];then
-       echo "SKIP: Could not run test without ip tool"
-       exit $ksft_skip
-fi
-
-which nc > /dev/null 2>&1
-if [ $? -ne 0 ];then
-       echo "SKIP: Could not run test without nc (netcat)"
-       exit $ksft_skip
-fi
+checktool (){
+       $1 > /dev/null 2>&1
+       if [ $? -ne 0 ];then
+               echo "SKIP: Could not $2"
+               exit $ksft_skip
+       fi
+}
 
-ip netns add nsr1
-if [ $? -ne 0 ];then
-       echo "SKIP: Could not create net namespace"
-       exit $ksft_skip
-fi
+checktool "nft --version" "run test without nft tool"
+checktool "ip -Version" "run test without ip tool"
+checktool "which nc" "run test without nc (netcat)"
+checktool "ip netns add nsr1" "create net namespace"
 
 ip netns add ns1
 ip netns add ns2
@@ -89,11 +83,24 @@ ip -net nsr2 addr add dead:2::1/64 dev veth1
 # ns2 is going via nsr2 with a smaller mtu, so that TCPMSS announced by both peers
 # is NOT the lowest link mtu.
 
-ip -net nsr1 link set veth0 mtu 9000
-ip -net ns1 link set eth0 mtu 9000
+omtu=9000
+lmtu=1500
+rmtu=2000
+
+while getopts "o:l:r:" o
+do
+       case $o in
+               o) omtu=$OPTARG;;
+               l) lmtu=$OPTARG;;
+               r) rmtu=$OPTARG;;
+       esac
+done
+
+ip -net nsr1 link set veth0 mtu $omtu
+ip -net ns1 link set eth0 mtu $omtu
 
-ip -net nsr2 link set veth1 mtu 2000
-ip -net ns2 link set eth0 mtu 2000
+ip -net nsr2 link set veth1 mtu $rmtu
+ip -net ns2 link set eth0 mtu $rmtu
 
 # transfer-net between nsr1 and nsr2.
 # these addresses are not used for connections.
@@ -147,7 +154,7 @@ table inet filter {
       # as PMTUd is off.
       # This rule is deleted for the last test, when we expect PMTUd
       # to kick in and ensure all packets meet mtu requirements.
-      meta length gt 1500 accept comment something-to-grep-for
+      meta length gt $lmtu accept comment something-to-grep-for
 
       # next line blocks connection w.o. working offload.
       # we only do this for reverse dir, because we expect packets to
@@ -243,8 +250,14 @@ test_tcp_forwarding_ip()
 
        sleep 3
 
-       kill $lpid
-       kill $cpid
+       if ps -p $lpid > /dev/null;then
+               kill $lpid
+       fi
+
+       if ps -p $cpid > /dev/null;then
+               kill $cpid
+       fi
+
        wait
 
        check_transfer "$ns1in" "$ns2out" "ns1 -> ns2"
index 535720b..7a6d402 100644 (file)
@@ -133,6 +133,8 @@ struct seccomp_data {
 #  define __NR_seccomp 348
 # elif defined(__xtensa__)
 #  define __NR_seccomp 337
+# elif defined(__sh__)
+#  define __NR_seccomp 372
 # else
 #  warning "seccomp syscall number unknown for this architecture"
 #  define __NR_seccomp 0xffff
@@ -1719,6 +1721,10 @@ TEST_F(TRACE_poke, getpid_runs_normally)
  * a2 of the current window which is not fixed.
  */
 #define SYSCALL_RET(reg) a[(reg).windowbase * 4 + 2]
+#elif defined(__sh__)
+# define ARCH_REGS     struct pt_regs
+# define SYSCALL_NUM   gpr[3]
+# define SYSCALL_RET   gpr[0]
 #else
 # error "Do not know how to find your architecture's registers and syscalls"
 #endif
@@ -1791,7 +1797,7 @@ void change_syscall(struct __test_metadata *_metadata,
 
 #if defined(__x86_64__) || defined(__i386__) || defined(__powerpc__) || \
        defined(__s390__) || defined(__hppa__) || defined(__riscv) || \
-       defined(__xtensa__) || defined(__csky__)
+       defined(__xtensa__) || defined(__csky__) || defined(__sh__)
        {
                regs.SYSCALL_NUM = syscall;
        }
index 91d38a2..93fc5ca 100644 (file)
@@ -942,6 +942,41 @@ TEST_F(hmm, migrate_fault)
 }
 
 /*
+ * Migrate anonymous shared memory to device private memory.
+ */
+TEST_F(hmm, migrate_shared)
+{
+       struct hmm_buffer *buffer;
+       unsigned long npages;
+       unsigned long size;
+       int ret;
+
+       npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
+       ASSERT_NE(npages, 0);
+       size = npages << self->page_shift;
+
+       buffer = malloc(sizeof(*buffer));
+       ASSERT_NE(buffer, NULL);
+
+       buffer->fd = -1;
+       buffer->size = size;
+       buffer->mirror = malloc(size);
+       ASSERT_NE(buffer->mirror, NULL);
+
+       buffer->ptr = mmap(NULL, size,
+                          PROT_READ | PROT_WRITE,
+                          MAP_SHARED | MAP_ANONYMOUS,
+                          buffer->fd, 0);
+       ASSERT_NE(buffer->ptr, MAP_FAILED);
+
+       /* Migrate memory to device. */
+       ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_MIGRATE, buffer, npages);
+       ASSERT_EQ(ret, -ENOENT);
+
+       hmm_buffer_free(buffer);
+}
+
+/*
  * Try to migrate various memory types to device private memory.
  */
 TEST_F(hmm2, migrate_mixed)
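
The new migrate_shared test documents a corner case rather than new functionality: a MAP_SHARED | MAP_ANONYMOUS mapping is not eligible for migration to device private memory, so the HMM_DMIRROR_MIGRATE ioctl is expected to fail and the test asserts the -ENOENT it returns (device-private migration applies only to private anonymous pages).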
index dbf14c1..f2640e5 100644 (file)
@@ -42,16 +42,16 @@ static inline void __virtio_clear_bit(struct virtio_device *vdev,
        (__virtio_test_bit((dev), feature))
 
 /**
- * virtio_has_iommu_quirk - determine whether this device has the iommu quirk
+ * virtio_has_dma_quirk - determine whether this device has the DMA quirk
  * @vdev: the device
  */
-static inline bool virtio_has_iommu_quirk(const struct virtio_device *vdev)
+static inline bool virtio_has_dma_quirk(const struct virtio_device *vdev)
 {
        /*
         * Note the reverse polarity of the quirk feature (compared to most
         * other features), this is for compatibility with legacy systems.
         */
-       return !virtio_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM);
+       return !virtio_has_feature(vdev, VIRTIO_F_ACCESS_PLATFORM);
 }
 
 static inline bool virtio_is_little_endian(struct virtio_device *vdev)
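
The helper is renamed together with the feature bit it tests: VIRTIO_F_IOMMU_PLATFORM becomes VIRTIO_F_ACCESS_PLATFORM, reflecting that the bit means "the device honours the platform's access/DMA restrictions" rather than "an IOMMU is present"; the inverted polarity called out in the comment is unchanged. A small illustrative caller (the function name is invented for the sketch) might be:

    #include <linux/virtio.h>
    #include <linux/virtio_config.h>

    /* Illustrative only: decide whether a driver must hand the device guest
     * physical addresses (legacy quirk set) or can go through the DMA API.
     */
    static bool example_needs_gpa_addressing(const struct virtio_device *vdev)
    {
        /* quirk set == VIRTIO_F_ACCESS_PLATFORM not offered by the device */
        return virtio_has_dma_quirk(vdev);
    }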
index 390f758..dd77768 100644 (file)
@@ -61,7 +61,7 @@ static void async_pf_execute(struct work_struct *work)
         * access remotely.
         */
        mmap_read_lock(mm);
-       get_user_pages_remote(NULL, mm, addr, 1, FOLL_WRITE, NULL, NULL,
+       get_user_pages_remote(mm, addr, 1, FOLL_WRITE, NULL, NULL,
                        &locked);
        if (locked)
                mmap_read_unlock(mm);
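
This is fallout from the mm/gup rework merged this cycle rather than a KVM change: get_user_pages_remote() no longer takes a task pointer, so the NULL first argument visible in the old call simply disappears. Paraphrasing the prototypes before and after the rework:

    /* Before the rework:
     *   long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
     *                              unsigned long start, unsigned long nr_pages,
     *                              unsigned int gup_flags, struct page **pages,
     *                              struct vm_area_struct **vmas, int *locked);
     */
    long get_user_pages_remote(struct mm_struct *mm, unsigned long start,
                               unsigned long nr_pages, unsigned int gup_flags,
                               struct page **pages, struct vm_area_struct **vmas,
                               int *locked);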
index ef7ed91..d6408bb 100644 (file)
@@ -303,7 +303,7 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
        INIT_LIST_HEAD(&irqfd->list);
        INIT_WORK(&irqfd->inject, irqfd_inject);
        INIT_WORK(&irqfd->shutdown, irqfd_shutdown);
-       seqcount_init(&irqfd->irq_entry_sc);
+       seqcount_spinlock_init(&irqfd->irq_entry_sc, &kvm->irqfds.lock);
 
        f = fdget(args->fd);
        if (!f.file) {
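
irqfd->irq_entry_sc moves from a plain seqcount_t to the new lock-associated variant: seqcount_spinlock_init() records kvm->irqfds.lock as the lock serializing writers, so lockdep can catch a write-side update done without that spinlock held. A minimal sketch of the pattern, with invented names:

    #include <linux/seqlock.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    /* Writers take the associated spinlock (lockdep-checked); readers spin
     * on the usual begin/retry sequence without taking the lock.
     */
    struct demo {
        spinlock_t lock;
        seqcount_spinlock_t sc;
        u64 value;
    };

    static void demo_init(struct demo *d)
    {
        spin_lock_init(&d->lock);
        seqcount_spinlock_init(&d->sc, &d->lock);
    }

    static void demo_write(struct demo *d, u64 v)
    {
        spin_lock(&d->lock);
        write_seqcount_begin(&d->sc);   /* lockdep: d->lock must be held */
        d->value = v;
        write_seqcount_end(&d->sc);
        spin_unlock(&d->lock);
    }

    static u64 demo_read(struct demo *d)
    {
        unsigned int seq;
        u64 v;

        do {
            seq = read_seqcount_begin(&d->sc);
            v = d->value;
        } while (read_seqcount_retry(&d->sc, seq));

        return v;
    }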
index 4eaa4e4..67cd0b8 100644 (file)
@@ -1894,7 +1894,7 @@ static int hva_to_pfn_remapped(struct vm_area_struct *vma,
                 * not call the fault handler, so do it here.
                 */
                bool unlocked = false;
-               r = fixup_user_fault(current, current->mm, addr,
+               r = fixup_user_fault(current->mm, addr,
                                     (write_fault ? FAULT_FLAG_WRITE : 0),
                                     &unlocked);
                if (unlocked)
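
Same mm/gup rework as in the async_pf hunk above: fixup_user_fault() also drops its task argument, so hva_to_pfn_remapped() now passes only current->mm along with the address, the fault flags and the unlocked marker.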
index 28fda42..c9bb395 100644 (file)
@@ -40,17 +40,21 @@ static int __connect(struct irq_bypass_producer *prod,
        if (prod->add_consumer)
                ret = prod->add_consumer(prod, cons);
 
-       if (!ret) {
-               ret = cons->add_producer(cons, prod);
-               if (ret && prod->del_consumer)
-                       prod->del_consumer(prod, cons);
-       }
+       if (ret)
+               goto err_add_consumer;
+
+       ret = cons->add_producer(cons, prod);
+       if (ret)
+               goto err_add_producer;
 
        if (cons->start)
                cons->start(cons);
        if (prod->start)
                prod->start(prod);
-
+err_add_producer:
+       if (prod->del_consumer)
+               prod->del_consumer(prod, cons);
+err_add_consumer:
        return ret;
 }
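
The nested error handling in __connect() is recast in goto-unwind style so that a failing cons->add_producer() undoes the earlier prod->add_consumer() and, unlike before, neither ->start() callback runs after a failure. As written, though, the success path also falls through the err_add_producer label, so prod->del_consumer() is called even when nothing failed; the conventional shape of the pattern returns before the unwind labels, roughly as in this illustrative sketch (not a replacement patch):

    #include <linux/irqbypass.h>

    /* Sketch of the usual goto-unwind layout for the same callbacks; the
     * function name is invented and this mirrors, not replaces, __connect().
     */
    static int example_connect(struct irq_bypass_producer *prod,
                               struct irq_bypass_consumer *cons)
    {
        int ret = 0;

        if (prod->add_consumer)
            ret = prod->add_consumer(prod, cons);
        if (ret)
            goto err_add_consumer;

        ret = cons->add_producer(cons, prod);
        if (ret)
            goto err_add_producer;

        if (cons->start)
            cons->start(cons);
        if (prod->start)
            prod->start(prod);

        return 0;           /* success: skip the unwind labels */

    err_add_producer:
        if (prod->del_consumer)
            prod->del_consumer(prod, cons);
    err_add_consumer:
        return ret;
    }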